column              type     range
repo_name           string   lengths 7-71
file_path           string   lengths 5-118
context             list
import_statement    string   lengths 45-12.5k
token_num           int64    values 641-99.4k
cropped_code        string   lengths 44-17k
all_code            string   lengths 43-754k
next_line           string   lengths 2-330
gold_snippet_index  int64    values 0-68
created_at          string   lengths 25-25
level               string   9 classes
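The column summary above describes the row schema. Below is a minimal sketch of that schema as a Python dataclass, for orientation only; the class name and the interpretations in the comments (e.g. what gold_snippet_index points at) are assumptions read off the field names and the sample rows that follow, not definitions from the source.

from dataclasses import dataclass
from typing import Any, Dict, List

# Hypothetical container for one dataset row; field names and types come from
# the column summary above, comments are inferred from the sample rows below.
@dataclass
class Row:
    repo_name: str                  # e.g. "alexzhou907/DreamPropeller" (7-71 chars)
    file_path: str                  # target file within the repo (5-118 chars)
    context: List[Dict[str, Any]]   # cross-file snippets: {"identifier", "path", "snippet"}
    import_statement: str           # import block of the target file
    token_num: int                  # token count of the example (641 to ~99.4k)
    cropped_code: str               # code preceding the line to be completed
    all_code: str                   # full contents of the target file
    next_line: str                  # gold next line (2-330 chars)
    gold_snippet_index: int         # presumably an index into `context` (0-68)
    created_at: str                 # fixed-width timestamp, e.g. "2023-11-27 23:39:49+00:00"
    level: str                      # one of 9 classes, e.g. "24k"

Under that reading, each row appears to be a next-line completion task: predict next_line given cropped_code plus the cross-file context snippets, with gold_snippet_index marking the snippet actually needed.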
alexzhou907/DreamPropeller
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid 
only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise 
ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self, *args, **kwargs) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n \n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n # density = self.density_act(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = 
self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... 
D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
14,740
self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters():
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self,*args, **kwargs) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): 
raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters():
broadcast(param, src=0)
9
2023-11-27 23:39:49+00:00
24k
abdulhaim/LMRL-Gym
llm_rl_scripts/wordle/ppo/train_ppo.py
[ { "identifier": "train_loop", "path": "LLM_RL/algorithms/ppo/train.py", "snippet": "def train_loop(\n trainer: PPOTrain, \n inference: PPOInference, \n policy: PPOPolicy, \n load_dataset: Callable[[PPOInference, PPOPolicy], Union[PPODataset, PPOIterableDataset]], \n evaluator: Optional[Callable[[PPOInference, PPOPolicy], Tuple[float, Dict[str, Any]]]], \n prng_key: KeyArray, \n save_dir: Optional[str], \n n_rounds: int, \n epochs: int, \n max_steps: Optional[int], \n bsize: int, \n log_every: int, \n eval_every_steps: Optional[int], \n eval_every_epochs: Optional[int], \n eval_every_rounds: Optional[int], \n eval_at_beginning: bool, \n eval_at_end: bool, \n save_every_steps: Optional[int], \n save_every_epochs: Optional[int], \n save_every_rounds: Optional[int], \n save_at_beginning: bool, \n save_at_end: bool, \n save_best: bool, \n max_checkpoints: Optional[int], \n save_train_state: bool, \n save_dtype: jnp.dtype, \n use_wandb: bool, \n wandb_project: Optional[str], \n wandb_run_name: Optional[str], \n wandb_config: Optional[Dict[str, Any]], \n is_main_process: Optional[bool]=None, \n bc_dataset: Optional[Union[MaskDataset, MaskIterableDataset]]=None, \n bc_bsize: Optional[int]=None, \n **loop_state: Dict[Hashable, Any], \n) -> Tuple[PPOTrain, PPOInference, PPOPolicy]:\n print(\"entering training loop ...\")\n assert (not use_wandb) or (use_wandb and wandb_project is not None)\n if is_main_process is None:\n is_main_process = jax.process_index() == 0\n if bc_bsize is None:\n bc_bsize = bsize\n \n # initalize wandb\n wandb_id = loop_state.get('wandb_id', None)\n if use_wandb and is_main_process:\n if wandb_id is None:\n wandb_id = wandb.util.generate_id()\n wandb.init(\n project=wandb_project, \n id=wandb_id, \n name=wandb_run_name, \n config=wandb_config, \n reinit=True, \n resume=\"allow\", \n )\n\n # initalize training loop state\n train_logs = []\n best_perf = loop_state.get('best_perf', float('inf'))\n saved_checkpoints = loop_state.get('saved_checkpoints', deque([]))\n step = 0\n epoch = -1\n round = -1\n def _save(\n name: str, \n add_to_queue: bool, \n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal saved_checkpoints\n print(f'saving checkpoint {name} ...')\n print(f'saving in {save_dir}...')\n # conditionally delete old checkpoints\n if add_to_queue and is_main_process:\n if (max_checkpoints is not None) and (len(saved_checkpoints) >= max_checkpoints):\n delete(saved_checkpoints.popleft(), recursive=True)\n curr_save_dir = os.path.join(save_dir, name)\n if is_main_process:\n create_path(curr_save_dir)\n dump_state(\n policy_model=trainer.policy_model, \n policy_train_state=trainer.policy_train_state, \n value_head_model=trainer.value_head_model, \n value_head_train_state=trainer.value_head_train_state, \n save_dir=curr_save_dir, \n save_train_state=save_train_state, \n enable_save=is_main_process, \n save_dtype=save_dtype, \n **loop_state, \n )\n if add_to_queue and is_main_process:\n saved_checkpoints.append(curr_save_dir)\n print('saved.')\n \n def _eval(\n **loop_state: Dict[Hashable, Any], \n ):\n nonlocal best_perf\n nonlocal inference\n nonlocal policy\n # get eval logs\n print(\"beginning evaluation ...\")\n inference = inference.replace(\n policy_params=trainer.policy_train_state.params, \n value_head_params=trainer.value_head_train_state.params, \n )\n policy.set_params(trainer.policy_train_state.params)\n eval_perf, eval_logs = evaluator(inference, policy)\n\n # publish eval logs\n eval_logs = pull_logs(label_logs(eval_logs, 'eval', {'step': step+1, 'epoch': 
epoch, 'round': round}))\n log(eval_logs, use_wandb and is_main_process)\n\n # conditionally save best model and optimizer state\n if save_dir is not None and save_best and eval_perf < best_perf:\n print('new best model!')\n best_perf = eval_perf\n _save(\n name='best', \n add_to_queue=False, \n **{**loop_state, 'best_perf': best_perf}, \n )\n\n bc_d = None\n if bc_dataset is not None:\n prng_key, new_prng = jax.random.split(prng_key)\n bc_d = dataloader(new_prng, bc_dataset, bc_bsize, truncate=True)\n \n # begin training loop\n for round in tqdm(range(n_rounds)):\n \n print(f'beginning round {round} ...')\n print(f\"best performance: {best_perf}\")\n\n # load dataset\n dataset = load_dataset(inference, policy)\n\n steps_per_epoch = len(dataset) // bsize if isinstance(dataset, Dataset) else None\n if 'steps_per_epoch' in loop_state:\n assert steps_per_epoch == loop_state['steps_per_epoch'], 'loop_state steps_per_epoch does not match dataset steps_per_epoch'\n\n # begin evaluation\n if evaluator is not None and eval_at_beginning:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save initial checkpoint\n if save_dir is not None and save_at_beginning:\n _save(\n name='initial', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n print(\"num epochs: \", epochs)\n for epoch in tqdm(range(epochs)):\n prng_key, new_prng = jax.random.split(prng_key)\n d = dataloader(new_prng, dataset, bsize, truncate=True)\n print(\"steps per epoch: \", steps_per_epoch)\n for batch in tqdm(d, total=steps_per_epoch):\n if bc_d is not None:\n try:\n bc_batch = next(bc_d)\n except StopIteration as e:\n prng_key, new_prng = jax.random.split(prng_key)\n bc_d = dataloader(new_prng, bc_dataset, bc_bsize, truncate=True)\n bc_batch = next(bc_d)\n batch = {**batch, **{'bc_data_'+k: v for k, v in bc_batch.items()}}\n \n # step model and get training logs\n if 'step' in loop_state and step < loop_state['step']:\n step += 1\n continue\n # print(\"trainer step: \", step)\n trainer, _, info = trainer.step(\n **batch, \n prng_key=new_prng, \n train=True, \n )\n train_logs.append(info)\n \n # publish training logs and clear logs\n if (step + 1) % log_every == 0:\n logs = combine_logs(train_logs)\n logs = pull_logs(label_logs(logs, 'train', {'step': step+1, 'epoch': epoch, 'round': round}))\n log(logs, use_wandb and is_main_process)\n train_logs = []\n \n # begin evaluation\n if evaluator is not None and eval_every_steps is not None and (step + 1) % eval_every_steps == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_steps is not None and (step + 1) % save_every_steps == 0:\n _save(\n name='step_%d' % (step+1), \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step+1, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n step += 1\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is 
not None and eval_every_epochs is not None and (epoch + 1) % eval_every_epochs == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_epochs is not None and (epoch + 1) % save_every_epochs == 0:\n _save(\n name=f'epoch_{epoch}', \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # conditionally terminate\n if max_steps is not None and step >= max_steps:\n break\n \n # begin evaluation\n if evaluator is not None and eval_every_rounds is not None and (round + 1) % eval_every_rounds == 0:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # periodically save checkpoint\n if save_dir is not None and save_every_rounds is not None and (round + 1) % save_every_rounds == 0:\n _save(\n name='round_%d' % (round), \n add_to_queue=True, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n inference = inference.replace(\n policy_params=trainer.policy_train_state.params, \n value_head_params=trainer.value_head_train_state.params, \n )\n policy.set_params(trainer.policy_train_state.params)\n \n # begin evaluation\n if evaluator is not None and eval_at_end:\n _eval(\n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n \n # save final checkpoint\n if save_dir is not None and save_at_end:\n print(\"saving final checkpoint!\")\n _save(\n name='last', \n add_to_queue=False, \n # loop state metadata\n best_perf=best_perf, \n step=step, \n epoch=epoch, \n round=round, \n saved_checkpoints=saved_checkpoints, \n steps_per_epoch=steps_per_epoch, \n wandb_id=wandb_id, \n )\n\n # stop wandb\n if use_wandb and is_main_process:\n wandb.finish()\n \n inference = inference.replace(\n policy_params=trainer.policy_train_state.params, \n value_head_params=trainer.value_head_train_state.params, \n )\n policy.set_params(trainer.policy_train_state.params)\n return trainer, inference, policy" }, { "identifier": "ppo_loss_fn", "path": "LLM_RL/algorithms/ppo/base_interface.py", "snippet": "def ppo_loss_fn(\n attention_mask: jax.Array, # [batch, time-1] – output is masked; shift x[1:]\n logprobs: jax.Array, # [batch, time-1] – logprob of output produced; shift x[1:]\n values: jax.Array, # [batch, time-1] – value of current state; shift x[:-1]\n should_take_action: jax.Array, # [batch, time-1] – is output produced by action; shift x[1:]\n old_logprobs: jax.Array, # [batch, time-1] – logprob of output produced; shift x[1:]\n old_values: jax.Array, # [batch, time-1] – value of current state; shift x[:-1]\n old_advantages: jax.Array, # [batch, time-1] – advantage of output produced; shift x[1:]\n old_returns: jax.Array, # [batch, time-1] – return of current state; shift x[:-1]\n *, \n cliprange_value: Union[float, jax.Array], \n cliprange: Union[float, jax.Array], \n value_loss_coef: 
Union[float, jax.Array], \n) -> Tuple[jax.Array, Dict[str, Any]]:\n \"\"\"PPO objective function.\n References:\n - https://github.com/CarperAI/trlx/blob/main/trlx/models/modeling_ppo.py\n - https://stable-baselines.readthedocs.io/en/master/modules/ppo2.html\n \"\"\"\n mask = should_take_action.astype(jnp.float32) * attention_mask\n n = mask.sum()\n \n values_clipped = jnp.clip(\n values, \n old_values - cliprange_value, \n old_values + cliprange_value, \n )\n\n vf_loss1 = (values - old_returns) ** 2\n vf_loss2 = (values_clipped - old_returns) ** 2\n vf_loss = 0.5 * jnp.sum(jnp.maximum(vf_loss1, vf_loss2) * mask) / n\n vf_clipfrac = jnp.sum((vf_loss2 > vf_loss1).astype(jnp.float32) * mask) / n\n\n log_ratio = (logprobs - old_logprobs) * mask\n ratio = jnp.exp(log_ratio)\n # Unbiased KL-div estimates (`k3`). Ref: http://joschu.net/blog/kl-approx.html\n approx_kl = jnp.sum((ratio - 1) - log_ratio) / n\n\n pg_loss1 = -old_advantages * ratio\n pg_loss2 = -old_advantages * jnp.clip(\n ratio, \n 1.0 - cliprange, \n 1.0 + cliprange, \n )\n pg_loss = jnp.sum(jnp.maximum(pg_loss1, pg_loss2) * mask) / n\n pg_clipfrac = jnp.sum((pg_loss2 > pg_loss1).astype(jnp.float32) * mask) / n\n\n loss = pg_loss + value_loss_coef * vf_loss\n\n logs = dict(\n losses=dict(\n total_loss=loss, \n policy_loss=pg_loss, \n value_loss=vf_loss, \n ), \n values=dict(\n get_tensor_stats(values, mask, n), \n values_error=jnp.sum(((values - old_returns) * mask) ** 2) / n, \n clipfrac=vf_clipfrac, \n ), \n old_values=get_tensor_stats(old_values, mask, n), \n returns=get_tensor_stats(old_returns, mask, n), \n policy=dict(\n approx_kl=approx_kl, \n clipfrac=pg_clipfrac, \n ), \n ratio=(ratio * mask).sum() / n, \n padding_percentage=n / mask.size, \n )\n\n return loss, logs" }, { "identifier": "FixedKLController", "path": "LLM_RL/algorithms/ppo/base_interface.py", "snippet": "class FixedKLController:\n \"\"\"Fixed KL controller.\"\"\"\n\n def __init__(self, kl_coef):\n self.value = kl_coef\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns updated KL coefficient, βₜ₊₁.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n pass" }, { "identifier": "AdaptiveKLController", "path": "LLM_RL/algorithms/ppo/base_interface.py", "snippet": "class AdaptiveKLController:\n \"\"\"Adaptive KL Controller as described in Ziegler et al. 
\"Fine-Tuning Language Models from Human Preferences\"\n Reference: Section 2.2 https://arxiv.org/pdf/1909.08593.pdf#page=2\n Source: https://github.com/openai/lm-human-preferences/blob/master/lm_human_preferences/train_policy.py\n \"\"\"\n\n def __init__(self, init_kl_coef: float, target: float, horizon: int):\n self.value = init_kl_coef\n self.target = target\n self.horizon = horizon\n\n def update(self, current: float, n_steps: int):\n \"\"\"Returns adaptively updated KL coefficient, βₜ₊₁.\n Arguments:\n current: The current KL value between the newest policy and the initial policy.\n \"\"\"\n proportional_error = np.clip(current / self.target - 1, -0.2, 0.2) # ϵₜ\n mult = 1 + proportional_error * n_steps / self.horizon\n self.value *= mult # βₜ₊₁" }, { "identifier": "text_env_eval", "path": "LLM_RL/environment.py", "snippet": "class Text:\nclass TextTrajectory:\nclass TextTrajectoryChain:\nclass TextEnv(ABC):\nclass BatchedTextEnv(ABC):\nclass TextEnvToBatchedTextEnv(BatchedTextEnv):\nclass BatchedTextEnvToTextEnv(TextEnv):\nclass TextPolicy(ABC):\nclass BatchedTextPolicy(ABC):\nclass TextPolicyToBatchedTextPolicy(BatchedTextPolicy):\nclass BatchedTextPolicyToTextPolicy(TextPolicy):\nclass InteractionTransition(NamedTuple):\nclass UserPolicy(TextPolicy): \nclass TokenHistory:\nclass TokenTrajectory:\nclass TokenTrajectoryChain:\n def __post_init__(self):\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n def reset(self, seed: Optional[int]=None, options: Optional[Dict]=None) -> TextHistory:\n def close(self) -> None:\n def copy(self) -> TextEnv:\n def step(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[Tuple[TextHistory, float, bool]]]:\n def reset(self, seed: Optional[List[Optional[int]]]=None, options: Optional[List[Optional[Dict]]]=None) -> List[TextHistory]:\n def close(self) -> None:\n def copy(self) -> BatchedTextEnv:\n def __init__(self, env: TextEnv):\n def step(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[Tuple[TextHistory, float, bool]]]:\n def reset(self, seed: Optional[List[Optional[int]]]=None, options: Optional[List[Optional[Dict]]]=None) -> List[TextHistory]:\n def close(self) -> None:\n def __init__(self, env: BatchedTextEnv):\n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n def reset(self, seed: Optional[int]=None, options: Optional[Dict]=None) -> TextHistory:\n def close(self) -> None:\n def act(self, text_history: TextHistory) -> TextHistory:\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def __init__(self, policy: TextPolicy):\n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n def __init__(self, policy: BatchedTextPolicy):\n def act(self, text_history: TextHistory) -> TextHistory:\ndef interact_environment(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n initial_text_history: Optional[Union[TextHistory, List[TextHistory]]]=None, \n env_seed: Union[Optional[int], Optional[List[Optional[int]]]]=None, \n env_options: Union[Optional[Dict], Optional[List[Optional[int]]]]=None, \n bsize: int=1, \n npad: int=0,\n) -> List[List[InteractionTransition]]:\ndef text_env_eval(\n env: Union[TextEnv, BatchedTextEnv], \n policy: Union[TextPolicy, BatchedTextPolicy], \n n_rollouts: int, \n initial_text_history: 
Optional[TextHistory]=None, # only allow one initial_text_history here\n seed_generator: Optional[Iterator[int]]=None, \n env_options: Optional[Dict]=None, # only allow one env_options here\n interaction_callback: Optional[Callable[[List[Tuple[TextHistory, TextHistory, TextHistory, float, bool]]], None]]=None, \n bsize: int=1, \n verbose: bool=True, \n) -> Tuple[List[List[InteractionTransition]], Dict[str, Any]]:\n def __init__(\n self, \n initial_str: str, \n postproc_print_f: Optional[Callable[[str], str]]=None, \n postproc_action_f: Optional[Callable[[str], str]]=None, \n ):\n def act(self, text_history: TextHistory) -> TextHistory:\n def __post_init__(self):\n def from_text_history(\n cls, \n text_history: TextHistory, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenHistory:\n def __post_init__(self):\n def from_text_trajectory(\n cls, \n text_trajectory: TextTrajectory, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectory:\n def __post_init__(self):\n def to_list(self) -> List[TokenTrajectory]:\n def from_text_trajectory_chain(\n cls, \n text_trajectory_chain: TextTrajectoryChain, \n tokenizer: PreTrainedTokenizer, \n token_process: Optional[Callable[[List[int]], List[int]]]=None, \n ) -> TokenTrajectoryChain:" }, { "identifier": "GPTJPPOPolicy", "path": "LLM_RL/algorithms/ppo/gptj/interface.py", "snippet": "class GPTJPPOPolicy(PPOPolicy):\n def __init__(\n self, \n inference: GPTJInference, \n prng_key: Optional[jax.random.KeyArray], \n generation_config: Optional[GenerationConfig]=None, \n blocking_strategy: BlockingStrategy=BlockingStrategy(padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=None), \n in_str_process: Optional[Callable[[str], str]]=None, \n out_str_process: Optional[Callable[[str], str]]=None, \n input_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n target_token_process: Optional[Callable[[List[int]], List[int]]]=None, \n trace: bool=True, \n ):\n self.inference = inference\n self.prng_key = prng_key\n self.generation_config = generation_config\n self.blocking_strategy = blocking_strategy\n self.in_str_process = in_str_process\n self.out_str_process = out_str_process\n self.input_token_process = input_token_process\n self.target_token_process = target_token_process\n if self.in_str_process is None:\n self.in_str_process = lambda x: x\n if self.out_str_process is None:\n self.out_str_process = lambda x: x\n self.trace = trace\n \n def act(self, text_history: List[Optional[TextHistory]], done: Optional[List[bool]]=None) -> List[Optional[TextHistory]]:\n if done is None:\n done = [False]*len(text_history)\n # force eos_token for done sequences\n eos_token = self.inference.tokenizer.eos_token\n if self.generation_config is not None and self.generation_config.eos_token_id is not None:\n eos_token = self.inference.tokenizer.decode(self.generation_config.eos_token_id)\n if eos_token is None:\n eos_token = self.inference.tokenizer.pad_token\n if eos_token is None:\n eos_token = ''\n \n raw_input_strs = [\n eos_token if d else self.in_str_process(text_history_to_str(item)) \\\n for item, d in zip(text_history, done)\n ]\n\n new_key = None\n if self.prng_key is not None:\n self.prng_key, new_key = jax.random.split(self.prng_key)\n model_outputs = self.inference.generate_from_str(\n input_strs=raw_input_strs, \n prng_key=new_key, \n blocking_strategy=self.blocking_strategy, \n 
generation_config=self.generation_config, \n input_token_process=self.input_token_process, \n target_token_process=self.target_token_process, \n trace=self.trace, \n )\n\n raw_output_strs = model_outputs.output_strs\n output_strs = [\n \"\" if d else self.out_str_process(strip_prompt_from_completion(raw_input_str, raw_output_str)) \\\n for raw_input_str, raw_output_str, d in zip(raw_input_strs, raw_output_strs, done)\n ]\n\n return [\n None if d else text_history_item+(Text(output_str, True),) \\\n for text_history_item, output_str, d in zip(text_history, output_strs, done)\n ]\n \n def set_params(self, policy_params: PyTree) -> None:\n self.inference = self.inference.replace(params=policy_params)" }, { "identifier": "GPTJPPOInference", "path": "LLM_RL/algorithms/ppo/gptj/interface.py", "snippet": "class GPTJPPOInference(PPOInference):\n @classmethod\n def load_inference(\n cls, \n initial_policy_params: Optional[PyTree], \n policy_params: PyTree, \n value_head_params: PyTree, \n initial_policy_model: Optional[FlaxPreTrainedModel], \n policy_model: FlaxPreTrainedModel, \n value_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Optional[Callable], \n dp_shard_logits: bool=True, \n bc_loss_fn: Optional[Callable]=None, \n bc_loss_weight: float=0.0, \n ) -> GPTJPPOInference:\n mesh = policy_model.config.mesh\n assert mesh is not None\n assert mesh == value_head_model.config.mesh\n assert (initial_policy_params is None and initial_policy_model) is None or (initial_policy_params is not None and initial_policy_model is not None)\n has_initial_policy = initial_policy_params is not None\n initial_policy_params_partition_spec = None\n if has_initial_policy:\n initial_policy_params_partition_spec = match_partition_rules(initial_policy_model.config.get_partition_rules(), initial_policy_params)\n policy_params_partition_spec = match_partition_rules(policy_model.config.get_partition_rules(), policy_params)\n value_head_params_partition_spec = match_partition_rules(value_head_model.config.get_partition_rules(), value_head_params)\n\n @partial(\n pjit, \n static_argnames=('initial_policy_output_attentions', 'initial_policy_output_hidden_states', 'policy_output_attentions', 'train'), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), initial_policy_params_partition_spec) if has_initial_policy else NamedSharding(mesh, PS()), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), policy_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), value_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=PPOForwardOutput(\n initial_policy_raw_output=FlaxCausalLMOutput(\n logits=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n hidden_states=NamedSharding(mesh, PS()), # assume no sharding for hidden states\n attentions=NamedSharding(mesh, PS()), # assume no sharding for attentions\n ) if has_initial_policy else NamedSharding(mesh, PS()), \n policy_raw_output=FlaxCausalLMOutput(\n logits=NamedSharding(mesh, PS((\"dp\", \"fsdp\"), None, None)) if dp_shard_logits else NamedSharding(mesh, PS()), \n hidden_states=NamedSharding(mesh, PS()), # assume no sharding for hidden states\n attentions=NamedSharding(mesh, PS()), # assume no sharding for attentions\n ), \n values=NamedSharding(mesh, PS()), \n ), \n )\n def _forward(\n initial_policy_params: 
Optional[PyTree], \n policy_params: PyTree, \n value_head_params: PyTree, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray]=None, \n initial_policy_output_attentions: Optional[bool]=None, \n initial_policy_output_hidden_states: Optional[bool]=None, \n policy_output_attentions: Optional[bool]=None, # no policy_output_hidden_states option because this is required\n train: bool=False, \n ) -> PPOForwardOutput:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n \n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n initial_model_output = None\n if has_initial_policy:\n initial_model_output = initial_policy_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=initial_policy_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=initial_policy_output_hidden_states, \n output_attentions=initial_policy_output_attentions, \n )\n # trunc padded logits\n initial_model_output = initial_model_output.replace(logits=initial_model_output.logits.at[:, :, initial_policy_model.config.unpadded_vocab_size:].set(-float('inf')))\n model_output = policy_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=policy_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n output_attentions=policy_output_attentions, \n )\n # trunc padded logits\n model_output = model_output.replace(logits=model_output.logits.at[:, :, policy_model.config.unpadded_vocab_size:].set(-float('inf')))\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n values = value_head_model.apply(\n {'params': value_head_params}, \n model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if new_key is not None else None, \n )\n values = jnp.squeeze(values, axis=-1)\n\n # assert sharding on outputs\n if dp_shard_logits:\n if has_initial_policy:\n initial_model_output = initial_model_output.replace(logits=with_named_sharding_constraint(initial_model_output.logits, mesh, PS((\"dp\", \"fsdp\"), None, None)))\n model_output = model_output.replace(logits=with_named_sharding_constraint(model_output.logits, mesh, PS((\"dp\", \"fsdp\"), None, None)))\n return PPOForwardOutput(\n initial_policy_raw_output=initial_model_output, \n policy_raw_output=model_output, \n values=values, \n )\n \n @partial(\n pjit, \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), policy_params_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), value_head_params_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _eval_loss(\n policy_params: PyTree, \n 
value_head_params: PyTree, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n old_logprobs: jax.Array, \n old_values: jax.Array, \n old_advantages: jax.Array, \n old_returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n bc_data_input_ids: Optional[jax.Array], \n bc_data_input_attention_mask: Optional[jax.Array], \n bc_data_input_position_ids: Optional[jax.Array], \n bc_data_input_training_mask: Optional[jax.Array], \n train: bool=False, \n ) -> Tuple[jax.Array, PyTree]:\n assert loss_fn is not None, \"loss_fn must be set to use eval_loss\"\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS((\"dp\", \"fsdp\"), None))\n old_logprobs = with_named_sharding_constraint(old_logprobs, mesh, PS((\"dp\", \"fsdp\"), None))\n old_values = with_named_sharding_constraint(old_values, mesh, PS((\"dp\", \"fsdp\"), None))\n old_advantages = with_named_sharding_constraint(old_advantages, mesh, PS((\"dp\", \"fsdp\"), None))\n old_returns = with_named_sharding_constraint(old_returns, mesh, PS((\"dp\", \"fsdp\"), None))\n if bc_data_input_ids is not None:\n bc_data_input_ids = with_named_sharding_constraint(bc_data_input_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n bc_data_input_attention_mask = with_named_sharding_constraint(bc_data_input_attention_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n bc_data_input_position_ids = with_named_sharding_constraint(bc_data_input_position_ids, mesh, PS((\"dp\", \"fsdp\"), None))\n bc_data_input_training_mask = with_named_sharding_constraint(bc_data_input_training_mask, mesh, PS((\"dp\", \"fsdp\"), None))\n \n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n model_output = policy_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=policy_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n values = value_head_model.apply(\n {'params': value_head_params}, \n model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if new_key is not None else None, \n )[:, :-1]\n values = jnp.squeeze(values, axis=-1)\n\n logits = model_output.logits.astype(jnp.float32)\n logprobs = -softmax_cross_entropy_with_integer_labels(logits[:, :-1], input_ids[:, 1:])\n\n loss, info = loss_fn(\n attention_mask, \n logprobs, \n values, \n should_take_action, \n old_logprobs, \n old_values, \n old_advantages, \n old_returns, \n )\n\n if bc_loss_fn is not None:\n bc_loss, bc_info = bc_loss_fn(\n policy_model, \n policy_params, \n bc_data_input_ids, \n bc_data_input_attention_mask, \n bc_data_input_position_ids, \n bc_data_input_training_mask, \n prng_key, \n train, \n )\n\n info = {'ppo': info, 'bc': bc_info, 'total_loss': loss + bc_loss * bc_loss_weight}\n loss = loss + bc_loss * bc_loss_weight\n\n return loss, info\n \n return cls(\n initial_policy_params=initial_policy_params, \n policy_params=policy_params, \n value_head_params=value_head_params, \n initial_policy_model=initial_policy_model, \n policy_model=policy_model, \n 
value_head_model=value_head_model, \n tokenizer=tokenizer, \n _forward=_forward, \n _eval_loss=_eval_loss, \n )" }, { "identifier": "GPTJPPOTrain", "path": "LLM_RL/algorithms/ppo/gptj/interface.py", "snippet": "class GPTJPPOTrain(PPOTrain):\n @classmethod\n def load_train(\n cls, \n policy_train_state: TrainState, \n value_head_train_state: TrainState, \n policy_model: FlaxPreTrainedModel, \n value_head_model: nn.Module, \n tokenizer: PreTrainedTokenizerBase, \n loss_fn: Callable, \n bc_loss_fn: Optional[Callable]=None, \n bc_loss_weight: float=0.0, \n ) -> GPTJPPOTrain:\n mesh = policy_model.config.mesh\n assert mesh is not None\n assert mesh == value_head_model.config.mesh\n policy_train_state_partition_spec = match_partition_rules(policy_model.config.get_partition_rules(), policy_train_state)\n value_head_train_state_partition_spec = match_partition_rules(value_head_model.config.get_partition_rules(), value_head_train_state)\n\n @partial(\n pjit, \n donate_argnums=(0, 1), \n static_argnames=('train',), \n in_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), policy_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), value_head_train_state_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n out_shardings=(\n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), policy_train_state_partition_spec), \n jax.tree_util.tree_map(lambda ps: NamedSharding(mesh, ps), value_head_train_state_partition_spec), \n NamedSharding(mesh, PS()), \n NamedSharding(mesh, PS()), \n ), \n )\n def _step(\n policy_train_state: TrainState, \n value_head_train_state: TrainState, \n input_ids: jax.Array, \n attention_mask: jax.Array, \n position_ids: jax.Array, \n should_take_action: jax.Array, \n old_logprobs: jax.Array, \n old_values: jax.Array, \n old_advantages: jax.Array, \n old_returns: jax.Array, \n prng_key: Optional[jax.random.PRNGKeyArray], \n bc_data_input_ids: Optional[jax.Array], \n bc_data_input_attention_mask: Optional[jax.Array], \n bc_data_input_position_ids: Optional[jax.Array], \n bc_data_input_training_mask: Optional[jax.Array], \n train: bool=True, \n ) -> Tuple[TrainState, TrainState, jax.Array, PyTree]:\n # data parallel shard inputs\n input_ids = with_named_sharding_constraint(input_ids, mesh, PS(('dp', 'fsdp'), None))\n attention_mask = with_named_sharding_constraint(attention_mask, mesh, PS(('dp', 'fsdp'), None))\n position_ids = with_named_sharding_constraint(position_ids, mesh, PS(('dp', 'fsdp'), None))\n should_take_action = with_named_sharding_constraint(should_take_action, mesh, PS(('dp', 'fsdp'), None))\n old_logprobs = with_named_sharding_constraint(old_logprobs, mesh, PS(('dp', 'fsdp'), None))\n old_values = with_named_sharding_constraint(old_values, mesh, PS(('dp', 'fsdp'), None))\n old_advantages = with_named_sharding_constraint(old_advantages, mesh, PS(('dp', 'fsdp'), None))\n old_returns = with_named_sharding_constraint(old_returns, mesh, PS(('dp', 'fsdp'), None))\n if bc_loss_fn is not None:\n bc_data_input_ids = with_named_sharding_constraint(bc_data_input_ids, mesh, PS(('dp', 'fsdp'), None))\n bc_data_input_attention_mask = 
with_named_sharding_constraint(bc_data_input_attention_mask, mesh, PS(('dp', 'fsdp'), None))\n bc_data_input_position_ids = with_named_sharding_constraint(bc_data_input_position_ids, mesh, PS(('dp', 'fsdp'), None))\n bc_data_input_training_mask = with_named_sharding_constraint(bc_data_input_training_mask, mesh, PS(('dp', 'fsdp'), None))\n \n # define loss function\n def grad_loss(policy_params: PyTree, value_head_params: PyTree, prng_key: jax.random.PRNGKeyArray):\n \n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n model_output = policy_model(\n input_ids=input_ids, \n attention_mask=attention_mask, \n position_ids=position_ids, \n params=policy_params, \n dropout_rng=new_key, \n train=train, \n output_hidden_states=True, \n )\n\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n values = value_head_model.apply(\n {'params': value_head_params}, \n model_output.hidden_states[-1], \n train=train, \n rngs={'dropout': new_key} if new_key is not None else None, \n )[:, :-1]\n values = jnp.squeeze(values, axis=-1)\n\n logits = model_output.logits.astype(jnp.float32)\n logprobs = -softmax_cross_entropy_with_integer_labels(logits[:, :-1], input_ids[:, 1:])\n\n loss, info = loss_fn(\n attention_mask[:, 1:], \n logprobs, \n values, \n should_take_action, \n old_logprobs, \n old_values, \n old_advantages, \n old_returns, \n )\n return loss, info\n \n # define bc loss function\n def grad_bc_loss(policy_params: PyTree, prng_key: Optional[jax.random.PRNGKeyArray]):\n loss, info = bc_loss_fn(\n policy_model, \n policy_params, \n bc_data_input_ids, \n bc_data_input_attention_mask, \n bc_data_input_position_ids, \n bc_data_input_training_mask, \n prng_key, \n train, \n )\n return loss, info\n\n # take loss\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n (loss, info), (policy_grads, value_head_grads) = jax.value_and_grad(grad_loss, has_aux=True, argnums=(0, 1))(\n policy_train_state.params, \n value_head_train_state.params, \n prng_key, \n )\n\n # assert shard gradients\n policy_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n policy_grads, \n policy_train_state_partition_spec.params, \n )\n value_head_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n value_head_grads, \n value_head_train_state_partition_spec.params, \n )\n\n if bc_loss_fn is not None:\n new_key = None\n if prng_key is not None:\n prng_key, new_key = jax.random.split(prng_key)\n (bc_loss, bc_info), bc_grads = jax.value_and_grad(grad_bc_loss, has_aux=True, argnums=0)(\n policy_train_state.params, \n new_key, \n )\n\n info = {'ppo': info, 'bc': bc_info, 'total_loss': loss + bc_loss * bc_loss_weight}\n loss = loss + bc_loss * bc_loss_weight\n\n bc_grads = jax.tree_util.tree_map(\n lambda x, ps: with_named_sharding_constraint(x, mesh, ps), \n bc_grads, \n policy_train_state_partition_spec.params, \n )\n\n policy_grads = jax.tree_util.tree_map(\n lambda x, y: x + y * bc_loss_weight, \n policy_grads, \n bc_grads, \n )\n\n # update params and optim state\n policy_train_state = policy_train_state.apply_gradients(grads=policy_grads)\n value_head_train_state = value_head_train_state.apply_gradients(grads=value_head_grads)\n\n return policy_train_state, value_head_train_state, loss, info\n \n return cls(\n policy_train_state=policy_train_state, \n value_head_train_state=value_head_train_state, \n policy_model=policy_model, \n 
value_head_model=value_head_model, \n tokenizer=tokenizer, \n _step=_step, \n )" }, { "identifier": "load_train_state_from_config", "path": "LLM_RL/heads/linear_head.py", "snippet": "def load_train_state_from_config(\n model_config: LinearHeadConfig, \n model_dtype: Union[str, jnp.dtype], \n optim_getter: Callable[[PyTree], optax.GradientTransformation], \n mesh: Mesh, # should be shape (dp, mp)\n prng_key: jax.random.PRNGKeyArray, \n pad_to_output_dim: Optional[int]=None, \n params_dtype: Optional[Union[str, jnp.dtype]]=jnp.float32, \n) -> Tuple[TrainState, LinearHead]:\n \n model = LinearHead(model_config, dtype=model_dtype)\n model.config.mesh = mesh\n # shard params\n params = freeze(shard_params_from_config(model, prng_key, params_dtype=params_dtype))\n # pad outputs\n if pad_to_output_dim is not None:\n params = freeze(pad_outputs(unfreeze(params), model, pad_to_output_dim, dtype=params_dtype))\n # shard train_state\n train_state = shard_train_state_from_params(model, params, optim_getter(params))\n\n return train_state, model" }, { "identifier": "LinearHeadConfig", "path": "LLM_RL/heads/linear_head.py", "snippet": "class LinearHeadConfig(HeadConfig):\n def __init__(\n self, \n input_dim: int, \n output_dim: int, \n use_bias: bool=True, \n unpadded_output_dim: Optional[int]=None, \n initializer_range: Optional[int]=None, \n bias_init: Optional[float]=None, \n mesh: Optional[jax.sharding.Mesh]=None, \n ) -> None:\n self.input_dim = input_dim\n self.output_dim = output_dim\n self.use_bias = use_bias\n self.initializer_range = initializer_range\n self.bias_init = bias_init\n self.mesh = mesh\n self.unpadded_output_dim = unpadded_output_dim\n if self.unpadded_output_dim is None:\n self.unpadded_output_dim = self.output_dim\n super().__init__()\n \n @staticmethod\n def get_partition_rules():\n return [\n (re.escape(\"['dense']['kernel']\"), PS()), \n (re.escape(\"['dense']['bias']\"), PS()), \n ]\n \n def to_dict(self) -> Dict[str, Any]:\n if self.mesh is None:\n return super().to_dict()\n else:\n new_conf = LinearHeadConfig(**self.__dict__)\n new_conf.mesh = None\n return new_conf.to_dict()" }, { "identifier": "PPODataset", "path": "LLM_RL/algorithms/ppo/data.py", "snippet": "class PPODataset(Dataset):\n def __init__(\n self, \n input_ids: np.ndarray, # [b, t]\n should_take_action: np.ndarray, # [b, t-1]\n old_logprobs: np.ndarray, # [b, t-1]\n old_values: np.ndarray, # [b, t-1]\n old_advantages: np.ndarray, # [b, t-1]\n old_returns: np.ndarray, # [b, t-1]\n ):\n assert input_ids.shape[1] == (should_take_action.shape[1]+1)\n assert input_ids.shape[1] == (old_logprobs.shape[1]+1)\n assert input_ids.shape[1] == (old_values.shape[1]+1)\n assert input_ids.shape[1] == (old_advantages.shape[1]+1)\n assert input_ids.shape[1] == (old_returns.shape[1]+1)\n\n assert input_ids.shape[0] == should_take_action.shape[0]\n assert input_ids.shape[0] == old_logprobs.shape[0]\n assert input_ids.shape[0] == old_values.shape[0]\n assert input_ids.shape[0] == old_advantages.shape[0]\n assert input_ids.shape[0] == old_returns.shape[0]\n\n self.input_ids = input_ids\n self.should_take_action = should_take_action\n self.old_logprobs = old_logprobs\n self.old_values = old_values\n self.old_advantages = old_advantages\n self.old_returns = old_returns\n \n def __getitem__(self, index):\n return {\n 'input_ids': jnp.asarray(self.input_ids[index], dtype=jnp.int32), \n 'should_take_action': jnp.asarray(self.should_take_action[index], dtype=jnp.bool_), \n 'old_logprobs': jnp.asarray(self.old_logprobs[index], 
dtype=jnp.float32), \n 'old_values': jnp.asarray(self.old_values[index], dtype=jnp.float32), \n 'old_advantages': jnp.asarray(self.old_advantages[index], dtype=jnp.float32), \n 'old_returns': jnp.asarray(self.old_returns[index], dtype=jnp.float32), \n }\n \n def __len__(self):\n return self.input_ids.shape[0]\n \n @classmethod\n def from_ppo_data_list(\n cls, \n ppo_data_list: List[PPOData], \n tokenizer: PreTrainedTokenizerBase, \n blocking_strategy: BlockingStrategy, \n ) -> PPODataset:\n \n data = PPOData.block(ppo_data_list, blocking_strategy, tokenizer)\n\n return cls(**data)" }, { "identifier": "get_tensor_stats_np", "path": "LLM_RL/utils.py", "snippet": "def get_tensor_stats_np(xs: np.ndarray, mask: np.ndarray, n: int):\n \"\"\"get stats about a tensor, used for logging\"\"\"\n mean = (xs * mask).sum() / n\n mask = mask.astype(np.bool_)\n return dict(\n mean=mean, \n min=np.min(xs, where=mask, initial=float('inf')), \n max=np.max(xs, where=mask, initial=float('-inf')), \n std=np.std(xs, where=mask), \n )" }, { "identifier": "ReformatWordleEnvironment", "path": "llm_rl_scripts/wordle/env/env.py", "snippet": "class ReformatWordleEnvironment(TextEnv):\n def __init__(self, env: WordleEnvironment):\n self.env = env\n \n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n text_history, r, done = self.env.step(deformat_history(text_history))\n return reformat_history(text_history), r, done\n\n def reset(self, seed: Optional[int] = None, options: Optional[Dict] = None) -> TextHistory:\n return reformat_history(self.env.reset(seed=seed, options=options))" }, { "identifier": "WordleEnvironment", "path": "llm_rl_scripts/wordle/env/env.py", "snippet": "class WordleEnvironment(TextEnv):\n def __init__(self, vocab: Vocabulary, require_words_in_vocab: bool = True, bad_word_reward: float = -1.0):\n self.vocab = vocab\n self.require_words_in_vocab = require_words_in_vocab\n self.bad_word_reward = bad_word_reward\n self.reset()\n \n def step(self, text_history: TextHistory) -> Tuple[TextHistory, float, bool]:\n assert text_history[-1].is_action\n self.state, r, t = self.state.next(text_history[-1].text)\n transition = Text(self.state.transition_sequence()[-1], False)\n return text_history+(transition,), r, t\n \n def reset(self, seed: Optional[int] = None, options: Optional[Dict] = None) -> TextHistory:\n self.vocab.rng = random.Random(seed)\n self.state = WordleGame.initialize(self.vocab, require_words_in_vocab=self.require_words_in_vocab, bad_word_reward=self.bad_word_reward)\n return tuple()" }, { "identifier": "Vocabulary", "path": "llm_rl_scripts/wordle/env/game.py", "snippet": "class Vocabulary:\n def __init__(\n self, \n all_vocab: List[str], \n wordle_state: Optional[WordleState], \n cache: Optional[Cache]=None, \n fill_cache: bool=True, \n rng: Optional[random.Random]=None, \n ):\n # assert all([len(w) == N_CHARS for w in filtered_vocab])\n self.fill_cache = fill_cache\n self.cache = cache\n if self.cache is None:\n self.cache = Cache()\n self.all_vocab = all_vocab\n self.all_vocab_set = set(self.all_vocab)\n if wordle_state is not None:\n if wordle_state in self.cache:\n self.filtered_vocab = self.cache[wordle_state]\n else:\n self.filtered_vocab = list(filter(lambda x: wordle_state.word_in_state(x), self.all_vocab))\n if self.fill_cache:\n self.cache[wordle_state] = self.filtered_vocab\n else:\n self.filtered_vocab = list(self.all_vocab)\n if rng is None:\n rng = random.Random()\n self.rng = rng\n \n @classmethod\n def from_file(cls, vocab_file: str, fill_cache: 
bool=True, rng: Optional[random.Random]=None):\n vocab = []\n for item in open(vocab_file, 'r'):\n item = item.strip()\n if len(item) == N_CHARS:\n vocab.append(item)\n return cls(vocab, None, None, fill_cache, rng)\n \n def filtered_vocab_size(self):\n return len(self.filtered_vocab)\n \n def all_vocab_size(self):\n return len(self.all_vocab)\n \n def get_random_word_filtered(self):\n return self.rng.choice(self.filtered_vocab)\n \n def get_random_word_all(self):\n return self.rng.choice(self.all_vocab)\n \n def update_vocab(self, wordle_state: WordleState):\n return Vocabulary(self.all_vocab, wordle_state, cache=self.cache, fill_cache=self.fill_cache, rng=self.rng)\n \n def __contains__(self, item: str) -> bool:\n return item in self.all_vocab_set\n\n def __str__(self) -> str:\n return '\\n'.join(self.filtered_vocab)" } ]
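The ppo_loss_fn snippet in the context above combines a clipped policy-gradient surrogate with a clipped value loss. As a reading aid, here is a minimal NumPy sketch of the same per-token math on invented values; the real function additionally masks padding tokens, normalizes by the valid-token count, and returns a dictionary of logging statistics.

import numpy as np

# Toy per-token quantities for four action tokens (all values invented).
old_logprobs = np.array([-1.2, -0.8, -2.0, -1.5])  # log pi_old(a_t | s_t)
logprobs     = np.array([-1.0, -0.9, -1.7, -1.6])  # log pi_new(a_t | s_t)
advantages   = np.array([ 0.5, -0.3,  1.0,  0.2])
old_values   = np.array([ 0.1,  0.2,  0.0, -0.1])
values       = np.array([ 0.4,  0.1,  0.3, -0.2])
returns      = old_values + advantages             # toy returns for illustration
cliprange = cliprange_value = 0.2
value_loss_coef = 1.0

ratio = np.exp(logprobs - old_logprobs)            # pi_new / pi_old per token
pg_loss = np.maximum(-advantages * ratio,
                     -advantages * np.clip(ratio, 1 - cliprange, 1 + cliprange)).mean()

values_clipped = np.clip(values, old_values - cliprange_value, old_values + cliprange_value)
vf_loss = 0.5 * np.maximum((values - returns) ** 2,
                           (values_clipped - returns) ** 2).mean()

total = pg_loss + value_loss_coef * vf_loss        # same combination as ppo_loss_fn
print(f"pg_loss={pg_loss:.4f} vf_loss={vf_loss:.4f} total={total:.4f}")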
from typing import Optional from JaxSeq.bucket_manager import open_with_bucket as open from transformers import AutoTokenizer from JaxSeq.utils import jsonl_stream, convert_path, load_mesh, setup_experiment_save from JaxSeq.utils import BlockingStrategy, Padding, Truncation, get_weight_decay_mask, create_path, get_enabled_save_path, MapIterable, FileOpenIterable from JaxSeq.models.gptj.interface import GPTJInference from JaxSeq.models.gptj.load import load_train_state, ModelLoadMode from LLM_RL.algorithms.ppo.train import train_loop from LLM_RL.algorithms.ppo.base_interface import ppo_loss_fn, FixedKLController, AdaptiveKLController from transformers.generation import GenerationConfig from jaxtyping import PyTree from LLM_RL.environment import text_env_eval, TextTrajectory, TextTrajectoryChain, TokenTrajectory, text_history_to_str from LLM_RL.algorithms.ppo.gptj.interface import GPTJPPOPolicy, GPTJPPOInference, GPTJPPOTrain from LLM_RL.heads.linear_head import load_train_state_from_config as load_head_train_state_from_config from LLM_RL.heads.linear_head import LinearHeadConfig from JaxSeq.shard_model import shard_params_from_params from LLM_RL.algorithms.ppo.data import PPODataset from LLM_RL.utils import get_tensor_stats_np from functools import partial from JaxSeq.logs import label_logs, log, pull_logs from JaxSeq.utils import multihost_device_get from JaxSeq.data import MaskIterableDataset from llm_rl_scripts.wordle.env.env import ReformatWordleEnvironment, WordleEnvironment from llm_rl_scripts.wordle.env.game import Vocabulary from dataclasses import replace from JaxSeq.models.gptj.interface import loss_fn_mask import tyro import jax import jax.numpy as jnp import os import optax import pickle as pkl import re import numpy as np import json
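Among these imports is text_env_eval, which drives the TextEnv / TextPolicy interaction loop from LLM_RL.environment. The EchoEnv and FixedPolicy classes below are invented placeholders that only illustrate the act/step contract; the real loop (interact_environment) additionally handles batching, seeding, and Text tuples with is_action flags.

# Minimal stand-ins for the TextEnv / TextPolicy interfaces (hypothetical classes).
class EchoEnv:
    def reset(self):
        return ("prompt:",)                      # initial text history
    def step(self, history):
        obs = f"saw {history[-1]!r}"
        done = len(history) >= 5
        return history + (obs,), 1.0 if done else 0.0, done

class FixedPolicy:
    def act(self, history):
        return history + ("guess",)              # append one action string

def rollout(env, policy):
    history, total_reward, done = env.reset(), 0.0, False
    while not done:
        history = policy.act(history)            # policy appends an action
        history, r, done = env.step(history)     # env appends an observation
        total_reward += r
    return history, total_reward

print(rollout(EchoEnv(), FixedPolicy()))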
16580
force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy policy_model.config.resid_pdrop = 0.0 policy_model.config.embd_pdrop = 0.0 policy_model.config.attn_pdrop = 0.0 with jax.default_device(jax.devices('cpu')[0]): initital_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) initital_policy_params = shard_params_from_params( model=policy_model, params=initital_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPTJInference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) vocab = Vocabulary.from_file( vocab_file=vocab_file, fill_cache=False, ) env = ReformatWordleEnvironment(WordleEnvironment(vocab, require_words_in_vocab=True, bad_word_reward=-10.0)) policy_prng = jax.random.PRNGKey(0) policy = GPTJPPOPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) optim = optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ) if grad_accum_steps is not None: optim = optax.MultiSteps( optim, every_k_schedule=grad_accum_steps, ) return optim head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config( model_config=LinearHeadConfig( input_dim=policy_model.config.n_embd, output_dim=1, use_bias=True, initializer_range=0.0, bias_init=-4.1, ), model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=head_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loss_f = partial(ppo_loss_fn, cliprange_value=cliprange_value, cliprange=cliprange, value_loss_coef=value_loss_coef) ppo_inference = GPTJPPOInference.load_inference( initial_policy_params=initital_policy_params, policy_params=policy_train_state.params, value_head_params=value_head_train_state.params, initial_policy_model=policy_model, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) ppo_trainer = GPTJPPOTrain.load_train( policy_train_state=policy_train_state, value_head_train_state=value_head_train_state, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) if use_adaptive_kl:
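The value_head_optim_getter above (like the policy optim getter in the full file below) wraps optax.adamw in optax.MultiSteps whenever grad_accum_steps is set. A self-contained sketch of what that wrapper does, with arbitrary learning rate and gradients: parameters only move once every every_k_schedule micro-batches, and the intermediate gradients are accumulated in the optimizer state.

import jax.numpy as jnp
import optax

params = {"w": jnp.zeros(3)}
base = optax.adamw(learning_rate=1e-2, b1=0.9, b2=0.95, eps=1e-8)
optim = optax.MultiSteps(base, every_k_schedule=4)   # accumulate 4 micro-batches
state = optim.init(params)

grads = {"w": jnp.ones(3)}
for step in range(8):
    updates, state = optim.update(grads, state, params)
    params = optax.apply_updates(params, updates)
    print(step, float(params["w"][0]))   # changes only on steps 3 and 7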
def main( model_load_mode: ModelLoadMode, model_load_path: str, bc_data_path: str, vocab_file: str, /, # Mark the end of positional arguments. exp_name: Optional[str]=None, outputs_path: Optional[str]=None, data_mesh_shape: int=1, fsdp_mesh_shape: int=1, model_mesh_shape: int=-1, use_wandb: bool=False, wandb_project: Optional[str]=None, n_rounds: int=1, epochs: int=1, max_steps: Optional[int]=None, lr: float=1e-5, weight_decay: float=0.0, train_bsize: int=32, train_bc_bsize: Optional[int]=None, grad_accum_steps: Optional[int]=None, rollout_bsize: int=32, n_rollouts: int=128, ppo_data_bsize: int=32, bf16_activations: bool=False, gradient_checkpointing: bool=False, gradient_checkpointing_policy: str='nothing_saveable', max_input_length: int=512, max_output_length: int=512, log_every: int=256, eval_every_steps: Optional[int]=None, eval_every_epochs: Optional[int]=None, eval_every_rounds: Optional[int]=None, eval_at_beginning: bool=False, eval_at_end: bool=True, save_every_steps: Optional[int]=None, save_every_epochs: Optional[int]=None, save_every_rounds: Optional[int]=None, save_at_beginning: bool=False, save_at_end: bool=False, save_best: bool=True, max_checkpoints: Optional[int]=None, save_train_state: bool=True, save_ppo_dataset: bool=True, save_bf16: bool=True, policy_do_sample: bool=True, policy_num_beams: int=1, policy_temperature: Optional[float]=None, policy_top_p: Optional[float]=None, policy_top_k: Optional[int]=None, gamma: float=1.0, lam: float=0.95, use_advantage_whitening: bool=True, init_kl_coef: float=0.001, kl_target: Optional[float]=None, kl_horizon: Optional[int]=None, cliprange_value: float=0.2, cliprange: float=0.2, value_loss_coef: float=1.0, bc_loss_weight: float=1.0, force_pad_embeddings: bool=False, should_restore_loop_state: bool=False, ): input_args = locals() print(input_args) use_adaptive_kl = (kl_target is not None and kl_horizon is not None) if not use_adaptive_kl: assert kl_target is None and kl_horizon is None tokenizer = AutoTokenizer.from_pretrained('EleutherAI/gpt-j-6B') tokenizer.add_special_tokens({'pad_token': '<|pad|>'}) mesh = load_mesh((data_mesh_shape, fsdp_mesh_shape, model_mesh_shape), ('dp', 'fsdp', 'mp')) is_main_process = jax.process_index() == 0 print(f"Mesh: {mesh}") print(f"Is main process: {is_main_process}") # load data bc_data = MaskIterableDataset.blocked_from_str_segments_iterable( MapIterable(lambda x: [(tokenizer.bos_token, 0.0)]+x['sequence']+[(tokenizer.eos_token, 1.0)], FileOpenIterable(convert_path(bc_data_path), 'r', pipe=jsonl_stream)), tokenizer, blocking_strategy=BlockingStrategy( padding=Padding.RIGHT, truncation=Truncation.LEFT, max_length=max_input_length+max_output_length, ), ) def policy_optim_getter(params: PyTree): mask = get_weight_decay_mask(( "".join([r"\['ln_[0-9]+'\]", re.escape("['bias']")]), "".join([r"\['ln_[0-9]+'\]", re.escape("['scale']")]), re.escape("['ln_f']['bias']"), re.escape("['ln_f']['scale']"), "bias", ))(params) optim = optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ) if grad_accum_steps is not None: optim = optax.MultiSteps( optim, every_k_schedule=grad_accum_steps, ) return optim model_prng_key = jax.random.PRNGKey(2) policy_train_state, policy_model = load_train_state( model_load_mode=model_load_mode, model_load_path=convert_path(model_load_path) if model_load_mode != ModelLoadMode.HF else model_load_path, model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=policy_optim_getter, tokenizer=tokenizer, mesh=mesh, 
prng_key=model_prng_key, force_pad_embeddings=force_pad_embeddings, params_dtype=jnp.float32, ) policy_model.config.gradient_checkpointing = gradient_checkpointing policy_model.config.gradient_checkpointing_policy = gradient_checkpointing_policy policy_model.config.resid_pdrop = 0.0 policy_model.config.embd_pdrop = 0.0 policy_model.config.attn_pdrop = 0.0 with jax.default_device(jax.devices('cpu')[0]): initital_policy_params = jax.tree_util.tree_map( lambda x: multihost_device_get(x, mesh=mesh).copy(), policy_train_state.params, ) initital_policy_params = shard_params_from_params( model=policy_model, params=initital_policy_params, ) loop_state = dict() if should_restore_loop_state and (model_load_mode in {ModelLoadMode.TRAIN_STATE, ModelLoadMode.TRAIN_STATE_PARAMS, ModelLoadMode.PARAMS}): with open(os.path.join(convert_path(model_load_path), 'loop_state.pkl'), 'rb') as f: loop_state = pkl.load(f) policy_inference = GPTJInference.load_inference( params=policy_train_state.params, model=policy_model, tokenizer=tokenizer, ) vocab = Vocabulary.from_file( vocab_file=vocab_file, fill_cache=False, ) env = ReformatWordleEnvironment(WordleEnvironment(vocab, require_words_in_vocab=True, bad_word_reward=-10.0)) policy_prng = jax.random.PRNGKey(0) policy = GPTJPPOPolicy( inference=policy_inference, prng_key=policy_prng, generation_config=GenerationConfig( do_sample=policy_do_sample, num_beams=policy_num_beams, temperature=policy_temperature, top_p=policy_top_p, top_k=policy_top_k, eos_token_id=tokenizer.encode('\n')[0], pad_token_id=tokenizer.pad_token_id, max_new_tokens=max_output_length, ), blocking_strategy=BlockingStrategy( padding=Padding.LEFT, truncation=Truncation.LEFT, max_length=max_input_length, ), out_str_process=lambda x: x.removesuffix('\n')+'\n', ) def value_head_optim_getter(params: PyTree): mask = get_weight_decay_mask(("bias",))(params) optim = optax.adamw( learning_rate=lr, b1=0.9, b2=0.95, eps=1e-8, weight_decay=weight_decay, mask=mask, ) if grad_accum_steps is not None: optim = optax.MultiSteps( optim, every_k_schedule=grad_accum_steps, ) return optim head_prng_key = jax.random.PRNGKey(3) value_head_train_state, value_head = load_head_train_state_from_config( model_config=LinearHeadConfig( input_dim=policy_model.config.n_embd, output_dim=1, use_bias=True, initializer_range=0.0, bias_init=-4.1, ), model_dtype=jnp.bfloat16 if bf16_activations else jnp.float32, optim_getter=value_head_optim_getter, mesh=mesh, prng_key=head_prng_key, pad_to_output_dim=None, params_dtype=jnp.float32, ) loss_f = partial(ppo_loss_fn, cliprange_value=cliprange_value, cliprange=cliprange, value_loss_coef=value_loss_coef) ppo_inference = GPTJPPOInference.load_inference( initial_policy_params=initital_policy_params, policy_params=policy_train_state.params, value_head_params=value_head_train_state.params, initial_policy_model=policy_model, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) ppo_trainer = GPTJPPOTrain.load_train( policy_train_state=policy_train_state, value_head_train_state=value_head_train_state, policy_model=policy_model, value_head_model=value_head, tokenizer=tokenizer, loss_fn=loss_f, bc_loss_fn=loss_fn_mask, bc_loss_weight=bc_loss_weight, ) if use_adaptive_kl:
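main exposes gamma, lam, and use_advantage_whitening, but the advantage computation itself happens downstream of this file, in the PPO data pipeline. For orientation, here is a standard GAE(gamma, lam) recursion with optional whitening; it is consistent with those knobs but is a sketch, not code copied from the repository.

import numpy as np

def gae_advantages(rewards, values, gamma=1.0, lam=0.95, whiten=True):
    """Standard GAE(gamma, lam); values carries one extra bootstrap entry."""
    T = len(rewards)
    adv = np.zeros(T)
    last = 0.0
    for t in reversed(range(T)):
        delta = rewards[t] + gamma * values[t + 1] - values[t]
        last = delta + gamma * lam * last
        adv[t] = last
    if whiten:
        adv = (adv - adv.mean()) / (adv.std() + 1e-8)
    return adv

rewards = np.array([0.0, 0.0, 1.0])
values  = np.array([0.1, 0.2, 0.3, 0.0])  # V(s_0..s_3); last entry is the bootstrap
print(gae_advantages(rewards, values))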
kl_controller = AdaptiveKLController(init_kl_coef=init_kl_coef, target=kl_target, horizon=kl_horizon)
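This next line only fires when both kl_target and kl_horizon are provided. Plugging numbers into the update rule from the AdaptiveKLController snippet in the context shows how the coefficient drifts; all settings here are invented.

import numpy as np

beta, target, horizon = 0.001, 0.1, 10000   # invented controller settings
for current, n_steps in [(0.2, 256), (0.05, 256)]:
    err = np.clip(current / target - 1, -0.2, 0.2)   # proportional error, clipped
    beta *= 1 + err * n_steps / horizon
    print(f"current_kl={current}: beta -> {beta:.7f}")
# KL above target nudges beta up (to ~0.0010051); KL below target nudges it back down.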
3
2023-11-21 00:16:42+00:00
24k
jzmzhong/Automatic-Prosody-Annotator-with-SSWP-CLAP
src/clap_module/conformer/encoder.py
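The context entries that follow include the conformer building blocks used by this encoder. As a shape-level sanity check, here is a minimal run of the ConvolutionModule dataflow from the first snippet below (pointwise expansion, GLU gate, depthwise convolution with SAME padding, BatchNorm, activation, pointwise projection); the sizes are invented.

import torch
import torch.nn as nn
import torch.nn.functional as F

B, T, C, K = 2, 16, 8, 7                          # batch, time, channels, odd kernel
x = torch.randn(B, T, C).transpose(1, 2)          # (B, C, T): convolve over time

x = nn.Conv1d(C, 2 * C, 1)(x)                     # pointwise expand to 2C channels
x = F.glu(x, dim=1)                               # GLU gate -> back to C channels
x = nn.Conv1d(C, C, K, padding=(K - 1) // 2, groups=C)(x)  # depthwise, 'SAME'
x = F.relu(nn.BatchNorm1d(C)(x))                  # norm + activation
x = nn.Conv1d(C, C, 1)(x).transpose(1, 2)         # pointwise, back to (B, T, C)
print(x.shape)                                    # torch.Size([2, 16, 8])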
[ { "identifier": "ConvolutionModule", "path": "src/clap_module/conformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\r\n \"\"\"ConvolutionModule in Conformer model.\r\n\r\n Args:\r\n channels (int): The number of channels of conv layers.\r\n kernel_size (int): Kernerl size of conv layers.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, channels, kernel_size, activation=nn.ReLU(), bias=True):\r\n \"\"\"Construct an ConvolutionModule object.\r\n \"\"\"\r\n super(ConvolutionModule, self).__init__()\r\n # kernerl_size should be a odd number for 'SAME' padding\r\n assert (kernel_size - 1) % 2 == 0\r\n\r\n self.pointwise_conv1 = nn.Conv1d(\r\n channels,\r\n 2 * channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.depthwise_conv = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n groups=channels,\r\n bias=bias,\r\n )\r\n self.norm = nn.BatchNorm1d(channels)\r\n self.pointwise_conv2 = nn.Conv1d(\r\n channels,\r\n channels,\r\n kernel_size=1,\r\n stride=1,\r\n padding=0,\r\n bias=bias,\r\n )\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Compute convolution module.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, channels).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, channels).\r\n\r\n \"\"\"\r\n # exchange the temporal dimension and the feature dimension\r\n x = x.transpose(1, 2)\r\n\r\n # GLU mechanism\r\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\r\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\r\n\r\n # 1D Depthwise Conv\r\n x = self.depthwise_conv(x)\r\n x = self.activation(self.norm(x))\r\n\r\n x = self.pointwise_conv2(x)\r\n\r\n return x.transpose(1, 2)\r" }, { "identifier": "EncoderLayer", "path": "src/clap_module/conformer/encoder_layer.py", "snippet": "class EncoderLayer(nn.Module):\r\n \"\"\"Encoder layer module.\r\n\r\n Args:\r\n size (int): Input dimension.\r\n self_attn (torch.nn.Module): Self-attention module instance.\r\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention` instance\r\n can be used as the argument.\r\n feed_forward (torch.nn.Module): Feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module instance.\r\n `PositionwiseFeedForward`, `MultiLayeredConv1d`, or `Conv1dLinear` instance\r\n can be used as the argument.\r\n conv_module (torch.nn.Module): Convolution module instance.\r\n `ConvlutionModule` instance can be used as the argument.\r\n dropout_rate (float): Dropout rate.\r\n normalize_before (bool): Whether to use layer_norm before the first block.\r\n concat_after (bool): Whether to concat attention layer's input and output.\r\n if True, additional linear will be applied.\r\n i.e. x -> x + linear(concat(x, att(x)))\r\n if False, no additional linear will be applied. i.e. 
x -> x + att(x)\r\n stochastic_depth_rate (float): Proability to skip this layer.\r\n During training, the layer may skip residual computation and return input\r\n as-is with given probability.\r\n\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n size,\r\n self_attn,\r\n feed_forward,\r\n feed_forward_macaron,\r\n conv_module,\r\n dropout_rate,\r\n normalize_before=True,\r\n concat_after=False,\r\n stochastic_depth_rate=0.0,\r\n ):\r\n \"\"\"Construct an EncoderLayer object.\"\"\"\r\n super(EncoderLayer, self).__init__()\r\n self.self_attn = self_attn\r\n self.feed_forward = feed_forward\r\n self.feed_forward_macaron = feed_forward_macaron\r\n self.conv_module = conv_module\r\n self.norm_ff = LayerNorm(size) # for the FNN module\r\n self.norm_mha = LayerNorm(size) # for the MHA module\r\n if feed_forward_macaron is not None:\r\n self.norm_ff_macaron = LayerNorm(size)\r\n self.ff_scale = 0.5\r\n else:\r\n self.ff_scale = 1.0\r\n if self.conv_module is not None:\r\n self.norm_conv = LayerNorm(size) # for the CNN module\r\n self.norm_final = LayerNorm(size) # for the final output of the block\r\n self.dropout = nn.Dropout(dropout_rate)\r\n self.size = size\r\n self.normalize_before = normalize_before\r\n self.concat_after = concat_after\r\n if self.concat_after:\r\n self.concat_linear = nn.Linear(size + size, size)\r\n self.stochastic_depth_rate = stochastic_depth_rate\r\n\r\n def forward(self, x_input, mask, cache=None):\r\n \"\"\"Compute encoded features.\r\n\r\n Args:\r\n x_input (Union[Tuple, torch.Tensor]): Input tensor w/ or w/o pos emb.\r\n - w/ pos emb: Tuple of tensors [(#batch, time, size), (1, time, size)].\r\n - w/o pos emb: Tensor (#batch, time, size).\r\n mask (torch.Tensor): Mask tensor for the input (#batch, 1, time).\r\n cache (torch.Tensor): Cache tensor of the input (#batch, time - 1, size).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time, size).\r\n torch.Tensor: Mask tensor (#batch, 1, time).\r\n\r\n \"\"\"\r\n if isinstance(x_input, tuple):\r\n x, pos_emb = x_input[0], x_input[1]\r\n else:\r\n x, pos_emb = x_input, None\r\n\r\n skip_layer = False\r\n # with stochastic depth, residual connection `x + f(x)` becomes\r\n # `x <- x + 1 / (1 - p) * f(x)` at training time.\r\n stoch_layer_coeff = 1.0\r\n if self.training and self.stochastic_depth_rate > 0:\r\n skip_layer = torch.rand(1).item() < self.stochastic_depth_rate\r\n stoch_layer_coeff = 1.0 / (1 - self.stochastic_depth_rate)\r\n\r\n if skip_layer:\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n return x, mask\r\n\r\n # whether to use macaron style\r\n if self.feed_forward_macaron is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward_macaron(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff_macaron(x)\r\n\r\n # convolution module\r\n \"\"\"\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n \"\"\"\r\n\r\n # multi-headed self-attention module\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n if cache is None:\r\n x_q = x\r\n else:\r\n assert cache.shape == (x.shape[0], x.shape[1] - 1, self.size)\r\n x_q = x[:, -1:, :]\r\n residual = residual[:, -1:, :]\r\n mask = None if 
mask is None else mask[:, -1:, :]\r\n\r\n if pos_emb is not None:\r\n x_att = self.self_attn(x_q, x, x, pos_emb, mask)\r\n else:\r\n x_att = self.self_attn(x_q, x, x, mask)\r\n\r\n if self.concat_after:\r\n x_concat = torch.cat((x, x_att), dim=-1)\r\n x = residual + stoch_layer_coeff * self.concat_linear(x_concat)\r\n else:\r\n x = residual + stoch_layer_coeff * self.dropout(x_att)\r\n if not self.normalize_before:\r\n x = self.norm_mha(x)\r\n\r\n # convolution module\r\n if self.conv_module is not None:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_conv(x)\r\n x = residual + stoch_layer_coeff * self.dropout(self.conv_module(x))\r\n if not self.normalize_before:\r\n x = self.norm_conv(x)\r\n\r\n # feed forward module\r\n if self.feed_forward:\r\n residual = x\r\n if self.normalize_before:\r\n x = self.norm_ff(x)\r\n x = residual + stoch_layer_coeff * self.ff_scale * self.dropout(\r\n self.feed_forward(x)\r\n )\r\n if not self.normalize_before:\r\n x = self.norm_ff(x)\r\n else:\r\n raise ValueError(\"not exit\")\r\n\r\n if self.conv_module is not None:\r\n x = self.norm_final(x)\r\n\r\n if cache is not None:\r\n x = torch.cat([cache, x], dim=1)\r\n\r\n if pos_emb is not None:\r\n return (x, pos_emb), mask\r\n\r\n return x, mask\r" }, { "identifier": "get_activation", "path": "src/clap_module/conformer/modules.py", "snippet": "def get_activation(act):\r\n \"\"\"Return activation function.\r\n \"\"\"\r\n # Lazy load to avoid unused import\r\n\r\n activation_funcs = {\r\n \"hardtanh\": torch.nn.Hardtanh,\r\n \"tanh\": torch.nn.Tanh,\r\n \"relu\": torch.nn.ReLU,\r\n \"selu\": torch.nn.SELU,\r\n \"swish\": Swish,\r\n }\r\n\r\n return activation_funcs[act]()\r" }, { "identifier": "VGG2L", "path": "src/clap_module/conformer/modules.py", "snippet": "class VGG2L(torch.nn.Module):\r\n \"\"\"VGG2L module for custom encoder.\r\n\r\n Args:\r\n idim: Input dimension.\r\n odim: Output dimension.\r\n pos_enc: Positional encoding class.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim: int, odim: int, pos_enc: torch.nn.Module = None):\r\n \"\"\"Construct a VGG2L object.\"\"\"\r\n super().__init__()\r\n\r\n self.vgg2l = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(64, 64, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((3, 2)),\r\n torch.nn.Conv2d(64, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(128, 128, 3, stride=1, padding=1),\r\n torch.nn.ReLU(),\r\n torch.nn.MaxPool2d((2, 2)),\r\n )\r\n\r\n if pos_enc is not None:\r\n self.output = torch.nn.Sequential(\r\n torch.nn.Linear(128 * ((idim // 2) // 2), odim), pos_enc\r\n )\r\n else:\r\n self.output = torch.nn.Linear(128 * ((idim // 2) // 2), odim)\r\n\r\n def forward(\r\n self, feats: torch.Tensor, feats_mask: torch.Tensor\r\n ) -> Union[\r\n Tuple[torch.Tensor, torch.Tensor],\r\n Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor],\r\n ]:\r\n \"\"\"Forward VGG2L bottleneck.\r\n\r\n Args:\r\n feats: Feature sequences. (B, F, D_feats)\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_output: VGG output sequences.\r\n (B, sub(F), D_out) or ((B, sub(F), D_out), (B, sub(F), D_att))\r\n vgg_mask: Mask of VGG output sequences. 
(B, 1, sub(F))\r\n\r\n \"\"\"\r\n feats = feats.unsqueeze(1)\r\n vgg_output = self.vgg2l(feats)\r\n\r\n b, c, t, f = vgg_output.size()\r\n\r\n vgg_output = self.output(\r\n vgg_output.transpose(1, 2).contiguous().view(b, t, c * f)\r\n )\r\n\r\n if feats_mask is not None:\r\n vgg_mask = self.create_new_mask(feats_mask)\r\n else:\r\n vgg_mask = feats_mask\r\n\r\n return vgg_output, vgg_mask\r\n\r\n def create_new_mask(self, feats_mask: torch.Tensor) -> torch.Tensor:\r\n \"\"\"Create a subsampled mask of feature sequences.\r\n\r\n Args:\r\n feats_mask: Mask of feature sequences. (B, 1, F)\r\n\r\n Returns:\r\n vgg_mask: Mask of VGG2L output sequences. (B, 1, sub(F))\r\n\r\n \"\"\"\r\n vgg1_t_len = feats_mask.size(2) - (feats_mask.size(2) % 3)\r\n vgg_mask = feats_mask[:, :, :vgg1_t_len][:, :, ::3]\r\n\r\n vgg2_t_len = vgg_mask.size(2) - (vgg_mask.size(2) % 2)\r\n vgg_mask = vgg_mask[:, :, :vgg2_t_len][:, :, ::2]\r\n\r\n return vgg_mask\r" }, { "identifier": "LegacyRelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class LegacyRelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)\r\n\r\n if self.zero_triu:\r\n ones = torch.ones((x.size(2), x.size(3)))\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor (#batch, time1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, time1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "MultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class MultiHeadedAttention(nn.Module):\r\n \"\"\"Multi-Head Attention layer.\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate):\r\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\r\n super(MultiHeadedAttention, self).__init__()\r\n assert n_feat % n_head == 0\r\n # We assume d_v always equals d_k\r\n self.d_k = n_feat // n_head\r\n self.h = n_head\r\n self.linear_q = nn.Linear(n_feat, n_feat)\r\n self.linear_k = nn.Linear(n_feat, n_feat)\r\n self.linear_v = nn.Linear(n_feat, n_feat)\r\n self.linear_out = nn.Linear(n_feat, n_feat)\r\n self.attn = None\r\n self.dropout = nn.Dropout(p=dropout_rate)\r\n\r\n def forward_qkv(self, query, key, value):\r\n \"\"\"Transform query, key and value.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n\r\n Returns:\r\n torch.Tensor: Transformed query tensor (#batch, n_head, time1, d_k).\r\n torch.Tensor: Transformed key tensor (#batch, n_head, time2, d_k).\r\n torch.Tensor: Transformed value tensor (#batch, n_head, time2, d_k).\r\n\r\n \"\"\"\r\n n_batch = query.size(0)\r\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\r\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\r\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\r\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\r\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\r\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\r\n\r\n return q, k, v\r\n\r\n def forward_attention(self, value, scores, mask):\r\n \"\"\"Compute attention context vector.\r\n\r\n Args:\r\n value (torch.Tensor): Transformed value (#batch, n_head, time2, d_k).\r\n scores (torch.Tensor): Attention 
score (#batch, n_head, time1, time2).\r\n mask (torch.Tensor): Mask (#batch, 1, time2) or (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Transformed value (#batch, time1, d_model)\r\n weighted by the attention score (#batch, time1, time2).\r\n\r\n \"\"\"\r\n n_batch = value.size(0)\r\n if mask is not None:\r\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\r\n min_value = torch.finfo(scores.dtype).min\r\n scores = scores.masked_fill(mask, min_value)\r\n self.attn = torch.softmax(scores, dim=-1).masked_fill(\r\n mask, 0.0\r\n ) # (batch, head, time1, time2)\r\n else:\r\n self.attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\r\n\r\n p_attn = self.dropout(self.attn)\r\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\r\n x = (\r\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\r\n ) # (batch, time1, d_model)\r\n\r\n return self.linear_out(x) # (batch, time1, d_model)\r\n\r\n def forward(self, query, key, value, mask):\r\n \"\"\"Compute scaled dot product attention.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "src/clap_module/conformer/modules.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\r\n \"\"\"Multi-Head Attention layer with relative position encoding (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n Paper: https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n n_head (int): The number of heads.\r\n n_feat (int): The number of features.\r\n dropout_rate (float): Dropout rate.\r\n zero_triu (bool): Whether to zero the upper triangular part of attention matrix.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, n_head, n_feat, dropout_rate, zero_triu=False):\r\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\r\n super().__init__(n_head, n_feat, dropout_rate)\r\n self.zero_triu = zero_triu\r\n # linear transformation for positional encoding\r\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\r\n # these two learnable bias are used in matrix c and matrix d\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\r\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\r\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\r\n\r\n def rel_shift(self, x):\r\n \"\"\"Compute relative positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, head, time1, 2*time1-1).\r\n time1 means the length of query vector.\r\n\r\n Returns:\r\n torch.Tensor: Output tensor.\r\n\r\n \"\"\"\r\n zero_pad = torch.zeros((*x.size()[:3], 1), device=x.device, dtype=x.dtype)\r\n x_padded = torch.cat([zero_pad, x], dim=-1)\r\n\r\n x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))\r\n x = x_padded[:, :, 1:].view_as(x)[\r\n :, :, :, : x.size(-1) // 2 + 1\r\n ] # only keep the positions from 0 to time2\r\n\r\n if self.zero_triu:\r\n 
ones = torch.ones((x.size(2), x.size(3)), device=x.device)\r\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\r\n\r\n return x\r\n\r\n def forward(self, query, key, value, pos_emb, mask):\r\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. positional encoding.\r\n\r\n Args:\r\n query (torch.Tensor): Query tensor (#batch, time1, size).\r\n key (torch.Tensor): Key tensor (#batch, time2, size).\r\n value (torch.Tensor): Value tensor (#batch, time2, size).\r\n pos_emb (torch.Tensor): Positional embedding tensor\r\n (#batch, 2*time1-1, size).\r\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\r\n (#batch, time1, time2).\r\n\r\n Returns:\r\n torch.Tensor: Output tensor (#batch, time1, d_model).\r\n\r\n \"\"\"\r\n q, k, v = self.forward_qkv(query, key, value)\r\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\r\n\r\n n_batch_pos = pos_emb.size(0)\r\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\r\n p = p.transpose(1, 2) # (batch, head, 2*time1-1, d_k)\r\n\r\n # (batch, head, time1, d_k)\r\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\r\n # (batch, head, time1, d_k)\r\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\r\n\r\n # compute attention score\r\n # first compute matrix a and matrix c\r\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\r\n # (batch, head, time1, time2)\r\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\r\n\r\n # compute matrix b and matrix d\r\n # (batch, head, time1, 2*time1-1)\r\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\r\n matrix_bd = self.rel_shift(matrix_bd)\r\n\r\n scores = (matrix_ac + matrix_bd) / math.sqrt(\r\n self.d_k\r\n ) # (batch, head, time1, time2)\r\n\r\n return self.forward_attention(v, scores, mask)\r" }, { "identifier": "LegacyRelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class LegacyRelPositionalEncoding(PositionalEncoding):\r\n \"\"\"Relative positional encoding module (old version).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(\r\n d_model=d_model,\r\n dropout_rate=dropout_rate,\r\n max_len=max_len,\r\n reverse=True,\r\n )\r\n\r\n def forward(self, x):\r\n \"\"\"Compute positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n torch.Tensor: Positional embedding tensor (1, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[:, : x.size(1)]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "PositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\r\n \"\"\"Positional encoding.\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n reverse (bool): Whether to reverse the input position. Only for\r\n the class LegacyRelPositionalEncoding. 
We remove it in the current\r\n class RelPositionalEncoding.\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000, reverse=False):\r\n \"\"\"Construct a PositionalEncoding object.\r\n \"\"\"\r\n super(PositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.reverse = reverse\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n self._register_load_state_dict_pre_hook(_pre_hook)\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n if self.pe.size(1) >= x.size(1):\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n pe = torch.zeros(x.size(1), self.d_model)\r\n if self.reverse:\r\n position = torch.arange(\r\n x.size(1) - 1, -1, -1.0, dtype=torch.float32\r\n ).unsqueeze(1)\r\n else:\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe[:, 0::2] = torch.sin(position * div_term)\r\n pe[:, 1::2] = torch.cos(position * div_term)\r\n pe = pe.unsqueeze(0)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale + self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "RelPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class RelPositionalEncoding(torch.nn.Module):\r\n \"\"\"Relative positional encoding module (new implementation).\r\n\r\n Details can be found in https://github.com/espnet/espnet/pull/2816.\r\n\r\n See : Appendix B in https://arxiv.org/abs/1901.02860\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Construct a RelPositionalEncoding object.\r\n \"\"\"\r\n super(RelPositionalEncoding, self).__init__()\r\n self.d_model = d_model\r\n self.xscale = math.sqrt(self.d_model)\r\n self.dropout = torch.nn.Dropout(p=dropout_rate)\r\n self.pe = None\r\n self.extend_pe(torch.tensor(0.0).expand(1, max_len))\r\n\r\n def extend_pe(self, x):\r\n \"\"\"Reset the positional encodings.\r\n \"\"\"\r\n if self.pe is not None:\r\n # self.pe contains both positive and negative parts\r\n # the length of self.pe is 2 * input_len - 1\r\n if self.pe.size(1) >= x.size(1) * 2 - 1:\r\n if self.pe.dtype != x.dtype or self.pe.device != x.device:\r\n self.pe = self.pe.to(dtype=x.dtype, device=x.device)\r\n return\r\n # Suppose `i` means the position of the query vector and `j` means the\r\n # position of the key vector. 
We use positive relative positions when keys\r\n # are to the left (i>j) and negative relative positions otherwise (i<j).\r\n pe_positive = torch.zeros(x.size(1), self.d_model)\r\n pe_negative = torch.zeros(x.size(1), self.d_model)\r\n position = torch.arange(0, x.size(1), dtype=torch.float32).unsqueeze(1)\r\n div_term = torch.exp(\r\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\r\n * -(math.log(10000.0) / self.d_model)\r\n )\r\n pe_positive[:, 0::2] = torch.sin(position * div_term)\r\n pe_positive[:, 1::2] = torch.cos(position * div_term)\r\n pe_negative[:, 0::2] = torch.sin(-1 * position * div_term)\r\n pe_negative[:, 1::2] = torch.cos(-1 * position * div_term)\r\n\r\n # Reverse the order of positive indices and concat both positive and\r\n # negative indices. This is used to support the shifting trick\r\n # as in https://arxiv.org/abs/1901.02860\r\n pe_positive = torch.flip(pe_positive, [0]).unsqueeze(0)\r\n pe_negative = pe_negative[1:].unsqueeze(0)\r\n pe = torch.cat([pe_positive, pe_negative], dim=1)\r\n self.pe = pe.to(device=x.device, dtype=x.dtype)\r\n\r\n def forward(self, x: torch.Tensor):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x * self.xscale\r\n pos_emb = self.pe[\r\n :,\r\n self.pe.size(1) // 2 - x.size(1) + 1 : self.pe.size(1) // 2 + x.size(1),\r\n ]\r\n return self.dropout(x), self.dropout(pos_emb)\r" }, { "identifier": "ScaledPositionalEncoding", "path": "src/clap_module/conformer/embedding.py", "snippet": "class ScaledPositionalEncoding(PositionalEncoding):\r\n \"\"\"Scaled positional encoding module.\r\n\r\n See Sec. 3.2 https://arxiv.org/abs/1809.08895\r\n\r\n Args:\r\n d_model (int): Embedding dimension.\r\n dropout_rate (float): Dropout rate.\r\n max_len (int): Maximum input length.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, d_model, dropout_rate, max_len=5000):\r\n \"\"\"Initialize class.\"\"\"\r\n super().__init__(d_model=d_model, dropout_rate=dropout_rate, max_len=max_len)\r\n self.alpha = torch.nn.Parameter(torch.tensor(1.0))\r\n\r\n def reset_parameters(self):\r\n \"\"\"Reset parameters.\"\"\"\r\n self.alpha.data = torch.tensor(1.0)\r\n\r\n def forward(self, x):\r\n \"\"\"Add positional encoding.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (batch, time, `*`).\r\n\r\n Returns:\r\n torch.Tensor: Encoded tensor (batch, time, `*`).\r\n\r\n \"\"\"\r\n self.extend_pe(x)\r\n x = x + self.alpha * self.pe[:, : x.size(1)]\r\n return self.dropout(x)\r" }, { "identifier": "LayerNorm", "path": "src/clap_module/conformer/modules.py", "snippet": "class LayerNorm(torch.nn.LayerNorm):\r\n \"\"\"Layer normalization module.\r\n\r\n Args:\r\n nout (int): Output dim size.\r\n dim (int): Dimension to be normalized.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, nout, dim=-1):\r\n \"\"\"Construct a LayerNorm object.\"\"\"\r\n super(LayerNorm, self).__init__(nout, eps=1e-12)\r\n self.dim = dim\r\n\r\n def forward(self, x):\r\n \"\"\"Apply layer normalization.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor.\r\n\r\n Returns:\r\n torch.Tensor: Normalized tensor.\r\n\r\n \"\"\"\r\n if self.dim == -1:\r\n return super(LayerNorm, self).forward(x)\r\n return (\r\n super(LayerNorm, self)\r\n .forward(x.transpose(self.dim, -1))\r\n .transpose(self.dim, -1)\r\n )\r" }, { "identifier": "Conv1dLinear", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class Conv1dLinear(torch.nn.Module):\r\n 
\"\"\"Conv1D + Linear for Transformer block.\r\n\r\n A variant of MultiLayeredConv1d, which replaces the second conv layer with a linear layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize Conv1dLinear module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(Conv1dLinear, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Linear(hidden_chans, in_chans)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, in_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x))\r" }, { "identifier": "MultiLayeredConv1d", "path": "src/clap_module/conformer/multi_layer_conv.py", "snippet": "class MultiLayeredConv1d(torch.nn.Module):\r\n \"\"\"Multi-layered conv1d for Transformer block.\r\n\r\n This is a module of multi-layered conv1d designed\r\n to replace positionwise feed-forward network\r\n in Transformer block, which is introduced in\r\n `FastSpeech: Fast, Robust and Controllable Text to Speech`_.\r\n\r\n .. _`FastSpeech: Fast, Robust and Controllable Text to Speech`:\r\n https://arxiv.org/pdf/1905.09263.pdf\r\n\r\n \"\"\"\r\n\r\n def __init__(self, in_chans, hidden_chans, kernel_size, dropout_rate):\r\n \"\"\"Initialize MultiLayeredConv1d module.\r\n\r\n Args:\r\n in_chans (int): Number of input channels.\r\n hidden_chans (int): Number of hidden channels.\r\n kernel_size (int): Kernel size of conv1d.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n super(MultiLayeredConv1d, self).__init__()\r\n self.w_1 = torch.nn.Conv1d(\r\n in_chans,\r\n hidden_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.w_2 = torch.nn.Conv1d(\r\n hidden_chans,\r\n in_chans,\r\n kernel_size,\r\n stride=1,\r\n padding=(kernel_size - 1) // 2,\r\n )\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n\r\n def forward(self, x):\r\n \"\"\"Calculate forward propagation.\r\n\r\n Args:\r\n x (torch.Tensor): Batch of input tensors (B, T, in_chans).\r\n\r\n Returns:\r\n torch.Tensor: Batch of output tensors (B, T, in_chans).\r\n\r\n \"\"\"\r\n x = torch.relu(self.w_1(x.transpose(-1, 1))).transpose(-1, 1)\r\n return self.w_2(self.dropout(x).transpose(-1, 1)).transpose(-1, 1)\r" }, { "identifier": "PositionwiseFeedForward", "path": "src/clap_module/conformer/modules.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\r\n \"\"\"Positionwise feed forward layer.\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n hidden_units (int): The number of hidden units.\r\n dropout_rate (float): Dropout rate.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, hidden_units, dropout_rate, activation=torch.nn.ReLU()):\r\n \"\"\"Construct a PositionwiseFeedForward object.\"\"\"\r\n super(PositionwiseFeedForward, self).__init__()\r\n self.w_1 = torch.nn.Linear(idim, hidden_units)\r\n self.w_2 = torch.nn.Linear(hidden_units, idim)\r\n self.dropout = torch.nn.Dropout(dropout_rate)\r\n self.activation = activation\r\n\r\n def forward(self, x):\r\n \"\"\"Forward 
function.\"\"\"\r\n return self.w_2(self.dropout(self.activation(self.w_1(x))))\r" }, { "identifier": "repeat", "path": "src/clap_module/conformer/modules.py", "snippet": "def repeat(N, fn, layer_drop_rate=0.0):\r\n \"\"\"Repeat module N times.\r\n\r\n Args:\r\n N (int): Number of repeat times.\r\n fn (Callable): Function to generate module.\r\n layer_drop_rate (float): Probability of dropping out each fn (layer).\r\n\r\n Returns:\r\n MultiSequential: Repeated model instance.\r\n\r\n \"\"\"\r\n return MultiSequential(*[fn(n) for n in range(N)], layer_drop_rate=layer_drop_rate)\r" }, { "identifier": "Conv2dSubsampling", "path": "src/clap_module/conformer/sub_sampling.py", "snippet": "class Conv2dSubsampling(torch.nn.Module):\r\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\r\n\r\n Args:\r\n idim (int): Input dimension.\r\n odim (int): Output dimension.\r\n dropout_rate (float): Dropout rate.\r\n pos_enc (torch.nn.Module): Custom position encoding layer.\r\n\r\n \"\"\"\r\n\r\n def __init__(self, idim, odim, dropout_rate, pos_enc=None):\r\n \"\"\"Construct a Conv2dSubsampling object.\"\"\"\r\n super(Conv2dSubsampling, self).__init__()\r\n self.conv = torch.nn.Sequential(\r\n torch.nn.Conv2d(1, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n torch.nn.Conv2d(odim, odim, 3, 2),\r\n torch.nn.ReLU(),\r\n )\r\n self.out = torch.nn.Sequential(\r\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim),\r\n pos_enc if pos_enc is not None else PositionalEncoding(odim, dropout_rate),\r\n )\r\n\r\n def forward(self, x, x_mask):\r\n \"\"\"Subsample x.\r\n\r\n Args:\r\n x (torch.Tensor): Input tensor (#batch, time, idim).\r\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\r\n\r\n Returns:\r\n torch.Tensor: Subsampled tensor (#batch, time', odim),\r\n where time' = time // 4.\r\n torch.Tensor: Subsampled mask (#batch, 1, time'),\r\n where time' = time // 4.\r\n\r\n \"\"\"\r\n x = x.unsqueeze(1) # (b, c, t, f)\r\n x = self.conv(x)\r\n b, c, t, f = x.size()\r\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\r\n if x_mask is None:\r\n return x, None\r\n return x, x_mask[:, :, :-2:2][:, :, :-2:2]\r\n\r\n def __getitem__(self, key):\r\n \"\"\"Get item.\r\n\r\n When reset_parameters() is called, if use_scaled_pos_enc is used,\r\n return the positional encoding.\r\n\r\n \"\"\"\r\n if key != -1:\r\n raise NotImplementedError(\"Support only `-1` (for `reset_parameters`).\")\r\n return self.out[key]\r" }, { "identifier": "AttentionPool1d", "path": "src/clap_module/feature_fusion.py", "snippet": "class AttentionPool1d(nn.Module):\r\n def __init__(\r\n self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None\r\n ):\r\n super().__init__()\r\n self.positional_embedding = nn.Parameter(\r\n torch.randn(spacial_dim + 1, embed_dim) / embed_dim\r\n # torch.randn(spacial_dim, embed_dim) / embed_dim\r\n )\r\n self.k_proj = nn.Linear(embed_dim, embed_dim)\r\n self.q_proj = nn.Linear(embed_dim, embed_dim)\r\n self.v_proj = nn.Linear(embed_dim, embed_dim)\r\n self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)\r\n self.num_heads = num_heads\r\n\r\n def forward(self, x):\r\n # import pdb; pdb.set_trace()\r\n x = x.permute(1, 0, 2) # B*L*D -> L*B*D; NCHW -> (HW)NC\r\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC\r\n x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC\r\n x, _ = F.multi_head_attention_forward(\r\n query=x,\r\n key=x,\r\n value=x,\r\n embed_dim_to_check=x.shape[-1],\r\n num_heads=self.num_heads,\r\n 
q_proj_weight=self.q_proj.weight,\r\n k_proj_weight=self.k_proj.weight,\r\n v_proj_weight=self.v_proj.weight,\r\n in_proj_weight=None,\r\n in_proj_bias=torch.cat(\r\n [self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]\r\n ),\r\n bias_k=None,\r\n bias_v=None,\r\n add_zero_attn=False,\r\n dropout_p=0,\r\n out_proj_weight=self.c_proj.weight,\r\n out_proj_bias=self.c_proj.bias,\r\n use_separate_proj_weight=True,\r\n training=self.training,\r\n need_weights=False,\r\n )\r\n\r\n return x[0] # B*D\r" }, { "identifier": "DAF", "path": "src/clap_module/feature_fusion.py", "snippet": "class DAF(nn.Module):\r\n \"\"\"Direct addition (DirectAddFuse).\r\n \"\"\"\r\n\r\n def __init__(self):\r\n super(DAF, self).__init__()\r\n\r\n def forward(self, x, residual):\r\n return x + residual\r" }, { "identifier": "AFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class AFF(nn.Module):\r\n \"\"\"Multi-feature fusion (AFF).\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(AFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise ValueError('the type is not supported.')\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xo = 2 * x * wei + 2 * residual * (1 - wei)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" }, { "identifier": "iAFF", "path": "src/clap_module/feature_fusion.py", "snippet": "class iAFF(nn.Module):\r\n \"\"\"Multi-feature fusion (iAFF).\r\n \"\"\"\r\n\r\n def __init__(self, channels=64, r=4, type='2D'):\r\n super(iAFF, self).__init__()\r\n inter_channels = int(channels // r)\r\n\r\n if type == '1D':\r\n # local attention\r\n self.local_att = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # global attention\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n 
nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n\r\n # second local attention\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n # second global attention\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool1d(1),\r\n nn.Conv1d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv1d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm1d(channels),\r\n )\r\n elif type == '2D':\r\n # local attention\r\n self.local_att = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # global attention\r\n self.global_att = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n\r\n # second local attention\r\n self.local_att2 = nn.Sequential(\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n # second global attention\r\n self.global_att2 = nn.Sequential(\r\n nn.AdaptiveAvgPool2d(1),\r\n nn.Conv2d(channels, inter_channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(inter_channels),\r\n nn.ReLU(inplace=True),\r\n nn.Conv2d(inter_channels, channels, kernel_size=1, stride=1, padding=0),\r\n nn.BatchNorm2d(channels),\r\n )\r\n else:\r\n raise ValueError('the type is not supported')\r\n\r\n self.sigmoid = nn.Sigmoid()\r\n\r\n def forward(self, x, residual):\r\n flag = False\r\n xa = x + residual\r\n if xa.size(0) == 1:\r\n xa = torch.cat([xa, xa], dim=0)\r\n flag = True\r\n xl = self.local_att(xa)\r\n xg = self.global_att(xa)\r\n xlg = xl + xg\r\n wei = self.sigmoid(xlg)\r\n xi = x * wei + residual * (1 - wei)\r\n\r\n xl2 = self.local_att2(xi)\r\n xg2 = self.global_att2(xi)\r\n xlg2 = xl2 + xg2\r\n wei2 = self.sigmoid(xlg2)\r\n xo = x * wei2 + residual * (1 - wei2)\r\n if flag:\r\n xo = xo[0].unsqueeze(0)\r\n return xo\r" } ]
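The AFF and iAFF snippets above blend a feature map with its residual through a sigmoid gate computed from local and global attention branches. A minimal sketch of just the blending rule, with toy shapes and the gate stubbed out as a constant (`wei` stands in for `sigmoid(local_att(xa) + global_att(xa))`; this is an illustration, not the repo's code):

import torch

# Toy illustration of the AFF fusion rule from the snippet above.
# `wei` stands in for the sigmoid of the local + global attention branches.
x = torch.randn(2, 64, 8, 8)
residual = torch.randn_like(x)
wei = torch.full_like(x, 0.5)                 # gate values in (0, 1)
out = 2 * x * wei + 2 * residual * (1 - wei)  # AFF output
assert torch.allclose(out, x + residual)      # with wei == 0.5 this reduces to a plain sum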
import logging
import torch
import math

from .convolution import ConvolutionModule
from .encoder_layer import EncoderLayer
from .modules import get_activation
from .modules import VGG2L
from .modules import (
    LegacyRelPositionMultiHeadedAttention,
    MultiHeadedAttention,
    RelPositionMultiHeadedAttention,
)
from .embedding import (
    LegacyRelPositionalEncoding,
    PositionalEncoding,
    RelPositionalEncoding,
    ScaledPositionalEncoding,
)
from .modules import LayerNorm
from .multi_layer_conv import (
    Conv1dLinear,
    MultiLayeredConv1d,
)
from .modules import (
    PositionwiseFeedForward,
)
from .modules import repeat
from .sub_sampling import Conv2dSubsampling
from ..feature_fusion import AttentionPool1d, DAF, AFF, iAFF
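A hedged shape walkthrough of how the relative-position pieces imported above fit together, assuming `RelPositionalEncoding` and `RelPositionMultiHeadedAttention` behave as in the snippets (a sketch, not an official usage example; the classes are assumed to be in scope):

import torch

# Assumes the two classes from the snippets above are importable/in scope.
d_model, n_head, t = 256, 4, 10
x = torch.randn(2, t, d_model)
pos_enc = RelPositionalEncoding(d_model, dropout_rate=0.1)
x_scaled, pos_emb = pos_enc(x)  # pos_emb spans offsets -(t-1)..t-1: (1, 2*t - 1, d_model)
attn = RelPositionMultiHeadedAttention(n_head, d_model, dropout_rate=0.1)
out = attn(x_scaled, x_scaled, x_scaled, pos_emb, mask=None)
print(out.shape)                # torch.Size([2, 10, 256])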
14,564
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernel size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type signature.)

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        ffn_layer_type="linear",
        ffn_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="relu",
        use_cnn_module=True,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
        max_seq_len=100,
        enable_fusion=False,
        fusion_type="",
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        self.max_seq_len = max_seq_len
        activation = get_activation(activation_type)

        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)

        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "vgg2l":
            self.embed = VGG2L(idim, attention_dim)
            self.conv_subsampling_factor = 4
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before

        # self-attention module definition
        if selfattention_layer_type == "selfattn":
            logging.info("encoder self-attention layer type = self-attention")
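The `stochastic_depth_rate` argument documented above is applied per encoder layer exactly as in the EncoderLayer snippet: with probability p the layer returns its input unchanged, otherwise the residual branch is scaled by 1/(1 - p) so the expected output matches the deterministic x + f(x). A minimal standalone sketch of that rule (`f` is a stand-in sublayer, not part of the source):

import torch

def stochastic_residual(x, f, p, training=True):
    # With probability p, skip the sublayer entirely (training only).
    if training and p > 0 and torch.rand(1).item() < p:
        return x
    # Otherwise rescale the branch so that E[output] == x + f(x).
    coeff = 1.0 / (1.0 - p) if training and p > 0 else 1.0
    return x + coeff * f(x)

out = stochastic_residual(torch.randn(2, 10, 256), torch.tanh, p=0.1)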
# Copyright 2020 Johns Hopkins University (Shinji Watanabe)
#           Northwestern Polytechnical University (Pengcheng Guo)
#  Apache 2.0  (http://www.apache.org/licenses/LICENSE-2.0)

"""Encoder definition."""


class Encoder(torch.nn.Module):
    """Conformer encoder module.

    Args:
        idim (int): Input dimension.
        attention_dim (int): Dimension of attention.
        attention_heads (int): The number of heads of multi head attention.
        linear_units (int): The number of units of position-wise feed forward.
        num_blocks (int): The number of decoder blocks.
        dropout_rate (float): Dropout rate.
        positional_dropout_rate (float): Dropout rate after adding positional encoding.
        attention_dropout_rate (float): Dropout rate in attention.
        input_layer (Union[str, torch.nn.Module]): Input layer type.
        normalize_before (bool): Whether to use layer_norm before the first block.
        concat_after (bool): Whether to concat attention layer's input and output.
            if True, additional linear will be applied.
            i.e. x -> x + linear(concat(x, att(x)))
            if False, no additional linear will be applied.
            i.e. x -> x + att(x)
        positionwise_layer_type (str): "linear", "conv1d", or "conv1d-linear".
        positionwise_conv_kernel_size (int): Kernel size of positionwise conv1d layer.
        macaron_style (bool): Whether to use macaron style for positionwise layer.
        pos_enc_layer_type (str): Encoder positional encoding layer type.
        selfattention_layer_type (str): Encoder attention layer type.
        activation_type (str): Encoder activation function type.
        use_cnn_module (bool): Whether to use convolution module.
        zero_triu (bool): Whether to zero the upper triangular part of attention matrix.
        cnn_module_kernel (int): Kernel size of convolution module.
        padding_idx (int): Padding idx for input_layer=embed.
        stochastic_depth_rate (float): Maximum probability to skip the encoder layer.
        intermediate_layers (Union[List[int], None]): indices of intermediate CTC layer.
            indices start from 1.
            if not None, intermediate outputs are returned (which changes return type signature.)

    """

    def __init__(
        self,
        idim,
        attention_dim=256,
        attention_heads=4,
        linear_units=2048,
        num_blocks=6,
        dropout_rate=0.1,
        positional_dropout_rate=0.1,
        attention_dropout_rate=0.0,
        input_layer="conv2d",
        normalize_before=True,
        concat_after=False,
        ffn_layer_type="linear",
        ffn_conv_kernel_size=1,
        macaron_style=False,
        pos_enc_layer_type="abs_pos",
        selfattention_layer_type="selfattn",
        activation_type="relu",
        use_cnn_module=True,
        zero_triu=False,
        cnn_module_kernel=31,
        padding_idx=-1,
        stochastic_depth_rate=0.0,
        intermediate_layers=None,
        ctc_softmax=None,
        conditioning_layer_dim=None,
        max_seq_len=100,
        enable_fusion=False,
        fusion_type="",
    ):
        """Construct an Encoder object."""
        super(Encoder, self).__init__()
        self.max_seq_len = max_seq_len
        activation = get_activation(activation_type)

        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "scaled_abs_pos":
            pos_enc_class = ScaledPositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            assert selfattention_layer_type == "rel_selfattn"
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "legacy_rel_pos":
            assert selfattention_layer_type == "legacy_rel_selfattn"
            pos_enc_class = LegacyRelPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)

        self.conv_subsampling_factor = 1
        if input_layer == "linear":
            self.embed = torch.nn.Sequential(
                torch.nn.Linear(idim, attention_dim),
                torch.nn.LayerNorm(attention_dim),
                torch.nn.Dropout(dropout_rate),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer == "conv2d":
            self.embed = Conv2dSubsampling(
                idim,
                attention_dim,
                dropout_rate,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
            self.conv_subsampling_factor = 4
        elif input_layer == "vgg2l":
            self.embed = VGG2L(idim, attention_dim)
            self.conv_subsampling_factor = 4
        elif input_layer == "embed":
            self.embed = torch.nn.Sequential(
                torch.nn.Embedding(idim, attention_dim, padding_idx=padding_idx),
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif isinstance(input_layer, torch.nn.Module):
            self.embed = torch.nn.Sequential(
                input_layer,
                pos_enc_class(attention_dim, positional_dropout_rate),
            )
        elif input_layer is None:
            self.embed = torch.nn.Sequential(
                pos_enc_class(attention_dim, positional_dropout_rate)
            )
        else:
            raise ValueError("unknown input_layer: " + input_layer)
        self.normalize_before = normalize_before

        # self-attention module definition
        if selfattention_layer_type == "selfattn":
            logging.info("encoder self-attention layer type = self-attention")
encoder_selfattn_layer = MultiHeadedAttention
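Both relative-attention snippets in this row's context implement the Transformer-XL pad-and-reshape shift; a standalone copy of the new-style `rel_shift` run on a toy tensor shows the shape contract (scores over 2*t - 1 relative offsets collapse to a t x t score matrix):

import torch

def rel_shift(x):
    # x: (batch, head, time1, 2*time1 - 1) attention scores over relative offsets.
    zero_pad = torch.zeros((*x.size()[:3], 1), dtype=x.dtype)
    x_padded = torch.cat([zero_pad, x], dim=-1)
    x_padded = x_padded.view(*x.size()[:2], x.size(3) + 1, x.size(2))
    # Keep only offsets 0..time2-1 after the shift, as in the snippet above.
    return x_padded[:, :, 1:].view_as(x)[:, :, :, : x.size(-1) // 2 + 1]

t = 3
scores = torch.arange(t * (2 * t - 1), dtype=torch.float32).view(1, 1, t, -1)
print(rel_shift(scores).shape)  # torch.Size([1, 1, 3, 3])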
5
2023-11-25 02:38:32+00:00
24k
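A hypothetical way to score rows like the one above: feed `cropped_code` to a model and compare its first generated line against `next_line` (field names as in this dump; `model_complete` is a stand-in, not part of the dataset):

def exact_match(prediction: str, target: str) -> bool:
    # Compare the first predicted line against the gold next_line.
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == target.strip()

gold = "encoder_selfattn_layer = MultiHeadedAttention"
pred = "encoder_selfattn_layer = MultiHeadedAttention"  # e.g. model_complete(cropped_code)
print(exact_match(pred, gold))  # True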
Luo-Z13/pointobb
PointOBB/mmdet/models/roi_heads/PointOBB_head.py
[ { "identifier": "HEADS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "HEADS = MODELS" }, { "identifier": "MODELS", "path": "PointOBB/mmdet/models/builder.py", "snippet": "MODELS = Registry('models', parent=MMCV_MODELS)" }, { "identifier": "build_head", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_head(cfg):\n \"\"\"Build head.\"\"\"\n return HEADS.build(cfg)" }, { "identifier": "build_roi_extractor", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_roi_extractor(cfg):\n \"\"\"Build roi extractor.\"\"\"\n return ROI_EXTRACTORS.build(cfg)" }, { "identifier": "build_loss", "path": "PointOBB/mmdet/models/builder.py", "snippet": "def build_loss(cfg):\n \"\"\"Build loss.\"\"\"\n return LOSSES.build(cfg)" }, { "identifier": "StandardRoIHead", "path": "PointOBB/mmdet/models/roi_heads/standard_roi_head.py", "snippet": "class StandardRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Simplest base roi head including one bbox head and one mask head.\"\"\"\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler.\"\"\"\n self.bbox_assigner = None\n self.bbox_sampler = None\n if self.train_cfg:\n self.bbox_assigner = build_assigner(self.train_cfg.assigner)\n self.bbox_sampler = build_sampler(\n self.train_cfg.sampler, context=self)\n\n def init_bbox_head(self, bbox_roi_extractor, bbox_head):\n \"\"\"Initialize ``bbox_head``\"\"\"\n self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor)\n self.bbox_head = build_head(bbox_head)\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize ``mask_head``\"\"\"\n if mask_roi_extractor is not None:\n self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor)\n self.share_roi_extractor = False\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n self.mask_head = build_head(mask_head)\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n bbox_results = self._bbox_forward(x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask head\n if self.with_mask:\n mask_rois = rois[:100]\n mask_results = self._mask_forward(x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n ann_weight,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n # assign gts and sample proposals\n if self.with_bbox or self.with_mask:\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n 
gt_bboxes_ignore = [None for _ in range(num_imgs)]\n sampling_results = []\n for i in range(num_imgs):\n assign_result = self.bbox_assigner.assign(\n proposal_list[i], gt_bboxes[i], gt_bboxes_ignore[i],\n gt_labels[i])\n sampling_result = self.bbox_sampler.sample(\n assign_result,\n proposal_list[i],\n gt_bboxes[i],\n gt_labels[i],\n feats=[lvl_feat[i][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n losses = dict()\n # bbox head forward and loss\n if self.with_bbox:\n bbox_results = self._bbox_forward_train(x, sampling_results,\n gt_bboxes, gt_labels,ann_weight, #add by fei\n img_metas)\n losses.update(bbox_results['loss_bbox'])\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(x, sampling_results,\n bbox_results['bbox_feats'],\n gt_masks, img_metas)\n losses.update(mask_results['loss_mask'])\n\n return losses\n\n def _bbox_forward(self, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n # TODO: a more flexible way to decide which feature maps to use\n bbox_feats = self.bbox_roi_extractor(\n x[:self.bbox_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n bbox_feats = self.shared_head(bbox_feats)\n cls_score, bbox_pred = self.bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, x, sampling_results, gt_bboxes, gt_labels, ann_weight,\n img_metas):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(x, rois)\n\n bbox_targets = self.bbox_head.get_targets(sampling_results, gt_bboxes,\n gt_labels,ann_weight, self.train_cfg) ## add by fei\n loss_bbox = self.bbox_head.loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(loss_bbox=loss_bbox)\n return bbox_results\n\n def _mask_forward_train(self, x, sampling_results, bbox_feats, gt_masks,\n img_metas):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n if not self.share_roi_extractor:\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(x, pos_rois)\n else:\n pos_inds = []\n device = bbox_feats.device\n for res in sampling_results:\n pos_inds.append(\n torch.ones(\n res.pos_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds.append(\n torch.zeros(\n res.neg_bboxes.shape[0],\n device=device,\n dtype=torch.uint8))\n pos_inds = torch.cat(pos_inds)\n\n mask_results = self._mask_forward(\n x, pos_inds=pos_inds, bbox_feats=bbox_feats)\n\n mask_targets = self.mask_head.get_targets(sampling_results, gt_masks,\n self.train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head.loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask, mask_targets=mask_targets)\n return mask_results\n\n def _mask_forward(self, x, rois=None, pos_inds=None, bbox_feats=None):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n assert ((rois is not None) ^\n (pos_inds is not None and bbox_feats is not None))\n if rois is not None:\n mask_feats = self.mask_roi_extractor(\n x[:self.mask_roi_extractor.num_inputs], rois)\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n else:\n assert bbox_feats is not None\n mask_feats = 
bbox_feats[pos_inds]\n\n mask_pred = self.mask_head(mask_feats)\n mask_results = dict(mask_pred=mask_pred, mask_feats=mask_feats)\n return mask_results\n\n async def async_simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Async test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = await self.async_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n bbox_results = bbox2result(det_bboxes, det_labels,\n self.bbox_head.num_classes)\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = await self.async_test_mask(\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=rescale,\n mask_test_cfg=self.test_cfg.get('mask'))\n return bbox_results, segm_results\n\n def simple_test(self,\n x,\n proposal_list,\n img_metas,\n proposals=None,\n rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n\n det_bboxes, det_labels = self.simple_test_bboxes(\n x, img_metas, proposal_list, self.test_cfg, rescale=rescale)\n\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head.num_classes)\n for i in range(len(det_bboxes))\n ]\n\n if not self.with_mask:\n return bbox_results\n else:\n segm_results = self.simple_test_mask(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return list(zip(bbox_results, segm_results))\n\n def aug_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n det_bboxes, det_labels = self.aug_test_bboxes(x, img_metas,\n proposal_list,\n self.test_cfg)\n if rescale:\n _det_bboxes = det_bboxes\n else:\n _det_bboxes = det_bboxes.clone()\n _det_bboxes[:, :4] *= det_bboxes.new_tensor(\n img_metas[0][0]['scale_factor'])\n bbox_results = bbox2result(_det_bboxes, det_labels,\n self.bbox_head.num_classes)\n\n # det_bboxes always keep the original scale\n if self.with_mask:\n segm_results = self.aug_test_mask(x, img_metas, det_bboxes,\n det_labels)\n return [(bbox_results, segm_results)]\n else:\n return [bbox_results]\n\n def onnx_export(self, x, proposals, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n det_bboxes, det_labels = self.bbox_onnx_export(\n x, img_metas, proposals, self.test_cfg, rescale=rescale)\n\n if not self.with_mask:\n return det_bboxes, det_labels\n else:\n segm_results = self.mask_onnx_export(\n x, img_metas, det_bboxes, det_labels, rescale=rescale)\n return det_bboxes, det_labels, segm_results\n\n def mask_onnx_export(self, x, img_metas, det_bboxes, det_labels, **kwargs):\n \"\"\"Export mask branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n det_bboxes (Tensor): Bboxes and corresponding scores.\n has shape [N, num_bboxes, 5].\n det_labels (Tensor): class labels of\n shape [N, num_bboxes].\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # image shapes of images in the batch\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n raise RuntimeError('[ONNX Error] Can not record MaskHead '\n 'as it has not been executed this time')\n batch_size = det_bboxes.size(0)\n # if det_bboxes is rescaled to the original image size, we need to\n 
# rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n batch_index = torch.arange(\n det_bboxes.size(0), device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n max_shape = img_metas[0]['img_shape_for_onnx']\n num_det = det_bboxes.shape[1]\n det_bboxes = det_bboxes.reshape(-1, 4)\n det_labels = det_labels.reshape(-1)\n segm_results = self.mask_head.onnx_export(mask_pred, det_bboxes,\n det_labels, self.test_cfg,\n max_shape)\n segm_results = segm_results.reshape(batch_size, num_det, max_shape[0],\n max_shape[1])\n return segm_results\n\n def bbox_onnx_export(self, x, img_metas, proposals, rcnn_test_cfg,\n **kwargs):\n \"\"\"Export bbox branch to onnx which supports batch inference.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (Tensor): Region proposals with\n batch dimension, has shape [N, num_bboxes, 5].\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n\n Returns:\n tuple[Tensor, Tensor]: bboxes of shape [N, num_bboxes, 5]\n and class labels of shape [N, num_bboxes].\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n assert len(\n img_metas\n ) == 1, 'Only support one input image while in exporting to ONNX'\n img_shapes = img_metas[0]['img_shape_for_onnx']\n\n rois = proposals\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n bbox_pred = bbox_pred.reshape(batch_size, num_proposals_per_img,\n bbox_pred.size(-1))\n det_bboxes, det_labels = self.bbox_head.onnx_export(\n rois, cls_score, bbox_pred, img_shapes, cfg=rcnn_test_cfg)\n\n return det_bboxes, det_labels" }, { "identifier": "CascadeRoIHead", "path": "PointOBB/mmdet/models/roi_heads/cascade_roi_head.py", "snippet": "class CascadeRoIHead(BaseRoIHead, BBoxTestMixin, MaskTestMixin):\n \"\"\"Cascade roi head including one bbox head and one mask head.\n\n https://arxiv.org/abs/1712.00726\n \"\"\"\n\n def __init__(self,\n num_stages,\n stage_loss_weights,\n bbox_roi_extractor=None,\n bbox_head=None,\n mask_roi_extractor=None,\n mask_head=None,\n shared_head=None,\n train_cfg=None,\n test_cfg=None,\n pretrained=None,\n init_cfg=None):\n assert bbox_roi_extractor is not None\n assert bbox_head is not None\n assert shared_head is None, \\\n 'Shared head is not supported in Cascade RCNN anymore'\n\n self.num_stages = num_stages\n self.stage_loss_weights = stage_loss_weights\n super(CascadeRoIHead, self).__init__(\n bbox_roi_extractor=bbox_roi_extractor,\n bbox_head=bbox_head,\n mask_roi_extractor=mask_roi_extractor,\n mask_head=mask_head,\n shared_head=shared_head,\n train_cfg=train_cfg,\n test_cfg=test_cfg,\n pretrained=pretrained,\n init_cfg=init_cfg)\n\n def init_bbox_head(self, 
bbox_roi_extractor, bbox_head):\n \"\"\"Initialize box head and box roi extractor.\n\n Args:\n bbox_roi_extractor (dict): Config of box roi extractor.\n bbox_head (dict): Config of box in box head.\n \"\"\"\n self.bbox_roi_extractor = ModuleList()\n self.bbox_head = ModuleList()\n if not isinstance(bbox_roi_extractor, list):\n bbox_roi_extractor = [\n bbox_roi_extractor for _ in range(self.num_stages)\n ]\n if not isinstance(bbox_head, list):\n bbox_head = [bbox_head for _ in range(self.num_stages)]\n assert len(bbox_roi_extractor) == len(bbox_head) == self.num_stages\n for roi_extractor, head in zip(bbox_roi_extractor, bbox_head):\n self.bbox_roi_extractor.append(build_roi_extractor(roi_extractor))\n self.bbox_head.append(build_head(head))\n\n def init_mask_head(self, mask_roi_extractor, mask_head):\n \"\"\"Initialize mask head and mask roi extractor.\n\n Args:\n mask_roi_extractor (dict): Config of mask roi extractor.\n mask_head (dict): Config of mask in mask head.\n \"\"\"\n self.mask_head = nn.ModuleList()\n if not isinstance(mask_head, list):\n mask_head = [mask_head for _ in range(self.num_stages)]\n assert len(mask_head) == self.num_stages\n for head in mask_head:\n self.mask_head.append(build_head(head))\n if mask_roi_extractor is not None:\n self.share_roi_extractor = False\n self.mask_roi_extractor = ModuleList()\n if not isinstance(mask_roi_extractor, list):\n mask_roi_extractor = [\n mask_roi_extractor for _ in range(self.num_stages)\n ]\n assert len(mask_roi_extractor) == self.num_stages\n for roi_extractor in mask_roi_extractor:\n self.mask_roi_extractor.append(\n build_roi_extractor(roi_extractor))\n else:\n self.share_roi_extractor = True\n self.mask_roi_extractor = self.bbox_roi_extractor\n\n def init_assigner_sampler(self):\n \"\"\"Initialize assigner and sampler for each stage.\"\"\"\n self.bbox_assigner = []\n self.bbox_sampler = []\n if self.train_cfg is not None:\n for idx, rcnn_train_cfg in enumerate(self.train_cfg):\n self.bbox_assigner.append(\n build_assigner(rcnn_train_cfg.assigner))\n self.current_stage = idx\n self.bbox_sampler.append(\n build_sampler(rcnn_train_cfg.sampler, context=self))\n\n def forward_dummy(self, x, proposals):\n \"\"\"Dummy forward function.\"\"\"\n # bbox head\n outs = ()\n rois = bbox2roi([proposals])\n if self.with_bbox:\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n outs = outs + (bbox_results['cls_score'],\n bbox_results['bbox_pred'])\n # mask heads\n if self.with_mask:\n mask_rois = rois[:100]\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n outs = outs + (mask_results['mask_pred'], )\n return outs\n\n def _bbox_forward(self, stage, x, rois):\n \"\"\"Box head forward function used in both training and testing.\"\"\"\n bbox_roi_extractor = self.bbox_roi_extractor[stage]\n bbox_head = self.bbox_head[stage]\n bbox_feats = bbox_roi_extractor(x[:bbox_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n cls_score, bbox_pred = bbox_head(bbox_feats)\n\n bbox_results = dict(\n cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)\n return bbox_results\n\n def _bbox_forward_train(self, stage, x, sampling_results, gt_bboxes,\n gt_labels, rcnn_train_cfg):\n \"\"\"Run forward function and calculate loss for box head in training.\"\"\"\n rois = bbox2roi([res.bboxes for res in sampling_results])\n bbox_results = self._bbox_forward(stage, x, rois)\n bbox_targets = self.bbox_head[stage].get_targets(\n sampling_results, gt_bboxes, 
gt_labels, rcnn_train_cfg)\n loss_bbox = self.bbox_head[stage].loss(bbox_results['cls_score'],\n bbox_results['bbox_pred'], rois,\n *bbox_targets)\n\n bbox_results.update(\n loss_bbox=loss_bbox, rois=rois, bbox_targets=bbox_targets)\n return bbox_results\n\n def _mask_forward(self, stage, x, rois):\n \"\"\"Mask head forward function used in both training and testing.\"\"\"\n mask_roi_extractor = self.mask_roi_extractor[stage]\n mask_head = self.mask_head[stage]\n mask_feats = mask_roi_extractor(x[:mask_roi_extractor.num_inputs],\n rois)\n # do not support caffe_c4 model anymore\n mask_pred = mask_head(mask_feats)\n\n mask_results = dict(mask_pred=mask_pred)\n return mask_results\n\n def _mask_forward_train(self,\n stage,\n x,\n sampling_results,\n gt_masks,\n rcnn_train_cfg,\n bbox_feats=None):\n \"\"\"Run forward function and calculate loss for mask head in\n training.\"\"\"\n pos_rois = bbox2roi([res.pos_bboxes for res in sampling_results])\n mask_results = self._mask_forward(stage, x, pos_rois)\n\n mask_targets = self.mask_head[stage].get_targets(\n sampling_results, gt_masks, rcnn_train_cfg)\n pos_labels = torch.cat([res.pos_gt_labels for res in sampling_results])\n loss_mask = self.mask_head[stage].loss(mask_results['mask_pred'],\n mask_targets, pos_labels)\n\n mask_results.update(loss_mask=loss_mask)\n return mask_results\n\n def forward_train(self,\n x,\n img_metas,\n proposal_list,\n gt_bboxes,\n gt_labels,\n gt_bboxes_ignore=None,\n gt_masks=None):\n \"\"\"\n Args:\n x (list[Tensor]): list of multi-level img features.\n img_metas (list[dict]): list of image info dict where each dict\n has: 'img_shape', 'scale_factor', 'flip', and may also contain\n 'filename', 'ori_shape', 'pad_shape', and 'img_norm_cfg'.\n For details on the values of these keys see\n `mmdet/datasets/pipelines/formatting.py:Collect`.\n proposals (list[Tensors]): list of region proposals.\n gt_bboxes (list[Tensor]): Ground truth bboxes for each image with\n shape (num_gts, 4) in [tl_x, tl_y, br_x, br_y] format.\n gt_labels (list[Tensor]): class indices corresponding to each box\n gt_bboxes_ignore (None | list[Tensor]): specify which bounding\n boxes can be ignored when computing the loss.\n gt_masks (None | Tensor) : true segmentation masks for each box\n used if the architecture supports a segmentation task.\n\n Returns:\n dict[str, Tensor]: a dictionary of loss components\n \"\"\"\n losses = dict()\n for i in range(self.num_stages):\n self.current_stage = i\n rcnn_train_cfg = self.train_cfg[i]\n lw = self.stage_loss_weights[i]\n\n # assign gts and sample proposals\n sampling_results = []\n if self.with_bbox or self.with_mask:\n bbox_assigner = self.bbox_assigner[i]\n bbox_sampler = self.bbox_sampler[i]\n num_imgs = len(img_metas)\n if gt_bboxes_ignore is None:\n gt_bboxes_ignore = [None for _ in range(num_imgs)]\n\n for j in range(num_imgs):\n assign_result = bbox_assigner.assign(\n proposal_list[j], gt_bboxes[j], gt_bboxes_ignore[j],\n gt_labels[j])\n sampling_result = bbox_sampler.sample(\n assign_result,\n proposal_list[j],\n gt_bboxes[j],\n gt_labels[j],\n feats=[lvl_feat[j][None] for lvl_feat in x])\n sampling_results.append(sampling_result)\n\n # bbox head forward and loss\n bbox_results = self._bbox_forward_train(i, x, sampling_results,\n gt_bboxes, gt_labels,\n rcnn_train_cfg)\n\n for name, value in bbox_results['loss_bbox'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # mask head forward and loss\n if self.with_mask:\n mask_results = self._mask_forward_train(\n i, 
x, sampling_results, gt_masks, rcnn_train_cfg,\n bbox_results['bbox_feats'])\n for name, value in mask_results['loss_mask'].items():\n losses[f's{i}.{name}'] = (\n value * lw if 'loss' in name else value)\n\n # refine bboxes\n if i < self.num_stages - 1:\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n # bbox_targets is a tuple\n roi_labels = bbox_results['bbox_targets'][0]\n with torch.no_grad():\n roi_labels = torch.where(\n roi_labels == self.bbox_head[i].num_classes,\n bbox_results['cls_score'][:, :-1].argmax(1),\n roi_labels)\n proposal_list = self.bbox_head[i].refine_bboxes(\n bbox_results['rois'], roi_labels,\n bbox_results['bbox_pred'], pos_is_gts, img_metas)\n\n return losses\n\n def simple_test(self, x, proposal_list, img_metas, rescale=False):\n \"\"\"Test without augmentation.\"\"\"\n assert self.with_bbox, 'Bbox head must be implemented.'\n num_imgs = len(proposal_list)\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # \"ms\" in variable names means multi-stage\n ms_bbox_result = {}\n ms_segm_result = {}\n ms_scores = []\n rcnn_test_cfg = self.test_cfg\n\n rois = bbox2roi(proposal_list)\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n\n # split batch bbox prediction back to each image\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n num_proposals_per_img = tuple(\n len(proposals) for proposals in proposal_list)\n rois = rois.split(num_proposals_per_img, 0)\n cls_score = cls_score.split(num_proposals_per_img, 0)\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.split(num_proposals_per_img, 0)\n else:\n bbox_pred = self.bbox_head[i].bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n ms_scores.append(cls_score)\n\n if i < self.num_stages - 1:\n bbox_label = [s[:, :-1].argmax(dim=1) for s in cls_score]\n rois = torch.cat([\n self.bbox_head[i].regress_by_class(rois[j], bbox_label[j],\n bbox_pred[j],\n img_metas[j])\n for j in range(num_imgs)\n ])\n\n # average scores of each image by stages\n cls_score = [\n sum([score[i] for score in ms_scores]) / float(len(ms_scores))\n for i in range(num_imgs)\n ]\n\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(num_imgs):\n det_bbox, det_label = self.bbox_head[-1].get_bboxes(\n rois[i],\n cls_score[i],\n bbox_pred[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n\n if torch.onnx.is_in_onnx_export():\n return det_bboxes, det_labels\n bbox_results = [\n bbox2result(det_bboxes[i], det_labels[i],\n self.bbox_head[-1].num_classes)\n for i in range(num_imgs)\n ]\n ms_bbox_result['ensemble'] = bbox_results\n\n if self.with_mask:\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n mask_classes = self.mask_head[-1].num_classes\n segm_results = [[[] for _ in range(mask_classes)]\n for _ in range(num_imgs)]\n else:\n if rescale and not isinstance(scale_factors[0], float):\n scale_factors = [\n torch.from_numpy(scale_factor).to(det_bboxes[0].device)\n for scale_factor in scale_factors\n ]\n _bboxes = [\n det_bboxes[i][:, :4] *\n scale_factors[i] if rescale else det_bboxes[i][:, :4]\n for i in range(len(det_bboxes))\n ]\n mask_rois = bbox2roi(_bboxes)\n num_mask_rois_per_img = tuple(\n _bbox.size(0) for _bbox in _bboxes)\n aug_masks = []\n for i 
in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n mask_pred = mask_results['mask_pred']\n # split batch mask prediction back to each image\n mask_pred = mask_pred.split(num_mask_rois_per_img, 0)\n aug_masks.append(\n [m.sigmoid().cpu().numpy() for m in mask_pred])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(num_imgs):\n if det_bboxes[i].shape[0] == 0:\n segm_results.append(\n [[]\n for _ in range(self.mask_head[-1].num_classes)])\n else:\n aug_mask = [mask[i] for mask in aug_masks]\n merged_masks = merge_aug_masks(\n aug_mask, [[img_metas[i]]] * self.num_stages,\n rcnn_test_cfg)\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks, _bboxes[i], det_labels[i],\n rcnn_test_cfg, ori_shapes[i], scale_factors[i],\n rescale)\n segm_results.append(segm_result)\n ms_segm_result['ensemble'] = segm_results\n\n if self.with_mask:\n results = list(\n zip(ms_bbox_result['ensemble'], ms_segm_result['ensemble']))\n else:\n results = ms_bbox_result['ensemble']\n\n return results\n\n def aug_test(self, features, proposal_list, img_metas, rescale=False):\n \"\"\"Test with augmentations.\n\n If rescale is False, then returned bboxes and masks will fit the scale\n of imgs[0].\n \"\"\"\n rcnn_test_cfg = self.test_cfg\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(features, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction)\n # \"ms\" in variable names means multi-stage\n ms_scores = []\n\n rois = bbox2roi([proposals])\n for i in range(self.num_stages):\n bbox_results = self._bbox_forward(i, x, rois)\n ms_scores.append(bbox_results['cls_score'])\n\n if i < self.num_stages - 1:\n bbox_label = bbox_results['cls_score'][:, :-1].argmax(\n dim=1)\n rois = self.bbox_head[i].regress_by_class(\n rois, bbox_label, bbox_results['bbox_pred'],\n img_meta[0])\n\n cls_score = sum(ms_scores) / float(len(ms_scores))\n bboxes, scores = self.bbox_head[-1].get_bboxes(\n rois,\n cls_score,\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n\n bbox_result = bbox2result(det_bboxes, det_labels,\n self.bbox_head[-1].num_classes)\n\n if self.with_mask:\n if det_bboxes.shape[0] == 0:\n segm_result = [[]\n for _ in range(self.mask_head[-1].num_classes)]\n else:\n aug_masks = []\n aug_img_metas = []\n for x, img_meta in zip(features, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction)\n mask_rois = bbox2roi([_bboxes])\n for i in range(self.num_stages):\n mask_results = self._mask_forward(i, x, mask_rois)\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n aug_img_metas.append(img_meta)\n merged_masks = merge_aug_masks(aug_masks, 
aug_img_metas,\n self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n segm_result = self.mask_head[-1].get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n rcnn_test_cfg,\n ori_shape,\n scale_factor=1.0,\n rescale=False)\n return [(bbox_result, segm_result)]\n else:\n return [bbox_result]" }, { "identifier": "BBoxTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class BBoxTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False,\n **kwargs):\n \"\"\"Asynchronized test for box head without augmentation.\"\"\"\n rois = bbox2roi(proposals)\n roi_feats = self.bbox_roi_extractor(\n x[:len(self.bbox_roi_extractor.featmap_strides)], rois)\n if self.with_shared_head:\n roi_feats = self.shared_head(roi_feats)\n sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017)\n\n async with completed(\n __name__, 'bbox_head_forward',\n sleep_interval=sleep_interval):\n cls_score, bbox_pred = self.bbox_head(roi_feats)\n\n img_shape = img_metas[0]['img_shape']\n scale_factor = img_metas[0]['scale_factor']\n det_bboxes, det_labels = self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shape,\n scale_factor,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n return det_bboxes, det_labels\n\n def simple_test_bboxes(self,\n x,\n img_metas,\n proposals,\n rcnn_test_cfg,\n rescale=False):\n \"\"\"Test only det bboxes without augmentation.\n\n Args:\n x (tuple[Tensor]): Feature maps of all scale level.\n img_metas (list[dict]): Image meta info.\n proposals (List[Tensor]): Region proposals.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of R-CNN.\n rescale (bool): If True, return boxes in original image space.\n Default: False.\n\n Returns:\n tuple[list[Tensor], list[Tensor]]: The first list contains\n the boxes of the corresponding image in a batch, each\n tensor has the shape (num_boxes, 5) and last dimension\n 5 represent (tl_x, tl_y, br_x, br_y, score). 
Each Tensor\n in the second list is the labels with shape (num_boxes, ).\n The length of both lists should be equal to batch_size.\n \"\"\"\n # get origin input shape to support onnx dynamic input shape\n\n img_shapes = tuple(meta['img_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n max_size = max([proposal.size(0) for proposal in proposals])\n # padding to form a batch\n for i, proposal in enumerate(proposals):\n supplement = proposal.new_full(\n (max_size - proposal.size(0), proposal.size(1)), 0)\n proposals[i] = torch.cat((supplement, proposal), dim=0)\n rois = torch.stack(proposals, dim=0)\n\n batch_index = torch.arange(\n rois.size(0), device=rois.device).float().view(-1, 1, 1).expand(\n rois.size(0), rois.size(1), 1)\n rois = torch.cat([batch_index, rois[..., :4]], dim=-1)\n batch_size = rois.shape[0]\n num_proposals_per_img = rois.shape[1]\n\n # Eliminate the batch dimension\n rois = rois.view(-1, 5)\n bbox_results = self._bbox_forward(x, rois)\n cls_score = bbox_results['cls_score']\n bbox_pred = bbox_results['bbox_pred']\n\n # Recover the batch dimension\n rois = rois.reshape(batch_size, num_proposals_per_img, rois.size(-1))\n cls_score = cls_score.reshape(batch_size, num_proposals_per_img,\n cls_score.size(-1))\n\n # remove padding, ignore batch_index when calculating mask\n supplement_mask = rois.abs()[..., 1:].sum(dim=-1) == 0\n cls_score[supplement_mask, :] = 0\n\n # bbox_pred would be None in some detector when with_reg is False,\n # e.g. Grid R-CNN.\n if bbox_pred is not None:\n # the bbox prediction of some detectors like SABL is not Tensor\n if isinstance(bbox_pred, torch.Tensor):\n bbox_pred = bbox_pred.reshape(batch_size,\n num_proposals_per_img,\n bbox_pred.size(-1))\n bbox_pred[supplement_mask, :] = 0\n else:\n # TODO: Looking forward to a better way\n # TODO move these special process to a corresponding head\n # For SABL\n bbox_preds = self.bbox_head.bbox_pred_split(\n bbox_pred, num_proposals_per_img)\n # apply bbox post-processing to each image individually\n det_bboxes = []\n det_labels = []\n for i in range(len(proposals)):\n # remove padding\n supplement_mask = proposals[i].abs().sum(dim=-1) == 0\n for bbox in bbox_preds[i]:\n bbox[supplement_mask] = 0\n det_bbox, det_label = self.bbox_head.get_bboxes(\n rois[i],\n cls_score[i],\n bbox_preds[i],\n img_shapes[i],\n scale_factors[i],\n rescale=rescale,\n cfg=rcnn_test_cfg)\n det_bboxes.append(det_bbox)\n det_labels.append(det_label)\n return det_bboxes, det_labels\n else:\n bbox_pred = None\n\n return self.bbox_head.get_bboxes(\n rois,\n cls_score,\n bbox_pred,\n img_shapes,\n scale_factors,\n rescale=rescale,\n cfg=rcnn_test_cfg)\n\n def aug_test_bboxes(self, feats, img_metas, proposal_list, rcnn_test_cfg):\n \"\"\"Test det bboxes with test time augmentation.\"\"\"\n aug_bboxes = []\n aug_scores = []\n for x, img_meta in zip(feats, img_metas):\n # only one image in the batch\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n # TODO more flexible\n proposals = bbox_mapping(proposal_list[0][:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n rois = bbox2roi([proposals])\n bbox_results = self._bbox_forward(x, rois)\n bboxes, scores = self.bbox_head.get_bboxes(\n 
rois,\n bbox_results['cls_score'],\n bbox_results['bbox_pred'],\n img_shape,\n scale_factor,\n rescale=False,\n cfg=None)\n aug_bboxes.append(bboxes)\n aug_scores.append(scores)\n # after merging, bboxes will be rescaled to the original image size\n merged_bboxes, merged_scores = merge_aug_bboxes(\n aug_bboxes, aug_scores, img_metas, rcnn_test_cfg)\n det_bboxes, det_labels = multiclass_nms(merged_bboxes, merged_scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img)\n return det_bboxes, det_labels" }, { "identifier": "MaskTestMixin", "path": "PointOBB/mmdet/models/roi_heads/test_mixins.py", "snippet": "class MaskTestMixin:\n\n if sys.version_info >= (3, 7):\n\n async def async_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False,\n mask_test_cfg=None):\n \"\"\"Asynchronized test for mask head without augmentation.\"\"\"\n # image shape of the first image in the batch (only one)\n ori_shape = img_metas[0]['ori_shape']\n scale_factor = img_metas[0]['scale_factor']\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n if rescale:\n scale_factor = det_bboxes.new_tensor(scale_factor)\n _bboxes = (\n det_bboxes[:, :4] *\n scale_factor if rescale else det_bboxes)\n mask_rois = bbox2roi([_bboxes])\n mask_feats = self.mask_roi_extractor(\n x[:len(self.mask_roi_extractor.featmap_strides)],\n mask_rois)\n\n if self.with_shared_head:\n mask_feats = self.shared_head(mask_feats)\n if mask_test_cfg and mask_test_cfg.get('async_sleep_interval'):\n sleep_interval = mask_test_cfg['async_sleep_interval']\n else:\n sleep_interval = 0.035\n async with completed(\n __name__,\n 'mask_head_forward',\n sleep_interval=sleep_interval):\n mask_pred = self.mask_head(mask_feats)\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape,\n scale_factor, rescale)\n return segm_result\n\n def simple_test_mask(self,\n x,\n img_metas,\n det_bboxes,\n det_labels,\n rescale=False):\n \"\"\"Simple test for mask head without augmentation.\"\"\"\n # image shapes of images in the batch\n ori_shapes = tuple(meta['ori_shape'] for meta in img_metas)\n scale_factors = tuple(meta['scale_factor'] for meta in img_metas)\n\n if all(det_bbox.shape[0] == 0 for det_bbox in det_bboxes):\n segm_results = [[[] for _ in range(self.mask_head.num_classes)]\n for _ in range(len(det_bboxes))]\n return segm_results\n\n # The length of proposals of different batches may be different.\n # In order to form a batch, a padding operation is required.\n\n # padding to form a batch\n max_size = max([bboxes.size(0) for bboxes in det_bboxes])\n for i, (bbox, label) in enumerate(zip(det_bboxes, det_labels)):\n supplement_bbox = bbox.new_full(\n (max_size - bbox.size(0), bbox.size(1)), 0)\n supplement_label = label.new_full((max_size - label.size(0), ), 0)\n det_bboxes[i] = torch.cat((supplement_bbox, bbox), dim=0)\n det_labels[i] = torch.cat((supplement_label, label), dim=0)\n det_bboxes = torch.stack(det_bboxes, dim=0)\n det_labels = torch.stack(det_labels, dim=0)\n\n batch_size = det_bboxes.size(0)\n num_proposals_per_img = det_bboxes.shape[1]\n\n # if det_bboxes is rescaled to the original image size, we need to\n # rescale it back to the testing scale to obtain RoIs.\n det_bboxes = det_bboxes[..., :4]\n if rescale:\n scale_factors = det_bboxes.new_tensor(scale_factors)\n det_bboxes = det_bboxes * scale_factors.unsqueeze(1)\n\n batch_index = torch.arange(\n det_bboxes.size(0), 
device=det_bboxes.device).float().view(\n -1, 1, 1).expand(det_bboxes.size(0), det_bboxes.size(1), 1)\n mask_rois = torch.cat([batch_index, det_bboxes], dim=-1)\n mask_rois = mask_rois.view(-1, 5)\n mask_results = self._mask_forward(x, mask_rois)\n mask_pred = mask_results['mask_pred']\n\n # Recover the batch dimension\n mask_preds = mask_pred.reshape(batch_size, num_proposals_per_img,\n *mask_pred.shape[1:])\n\n # apply mask post-processing to each image individually\n segm_results = []\n for i in range(batch_size):\n mask_pred = mask_preds[i]\n det_bbox = det_bboxes[i]\n det_label = det_labels[i]\n\n # remove padding\n supplement_mask = det_bbox.abs().sum(dim=-1) != 0\n mask_pred = mask_pred[supplement_mask]\n det_bbox = det_bbox[supplement_mask]\n det_label = det_label[supplement_mask]\n\n if det_label.shape[0] == 0:\n segm_results.append([[]\n for _ in range(self.mask_head.num_classes)\n ])\n else:\n segm_result = self.mask_head.get_seg_masks(\n mask_pred, det_bbox, det_label, self.test_cfg,\n ori_shapes[i], scale_factors[i], rescale)\n segm_results.append(segm_result)\n return segm_results\n\n def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels):\n \"\"\"Test for mask head with test time augmentation.\"\"\"\n if det_bboxes.shape[0] == 0:\n segm_result = [[] for _ in range(self.mask_head.num_classes)]\n else:\n aug_masks = []\n for x, img_meta in zip(feats, img_metas):\n img_shape = img_meta[0]['img_shape']\n scale_factor = img_meta[0]['scale_factor']\n flip = img_meta[0]['flip']\n flip_direction = img_meta[0]['flip_direction']\n _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape,\n scale_factor, flip, flip_direction, img_meta[0].get('tile_offset', None)) # add by hui\n mask_rois = bbox2roi([_bboxes])\n mask_results = self._mask_forward(x, mask_rois)\n # convert to numpy array to save memory\n aug_masks.append(\n mask_results['mask_pred'].sigmoid().cpu().numpy())\n merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg)\n\n ori_shape = img_metas[0][0]['ori_shape']\n scale_factor = det_bboxes.new_ones(4)\n segm_result = self.mask_head.get_seg_masks(\n merged_masks,\n det_bboxes,\n det_labels,\n self.test_cfg,\n ori_shape,\n scale_factor=scale_factor,\n rescale=False)\n return segm_result" }, { "identifier": "obb2xyxy", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2xyxy(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to horizontal bounding boxes.\n\n Args:\n obbs (torch.Tensor): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n hbbs (torch.Tensor): [x_lt,y_lt,x_rb,y_rb]\n \"\"\"\n if version == 'oc':\n results = obb2xyxy_oc(rbboxes)\n elif version == 'le135':\n results = obb2xyxy_le135(rbboxes)\n elif version == 'le90':\n results = obb2xyxy_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" }, { "identifier": "regularize_boxes", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def regularize_boxes(boxes,\n pattern: str = None,\n width_longer: bool = True,\n start_angle: float = -90) -> Tensor:\n \"\"\"Regularize rotated boxes.\n\n Due to the angle periodicity, one rotated box can be represented in\n many different (x, y, w, h, t). To make each rotated box unique,\n ``regularize_boxes`` will take the remainder of the angle divided by\n 180 degrees.\n\n However, after taking the remainder of the angle, there are still two\n representations for one rotate box. For example, (0, 0, 4, 5, 0.5) and\n (0, 0, 5, 4, 0.5 + pi/2) are the same areas in the image. 
To solve the\n problem, the code will swap edges w.r.t ``width_longer``:\n\n - width_longer=True: Make sure the width is longer than the height. If\n not, swap the width and height. The angle ranges in [start_angle,\n start_angle + 180). For the above example, the rotated box will be\n represented as (0, 0, 5, 4, 0.5 + pi/2).\n - width_longer=False: Make sure the angle is lower than\n start_angle+pi/2. If not, swap the width and height. The angle\n ranges in [start_angle, start_angle + 90). For the above example,\n the rotated box will be represented as (0, 0, 4, 5, 0.5).\n\n For convenience, three commonly used patterns are preset in\n ``regualrize_boxes``:\n\n - 'oc': OpenCV Definition. Has the same box representation as\n ``cv2.minAreaRect`` the angle ranges in [-90, 0). Equal to set\n width_longer=False and start_angle=-90.\n - 'le90': Long Edge Definition (90). the angle ranges in [-90, 90).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-90.\n - 'le135': Long Edge Definition (135). the angle ranges in [-45, 135).\n The width is always longer than the height. Equal to set\n width_longer=True and start_angle=-45.\n\n Args:\n pattern (str, Optional): Regularization pattern. Can only be 'oc',\n 'le90', or 'le135'. Defaults to None.\n width_longer (bool): Whether to make sure width is larger than\n height. Defaults to True.\n start_angle (float): The starting angle of the box angle\n represented in degrees. Defaults to -90.\n\n Returns:\n Tensor: Regularized box tensor.\n \"\"\"\n\n if pattern is not None:\n if pattern == 'oc':\n width_longer, start_angle = False, -90\n elif pattern == 'le90':\n width_longer, start_angle = True, -90\n elif pattern == 'le135':\n width_longer, start_angle = True, -45\n else:\n raise ValueError(\"pattern only can be 'oc', 'le90', and\"\n f\"'le135', but get {pattern}.\")\n start_angle = start_angle / 180 * np.pi\n\n x, y, w, h, t = boxes.unbind(dim=-1)\n if width_longer:\n # swap edge and angle if h >= w\n w_ = torch.where(w > h, w, h)\n h_ = torch.where(w > h, h, w)\n t = torch.where(w > h, t, t + np.pi / 2)\n t = ((t - start_angle) % np.pi) + start_angle\n else:\n # swap edge and angle if angle > pi/2\n t = ((t - start_angle) % np.pi)\n w_ = torch.where(t < np.pi / 2, w, h)\n h_ = torch.where(t < np.pi / 2, h, w)\n t = torch.where(t < np.pi / 2, t, t - np.pi / 2) + start_angle\n obb = torch.stack([x, y, w_, h_, t], dim=-1)\n return obb" }, { "identifier": "reduce_mean", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def reduce_mean(tensor):\n \"\"\"\"Obtain the mean of tensor on different GPUs.\"\"\"\n if not (dist.is_available() and dist.is_initialized()):\n return tensor\n tensor = tensor.clone()\n dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM)\n return tensor" }, { "identifier": "obb2poly_np", "path": "PointOBB/mmdet/models/detectors/utils.py", "snippet": "def obb2poly_np(rbboxes, version='oc'):\n \"\"\"Convert oriented bounding boxes to polygons.\n\n Args:\n obbs (ndarray): [x_ctr,y_ctr,w,h,angle]\n version (Str): angle representations.\n\n Returns:\n polys (ndarray): [x0,y0,x1,y1,x2,y2,x3,y3]\n \"\"\"\n if version == 'oc':\n results = obb2poly_np_oc(rbboxes)\n elif version == 'le135':\n results = obb2poly_np_le135(rbboxes)\n elif version == 'le90':\n results = obb2poly_np_le90(rbboxes)\n else:\n raise NotImplementedError\n return results" } ]
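The `regularize_boxes` snippet in the context above canonicalizes the two equivalent (x, y, w, h, t) representations of a rotated box. A minimal usage sketch, assuming the function is importable from this repo's detectors.utils module and reusing the box from its own docstring example:

# Sketch only: the import path is assumed from the context entry's file_path.
from mmdet.models.detectors.utils import regularize_boxes
import torch

box = torch.tensor([[0.0, 0.0, 4.0, 5.0, 0.5]])  # (x, y, w, h, t) with h > w

out = regularize_boxes(box, pattern='le90')
# 'le90' forces the width to be the longer edge and wraps the angle into
# [-pi/2, pi/2): the w/h swap gives t = 0.5 + pi/2 ~ 2.07, which wraps to
# 0.5 - pi/2 ~ -1.07, so out ~ tensor([[0., 0., 5., 4., -1.0708]])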
import math import torch import torch.nn.functional as F import torch.nn as nn import copy import numpy as np import cv2 from mmdet.core import bbox2result, bbox2roi, rbbox2roi, build_assigner, build_sampler, multi_apply from ..builder import HEADS, MODELS, build_head, build_roi_extractor, build_loss from .standard_roi_head import StandardRoIHead from .cascade_roi_head import CascadeRoIHead from mmdet.core.bbox.iou_calculators import bbox_overlaps from .test_mixins import BBoxTestMixin, MaskTestMixin from mmdet.core.bbox import bbox_xyxy_to_cxcywh from mmdet.core.bbox.transforms import rbbox2result from mmcv.cnn import Scale, ConvModule from mmcv.ops import box_iou_rotated from typing import Any, List, Sequence, Tuple, Union from torch import Tensor from mmdet.models.utils.base_bbox_coder import BaseBBoxCoder from ..detectors.utils import obb2xyxy, regularize_boxes, reduce_mean, obb2poly_np
16504
bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) return outs def grid_priors(self, featmap_sizes: List[Tuple], dtype: torch.dtype = torch.float32, device = 'cuda', with_stride: bool = False): num_levels = len(self.featmap_strides) assert num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device = 'cuda', offset = 0.5, with_stride: bool = False) -> Tensor: feat_h, feat_w = featmap_size stride_w = self.featmap_strides[level_idx] stride_h = stride_w shift_x = ((torch.arange(0, feat_w, device=device) + offset) * stride_w).to(dtype) shift_y = ((torch.arange(0, feat_h, device=device) + offset) * stride_h).to(dtype) shift_xx, shift_yy = meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def get_targets(self, x, points, gt_points, proposals, gt_labels, img_metas): self.norm_on_bbox = True num_levels = len(x) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] labels_list, angle_targets_list, id_targets_list = multi_apply( self._get_targets_single, gt_points, proposals, gt_labels, img_metas, points=concat_points, num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] angle_targets_list = [ angle_targets.split(num_points, 0) for angle_targets in angle_targets_list ] id_targets_list = [ id_targets.split(num_points, 0) for id_targets in id_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_angle_targets = [] concat_lvl_id_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) # bbox_targets = torch.cat( # [bbox_targets[i] for bbox_targets in bbox_targets_list]) angle_targets = torch.cat( [angle_targets[i] for angle_targets in angle_targets_list]) id_targets = torch.cat( [id_targets[i] for id_targets in id_targets_list]) concat_lvl_angle_targets.append(angle_targets) concat_lvl_id_targets.append(id_targets) return (concat_lvl_labels, concat_lvl_angle_targets, concat_lvl_id_targets) def _get_targets_single( self, gt_points, proposals, gt_label, img_meta, points, num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor, Tensor]: """Compute regression and classification targets for a single image.""" self.center_sampling = True self.center_sample_radius = 1.5 self.pseudow = 3 self.pseudoh = 2 num_points = points.size(0) num_gts = len(gt_points) gt_labels = gt_label gt_bid = img_meta['gt_bid'] gen_proposals = proposals.reshape(len(gt_points), -1, proposals.size(-1)) if gt_points.size(-1) == 2: extra_tensor = torch.tensor([self.pseudow, self.pseudoh, gen_proposals[0,0,-1]], device=gt_points.device, dtype=gt_points.dtype).repeat(len(gt_points), 1) gt_bboxes = torch.cat((gt_points, extra_tensor), dim=1) else: gt_bboxes = gt_points.clone() if num_gts == 0: return 
gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 1)), \ gt_bboxes.new_zeros((num_points,)) areas = (gt_bboxes[:,2] * gt_bboxes[:,3]).squeeze()
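For reference, the arithmetic inside `single_level_grid_priors` above reduces to a strided, offset meshgrid that maps each feature-map cell to an image-space center point. A self-contained sketch with an illustrative stride of 8 and a 2x3 feature map:

# Self-contained sketch of the prior-point arithmetic (values illustrative).
import torch

stride, feat_h, feat_w, offset = 8, 2, 3, 0.5
shift_x = (torch.arange(feat_w) + offset) * stride   # tensor([ 4., 12., 20.])
shift_y = (torch.arange(feat_h) + offset) * stride   # tensor([ 4., 12.])
yy, xx = torch.meshgrid(shift_y, shift_x)            # row-major ('ij') grid
points = torch.stack([xx.reshape(-1), yy.reshape(-1)], dim=-1)
# One image-space (x, y) center per feature-map cell:
# [[ 4,  4], [12,  4], [20,  4],
#  [ 4, 12], [12, 12], [20, 12]]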
RangeType = Sequence[Tuple[int, int]]

INF = 1e8


def meshgrid(x: Tensor,
             y: Tensor,
             row_major: bool = True) -> Tuple[Tensor, Tensor]:
    yy, xx = torch.meshgrid(y, x)
    if row_major:
        # warning: .flatten() would cause an error in ONNX exporting,
        # so reshape is used here instead
        return xx.reshape(-1), yy.reshape(-1)
    else:
        return yy.reshape(-1), xx.reshape(-1)


def obb2cxcywh_le90(obboxes):
    """Convert oriented bounding boxes to their axis-aligned enclosing boxes.

    The result keeps the (x_ctr, y_ctr, w, h, angle) layout, with the angle
    set to zero.

    Args:
        obboxes (torch.Tensor): [x_ctr, y_ctr, w, h, angle]

    Returns:
        torch.Tensor: [x_ctr, y_ctr, w, h, 0]
    """
    center, w, h, theta = torch.split(obboxes, [2, 1, 1, 1], dim=-1)
    Cos, Sin = torch.cos(theta), torch.sin(theta)
    x_bias = torch.abs(w / 2 * Cos) + torch.abs(h / 2 * Sin)
    y_bias = torch.abs(w / 2 * Sin) + torch.abs(h / 2 * Cos)
    bias = torch.cat([x_bias, y_bias], dim=-1)
    wh = bias * 2
    return torch.cat([center, wh, torch.zeros_like(theta)], dim=-1)


@HEADS.register_module()
class PSCCoder(BaseBBoxCoder):
    """Phase-Shifting Coder.

    `Phase-Shifting Coder (PSC) <https://arxiv.org/abs/2211.06368>`.

    Args:
        angle_version (str): Angle definition.
            Only 'le90' is supported at present.
        dual_freq (bool, optional): Use dual frequency. Default: True.
        num_step (int, optional): Number of phase steps. Default: 3.
        thr_mod (float): Threshold of modulation. Default: 0.47.
    """

    def __init__(self,
                 angle_version: str,
                 dual_freq: bool = True,
                 num_step: int = 3,
                 thr_mod: float = 0.47):
        super().__init__()
        self.angle_version = angle_version
        assert angle_version in ['le90']
        self.dual_freq = dual_freq
        self.num_step = num_step
        self.thr_mod = thr_mod
        if self.dual_freq:
            self.encode_size = 2 * self.num_step
        else:
            self.encode_size = self.num_step

        self.coef_sin = torch.tensor(
            tuple(
                torch.sin(torch.tensor(2 * k * math.pi / self.num_step))
                for k in range(self.num_step)))
        self.coef_cos = torch.tensor(
            tuple(
                torch.cos(torch.tensor(2 * k * math.pi / self.num_step))
                for k in range(self.num_step)))

    def encode(self, angle_targets: Tensor) -> Tensor:
        """Phase-Shifting Encoder.

        Args:
            angle_targets (Tensor): Angle offset for each scale level.
                Has shape (num_anchors * H * W, 1)

        Returns:
            Tensor: The psc coded data (phase-shifting patterns) for each
                scale level. Has shape (num_anchors * H * W, encode_size)
        """
        phase_targets = angle_targets * 2
        phase_shift_targets = tuple(
            torch.cos(phase_targets + 2 * math.pi * x / self.num_step)
            for x in range(self.num_step))

        # Dual-freq PSC for square-like problem
        if self.dual_freq:
            phase_targets = angle_targets * 4
            phase_shift_targets += tuple(
                torch.cos(phase_targets + 2 * math.pi * x / self.num_step)
                for x in range(self.num_step))

        return torch.cat(phase_shift_targets, axis=-1)

    def decode(self, angle_preds: Tensor, keepdim: bool = False) -> Tensor:
        """Phase-Shifting Decoder.

        Args:
            angle_preds (Tensor): The psc coded data (phase-shifting patterns)
                for each scale level.
                Has shape (num_anchors * H * W, encode_size)
            keepdim (bool): Whether the output tensor has dim retained or not.

        Returns:
            Tensor: Angle offset for each scale level.
                Has shape (num_anchors * H * W, 1) when keepdim is true,
                (num_anchors * H * W) otherwise
        """
        self.coef_sin = self.coef_sin.to(angle_preds)
        self.coef_cos = self.coef_cos.to(angle_preds)

        phase_sin = torch.sum(
            angle_preds[:, 0:self.num_step] * self.coef_sin,
            dim=-1,
            keepdim=keepdim)
        phase_cos = torch.sum(
            angle_preds[:, 0:self.num_step] * self.coef_cos,
            dim=-1,
            keepdim=keepdim)
        phase_mod = phase_cos**2 + phase_sin**2
        phase = -torch.atan2(phase_sin, phase_cos)  # In range [-pi,pi)

        if self.dual_freq:
            phase_sin = torch.sum(
                angle_preds[:, self.num_step:(2 * self.num_step)] *
                self.coef_sin,
                dim=-1,
                keepdim=keepdim)
            phase_cos = torch.sum(
                angle_preds[:, self.num_step:(2 * self.num_step)] *
                self.coef_cos,
                dim=-1,
                keepdim=keepdim)
            phase_mod = phase_cos**2 + phase_sin**2
            phase2 = -torch.atan2(phase_sin, phase_cos) / 2

            # Phase unwrapping, dual-freq mixing:
            # the angle between phase and phase2 is obtuse
            idx = torch.cos(phase) * torch.cos(phase2) + torch.sin(
                phase) * torch.sin(phase2) < 0
            # Add pi to phase2 and keep it in range [-pi,pi)
            phase2[idx] = phase2[idx] % (2 * math.pi) - math.pi
            phase = phase2

        # Set the angle of isotropic objects to zero
        phase[phase_mod < self.thr_mod] *= 0
        angle_pred = phase / 2
        return angle_pred


@HEADS.register_module()
class PointOBBHead(StandardRoIHead):
    """RoI head of PointOBB: a StandardRoIHead extended with an
    angle-prediction branch for point-supervised oriented detection."""

    def __init__(self,
                 bbox_roi_extractor,
                 num_stages,
                 bbox_head,
                 top_k=7,
                 with_atten=None,
                 conv_cfg=None,
                 norm_cfg=None,
                 scale_angle: bool = True,
                 stacked_convs=4,
                 loss_symmetry_ss=dict(
                     type='SmoothL1Loss', loss_weight=1.0, beta=0.1),
                 angle_coder=dict(
                     type='PSCCoder',
                     angle_version='le90',
                     dual_freq=False,
                     num_step=3,
                     thr_mod=0),
                 angle_version='le90',
                 use_angle_loss=True,
                 add_angle_pred_begin=False,
                 not_use_rot_mil=False,
                 detach_angle_head=False,
                 rotation_agnostic_classes=None,
                 agnostic_resize_classes=None,
                 cls_scores_weight=1.0,
                 ins_scores_weight=1.0,
                 **kwargs):
        super(PointOBBHead, self).__init__(
            bbox_roi_extractor=bbox_roi_extractor,
            bbox_head=bbox_head,
            **kwargs)
        self.threshold = 0.3
        self.merge_mode = 'weighted_clsins'
        self.test_mean_iou = False
        # self.test_mean_iou = True

        self.sum_iou = 0
        self.sum_num = 0
        self.num_stages = num_stages
        self.topk1 = top_k  # 7
        self.topk2 = top_k  # 7

        self.featmap_strides = bbox_roi_extractor.featmap_strides
        self.with_atten = with_atten

        self.conv_cfg = conv_cfg
        self.norm_cfg = norm_cfg
        self.in_channels = 256
        self.feat_channels = 256
        self.stacked_convs = stacked_convs
        self.is_scale_angle = scale_angle
        self.angle_coder = HEADS.build(angle_coder)
        self.loss_symmetry_ss = build_loss(loss_symmetry_ss)
        self.angle_version = angle_version
        self.rotation_agnostic_classes = rotation_agnostic_classes
        self.agnostic_resize_classes = agnostic_resize_classes
        self.add_angle_pred_begin = add_angle_pred_begin
        self.use_angle_loss = use_angle_loss
        self.not_use_rot_mil = not_use_rot_mil
        self.detach_angle_head = detach_angle_head
        self.cls_scores_weight = cls_scores_weight
        self.ins_scores_weight = ins_scores_weight
        self.num_classes = self.bbox_head.num_classes
        self._init_layers()

    def _init_layers(self):
        """Initialize layers of the head."""
        self.relu = nn.ReLU(inplace=True)
        self.cls_convs = nn.ModuleList()
        for i in range(self.stacked_convs):
            chn = self.in_channels if i == 0 else self.feat_channels
            self.cls_convs.append(
                ConvModule(
                    chn,
                    self.feat_channels,
                    3,
                    stride=1,
                    padding=1,
                    conv_cfg=self.conv_cfg,
                    norm_cfg=self.norm_cfg))
        self.conv_angle = nn.Conv2d(
            self.feat_channels, self.angle_coder.encode_size, 3, padding=1)
        if
self.is_scale_angle: self.scale_angle = Scale(1.0) def angle_forward(self, feats: Tuple[Tensor]) -> Tuple[List[Tensor], List[Tensor]]: angle_results = [] for feat in feats: if self.detach_angle_head: feat_detach = feat.clone().detach() single_angle_pred = self.angle_forward_single(feat_detach) else: single_angle_pred = self.angle_forward_single(feat) angle_results.append(single_angle_pred) return tuple(angle_results) def angle_forward_single(self, x: Tensor): cls_feat = x for cls_layer in self.cls_convs: cls_feat = cls_layer(cls_feat) # cls_score = self.conv_cls(cls_feat) angle_pred = self.conv_angle(cls_feat) if self.is_scale_angle: angle_pred = self.scale_angle(angle_pred).float() return angle_pred def init_assigner_sampler(self): """Initialize assigner and sampler.""" self.bbox_assigner = None self.bbox_sampler = None if self.train_cfg: self.bbox_assigner = build_assigner(self.train_cfg.assigner) self.bbox_sampler = build_sampler( self.train_cfg.sampler, context=self) def init_bbox_head(self, bbox_roi_extractor, bbox_head): """Initialize ``bbox_head``""" self.bbox_roi_extractor = build_roi_extractor(bbox_roi_extractor) # self.cdb = build_head(dict(type='ConvConcreteDB', cfg=None, planes=256)) self.bbox_head = build_head(bbox_head) def init_mask_head(self, mask_roi_extractor, mask_head): """Initialize ``mask_head``""" if mask_roi_extractor is not None: self.mask_roi_extractor = build_roi_extractor(mask_roi_extractor) self.share_roi_extractor = False else: self.share_roi_extractor = True self.mask_roi_extractor = self.bbox_roi_extractor self.mask_head = build_head(mask_head) def forward_dummy(self, x, proposals): """Dummy forward function.""" # bbox head outs = () rois = bbox2roi([proposals]) if self.with_bbox: bbox_results = self._bbox_forward(x, rois) outs = outs + (bbox_results['cls_score'], bbox_results['bbox_pred']) return outs def grid_priors(self, featmap_sizes: List[Tuple], dtype: torch.dtype = torch.float32, device = 'cuda', with_stride: bool = False): num_levels = len(self.featmap_strides) assert num_levels == len(featmap_sizes) multi_level_priors = [] for i in range(num_levels): priors = self.single_level_grid_priors( featmap_sizes[i], level_idx=i, dtype=dtype, device=device, with_stride=with_stride) multi_level_priors.append(priors) return multi_level_priors def single_level_grid_priors(self, featmap_size: Tuple[int], level_idx: int, dtype: torch.dtype = torch.float32, device = 'cuda', offset = 0.5, with_stride: bool = False) -> Tensor: feat_h, feat_w = featmap_size stride_w = self.featmap_strides[level_idx] stride_h = stride_w shift_x = ((torch.arange(0, feat_w, device=device) + offset) * stride_w).to(dtype) shift_y = ((torch.arange(0, feat_h, device=device) + offset) * stride_h).to(dtype) shift_xx, shift_yy = meshgrid(shift_x, shift_y) if not with_stride: shifts = torch.stack([shift_xx, shift_yy], dim=-1) else: stride_w = shift_xx.new_full((shift_xx.shape[0], ), stride_w).to(dtype) stride_h = shift_xx.new_full((shift_yy.shape[0], ), stride_h).to(dtype) shifts = torch.stack([shift_xx, shift_yy, stride_w, stride_h], dim=-1) all_points = shifts.to(device) return all_points def get_targets(self, x, points, gt_points, proposals, gt_labels, img_metas): self.norm_on_bbox = True num_levels = len(x) concat_points = torch.cat(points, dim=0) # the number of points per img, per lvl num_points = [center.size(0) for center in points] labels_list, angle_targets_list, id_targets_list = multi_apply( self._get_targets_single, gt_points, proposals, gt_labels, img_metas, points=concat_points, 
num_points_per_lvl=num_points) # split to per img, per level labels_list = [labels.split(num_points, 0) for labels in labels_list] angle_targets_list = [ angle_targets.split(num_points, 0) for angle_targets in angle_targets_list ] id_targets_list = [ id_targets.split(num_points, 0) for id_targets in id_targets_list ] # concat per level image concat_lvl_labels = [] concat_lvl_angle_targets = [] concat_lvl_id_targets = [] for i in range(num_levels): concat_lvl_labels.append( torch.cat([labels[i] for labels in labels_list])) # bbox_targets = torch.cat( # [bbox_targets[i] for bbox_targets in bbox_targets_list]) angle_targets = torch.cat( [angle_targets[i] for angle_targets in angle_targets_list]) id_targets = torch.cat( [id_targets[i] for id_targets in id_targets_list]) concat_lvl_angle_targets.append(angle_targets) concat_lvl_id_targets.append(id_targets) return (concat_lvl_labels, concat_lvl_angle_targets, concat_lvl_id_targets) def _get_targets_single( self, gt_points, proposals, gt_label, img_meta, points, num_points_per_lvl: List[int]) -> Tuple[Tensor, Tensor, Tensor]: """Compute regression and classification targets for a single image.""" self.center_sampling = True self.center_sample_radius = 1.5 self.pseudow = 3 self.pseudoh = 2 num_points = points.size(0) num_gts = len(gt_points) gt_labels = gt_label gt_bid = img_meta['gt_bid'] gen_proposals = proposals.reshape(len(gt_points), -1, proposals.size(-1)) if gt_points.size(-1) == 2: extra_tensor = torch.tensor([self.pseudow, self.pseudoh, gen_proposals[0,0,-1]], device=gt_points.device, dtype=gt_points.dtype).repeat(len(gt_points), 1) gt_bboxes = torch.cat((gt_points, extra_tensor), dim=1) else: gt_bboxes = gt_points.clone() if num_gts == 0: return gt_labels.new_full((num_points,), self.num_classes), \ gt_bboxes.new_zeros((num_points, 1)), \ gt_bboxes.new_zeros((num_points,)) areas = (gt_bboxes[:,2] * gt_bboxes[:,3]).squeeze()
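As a sanity check on the `PSCCoder` above, the single-frequency encode/decode pair is a closed round trip: an angle t is stored as cos(2t + 2*pi*k/N) for k = 0..N-1 and recovered with atan2. A self-contained sketch for N = 3:

# Round-trip sketch of the phase-shifting code (single freq, num_step = 3).
import math
import torch

t = torch.tensor([[0.3]])                        # angle target, radians
enc = torch.cat(
    [torch.cos(2 * t + 2 * math.pi * k / 3) for k in range(3)], dim=-1)

coef_sin = torch.tensor([math.sin(2 * math.pi * k / 3) for k in range(3)])
coef_cos = torch.tensor([math.cos(2 * math.pi * k / 3) for k in range(3)])
phase = -torch.atan2((enc * coef_sin).sum(-1), (enc * coef_cos).sum(-1))
angle = phase / 2                                # tensor([0.3000]) recovered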
gt_bboxes = regularize_boxes(gt_bboxes, pattern=self.angle_version)
10
2023-11-20 07:50:12+00:00
24k
ModelTC/EasyLLM
llm/models/hf_models/qwen_vl/modeling_qwen.py
[ { "identifier": "QWenConfig", "path": "llm/models/hf_models/qwen_vl/configuration_qwen.py", "snippet": "class QWenConfig(PretrainedConfig):\n model_type = \"qwen\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=151936,\n hidden_size=4096,\n num_hidden_layers=32,\n num_attention_heads=32,\n emb_dropout_prob=0.0,\n attn_dropout_prob=0.0,\n layer_norm_epsilon=1e-6,\n initializer_range=0.02,\n max_position_embeddings=8192,\n scale_attn_weights=True,\n use_cache=True,\n bf16=False,\n fp16=False,\n fp32=False,\n kv_channels=128,\n rotary_pct=1.0,\n rotary_emb_base=10000,\n use_dynamic_ntk=True,\n use_logn_attn=True,\n use_flash_attn=\"auto\",\n intermediate_size=22016,\n no_bias=True,\n tie_word_embeddings=False,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.emb_dropout_prob = emb_dropout_prob\n self.attn_dropout_prob = attn_dropout_prob\n self.layer_norm_epsilon = layer_norm_epsilon\n self.initializer_range = initializer_range\n self.scale_attn_weights = scale_attn_weights\n self.use_cache = use_cache\n self.max_position_embeddings = max_position_embeddings\n self.bf16 = bf16\n self.fp16 = fp16\n self.fp32 = fp32\n self.kv_channels = kv_channels\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.use_dynamic_ntk = use_dynamic_ntk\n self.use_logn_attn = use_logn_attn\n self.use_flash_attn = use_flash_attn\n self.no_bias = no_bias\n super().__init__(\n tie_word_embeddings=tie_word_embeddings,\n **kwargs\n )" }, { "identifier": "make_context", "path": "llm/models/hf_models/qwen_vl/qwen_generation_utils.py", "snippet": "def make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n if history is None:\n history = []\n\n if chat_format == \"chatml\":\n im_start, im_end = \"<|im_start|>\", \"<|im_end|>\"\n im_start_tokens = [tokenizer.im_start_id]\n im_end_tokens = [tokenizer.im_end_id]\n nl_tokens = tokenizer.encode(\"\\n\")\n\n def _tokenize_str(role, content):\n return f\"{role}\\n{content}\", tokenizer.encode(\n role, allowed_special=set(tokenizer.IMAGE_ST)\n ) + nl_tokens + tokenizer.encode(content, allowed_special=set(tokenizer.IMAGE_ST))\n\n system_text, system_tokens_part = _tokenize_str(\"system\", system)\n system_tokens = im_start_tokens + system_tokens_part + im_end_tokens\n\n raw_text = \"\"\n context_tokens = []\n\n for turn_query, turn_response in reversed(history):\n query_text, query_tokens_part = _tokenize_str(\"user\", turn_query)\n query_tokens = im_start_tokens + query_tokens_part + im_end_tokens\n if turn_response is not None:\n response_text, response_tokens_part = _tokenize_str(\n \"assistant\", turn_response\n )\n response_tokens = im_start_tokens + response_tokens_part + im_end_tokens\n\n next_context_tokens = nl_tokens + query_tokens + nl_tokens + response_tokens\n prev_chat = (\n f\"\\n{im_start}{query_text}{im_end}\\n{im_start}{response_text}{im_end}\"\n )\n else:\n next_context_tokens = nl_tokens + query_tokens + nl_tokens\n prev_chat = f\"\\n{im_start}{query_text}{im_end}\\n\"\n\n current_context_size = (\n len(system_tokens) + len(next_context_tokens) + len(context_tokens)\n )\n if current_context_size < max_window_size:\n context_tokens = next_context_tokens + context_tokens\n 
raw_text = prev_chat + raw_text\n else:\n break\n\n context_tokens = system_tokens + context_tokens\n raw_text = f\"{im_start}{system_text}{im_end}\" + raw_text\n context_tokens += (\n nl_tokens\n + im_start_tokens\n + _tokenize_str(\"user\", query)[1]\n + im_end_tokens\n + nl_tokens\n + im_start_tokens\n + tokenizer.encode(\"assistant\")\n + nl_tokens\n )\n raw_text += f\"\\n{im_start}user\\n{query}{im_end}\\n{im_start}assistant\\n\"\n\n elif chat_format == \"raw\":\n raw_text = query\n context_tokens = tokenizer.encode(raw_text)\n else:\n raise NotImplementedError(f\"Unknown chat format {chat_format!r}\")\n\n return raw_text, context_tokens" }, { "identifier": "HistoryType", "path": "llm/models/hf_models/qwen/qwen_generation_utils.py", "snippet": "def pad_batch(batch: BatchTokensType, pad_id: int, seq_length: int) -> BatchTokensType:\ndef get_ltor_masks_and_position_ids(\n data,\n eod_token,\n reset_position_ids,\n reset_attention_mask,\n eod_mask_loss,\n):\ndef get_batch(context_tokens: torch.LongTensor, eod_id: int):\ndef get_stop_words_ids(chat_format, tokenizer):\ndef make_context(\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: List[Tuple[str, str]] = None,\n system: str = \"\",\n max_window_size: int = 6144,\n chat_format: str = \"chatml\",\n):\n def _tokenize_str(role, content):\ndef _decode_default(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_words: List[str],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace',\n):\ndef _decode_chatml(\n tokens: List[int],\n *,\n stop_words: List[str],\n eod_token_ids: List[int],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = 'replace'\n):\ndef decode_tokens(\n tokens: Union[torch.LongTensor, TokensType],\n tokenizer: PreTrainedTokenizer,\n raw_text_len: int,\n context_length: int,\n chat_format: str,\n verbose: bool = False,\n return_end_reason: bool = False,\n errors: str = \"replace\",\n) -> str:\n def __init__(self, stop_words_ids: Iterable[Iterable[int]], eos_token_id: int):\n def __call__(\n self, input_ids: torch.LongTensor, scores: torch.FloatTensor\n ) -> torch.FloatTensor:\n def _tokens_match(self, prev_tokens: torch.LongTensor, tokens: List[int]) -> bool:\n def _calc_stopped_samples(self, prev_input_ids: Iterable[int]) -> Iterable[int]:\ndef top_k_logits(logits, top_k=0, top_p=0.0, filter_value=-float(\"Inf\")):\ndef switch(val1, val2, boolean):\nclass StopWordsLogitsProcessor(LogitsProcessor):" }, { "identifier": "VisionTransformer", "path": "llm/models/hf_models/qwen_vl/visual.py", "snippet": "class VisionTransformer(nn.Module):\n\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n n_queries: int = 256,\n output_dim: int = 512,\n **kwargs\n ):\n super().__init__()\n image_height, image_width = self.image_size = (image_size, image_size)\n patch_height, patch_width = self.patch_size = (patch_size, patch_size)\n self.grid_size = (image_height // patch_height, image_width // patch_width)\n self.output_dim = output_dim\n\n mean = (0.48145466, 0.4578275, 0.40821073)\n std = (0.26862954, 0.26130258, 0.27577711)\n self.image_transform = transforms.Compose([\n transforms.Resize(\n (image_size, image_size),\n interpolation=InterpolationMode.BICUBIC\n ),\n transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std),\n ])\n\n self.conv1 = 
nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n # class embeddings and positional embeddings\n scale = width ** -0.5\n self.positional_embedding = nn.Parameter(scale * torch.randn(256, width))\n\n norm_layer = partial(nn.LayerNorm, eps=1e-6)\n act_layer = nn.GELU\n\n self.ln_pre = norm_layer(width)\n self.transformer = TransformerBlock(\n width,\n layers,\n heads,\n mlp_ratio,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n\n self.attn_pool = Resampler(\n grid_size=int(math.sqrt(n_queries)),\n embed_dim=output_dim,\n num_heads=output_dim // 128,\n kv_dim=width,\n norm_layer=norm_layer,\n )\n self.ln_post = norm_layer(output_dim)\n self.proj = nn.Parameter((output_dim ** -0.5) * torch.randn(output_dim, output_dim))\n\n def forward(self, x: torch.Tensor):\n x = x.to(\n dtype=self.transformer.get_cast_dtype(),\n device=self.transformer.get_cast_device(),\n )\n # to patches\n x = self.conv1(x) # shape = [*, width, grid, grid]\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n x = x + get_abs_pos(self.positional_embedding, x.size(1))\n\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n x = self.attn_pool(x)\n x = self.ln_post(x)\n x = x @ self.proj\n\n return x\n\n def encode(self, image_paths: List[str]):\n images = []\n for image_path in image_paths:\n if image_path.startswith(\"http://\") or image_path.startswith(\"https://\"):\n image = Image.open(requests.get(image_path, stream=True).raw)\n else:\n image = Image.open(image_path)\n image = image.convert(\"RGB\")\n images.append(self.image_transform(image))\n images = torch.stack(images, dim=0)\n return self(images)" }, { "identifier": "RMSNorm", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class RMSNorm(torch.nn.Module):\n def __init__(self, dim: int, eps: float = 1e-6):\n super().__init__()\n self.eps = eps\n self.weight = nn.Parameter(torch.ones(dim))\n\n def _norm(self, x):\n return x * torch.rsqrt(x.pow(2).mean(-1, keepdim=True) + self.eps)\n\n def forward(self, x):\n if rms_norm is not None and x.is_cuda:\n return rms_norm(x, self.weight, self.eps)\n else:\n output = self._norm(x.float()).type_as(x)\n return output * self.weight" }, { "identifier": "apply_rotary_pos_emb", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "def apply_rotary_pos_emb(t, freqs):\n cos, sin = freqs\n if apply_rotary_emb_func is not None and t.is_cuda:\n t_ = t.float()\n cos = cos.squeeze(0).squeeze(1)[:, : cos.shape[-1] // 2]\n sin = sin.squeeze(0).squeeze(1)[:, : sin.shape[-1] // 2]\n output = apply_rotary_emb_func(t_, cos, sin).type_as(t)\n return output\n else:\n rot_dim = freqs[0].shape[-1]\n cos, sin = freqs\n t_, t_pass_ = t[..., :rot_dim], t[..., rot_dim:]\n t_ = t_.float()\n t_pass_ = t_pass_.float()\n t_ = (t_ * cos) + (_rotate_half(t_) * sin)\n return torch.cat((t_, t_pass_), dim=-1).type_as(t)" }, { "identifier": "QWenMLP", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenMLP(nn.Module):\n def __init__(self, config):\n super().__init__()\n self.w1 = nn.Linear(\n config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias\n )\n self.w2 = nn.Linear(\n config.hidden_size, config.intermediate_size // 2, bias=not config.no_bias\n )\n ff_dim_in = config.intermediate_size // 2\n self.c_proj = nn.Linear(ff_dim_in, config.hidden_size, bias=not 
config.no_bias)\n\n def forward(self, hidden_states):\n a1 = self.w1(hidden_states)\n a2 = self.w2(hidden_states)\n intermediate_parallel = a1 * F.silu(a2)\n output = self.c_proj(intermediate_parallel)\n return output" }, { "identifier": "QWenAttention", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenAttention(nn.Module):\n def __init__(self, config):\n super().__init__()\n\n self.register_buffer(\"masked_bias\", torch.tensor(-1e4), persistent=False)\n self.seq_length = config.seq_length\n\n self.hidden_size = config.hidden_size\n self.split_size = config.hidden_size\n self.num_heads = config.num_attention_heads\n self.head_dim = self.hidden_size // self.num_heads\n\n self.use_flash_attn = config.use_flash_attn\n self.scale_attn_weights = True\n\n self.projection_size = config.kv_channels * config.num_attention_heads\n\n assert self.projection_size % config.num_attention_heads == 0\n self.hidden_size_per_attention_head = (\n self.projection_size // config.num_attention_heads\n )\n\n self.c_attn = nn.Linear(config.hidden_size, 3 * self.projection_size)\n\n self.c_proj = nn.Linear(\n config.hidden_size, self.projection_size, bias=not config.no_bias\n )\n\n self.is_fp32 = not (config.bf16 or config.fp16)\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n ):\n self.core_attention_flash = FlashSelfAttention(\n causal=True, attention_dropout=config.attn_dropout_prob\n )\n self.bf16 = config.bf16\n\n self.use_dynamic_ntk = config.use_dynamic_ntk\n self.use_logn_attn = config.use_logn_attn\n\n logn_list = [\n math.log(i, self.seq_length) if i > self.seq_length else 1\n for i in range(1, 32768)\n ]\n logn_tensor = torch.tensor(logn_list)[None, :, None, None]\n self.register_buffer(\"logn_tensor\", logn_tensor, persistent=False)\n\n self.attn_dropout = nn.Dropout(config.attn_dropout_prob)\n self.softmax_in_fp32 = config.softmax_in_fp32 if hasattr(config, 'softmax_in_fp32') else False\n self.use_cache_quantization = config.use_cache_quantization if hasattr(\n config, 'use_cache_quantization') else False\n self.use_cache_kernel = config.use_cache_kernel if hasattr(config, 'use_cache_kernel') else False\n cache_dtype = torch.float\n if self.bf16:\n cache_dtype = torch.bfloat16\n elif config.fp16:\n cache_dtype = torch.float16\n self.cache_qmax = torch.tensor(torch.iinfo(torch.uint8).max, dtype=cache_dtype)\n self.cache_qmin = torch.tensor(torch.iinfo(torch.uint8).min, dtype=cache_dtype)\n\n if config.use_cache_quantization and config.use_cache_kernel:\n try:\n from .cpp_kernels import cache_autogptq_cuda_256\n self.cache_kernels = cache_autogptq_cuda_256\n except ImportError:\n self.cache_kernels = None\n\n def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None):\n device = query.device\n if self.use_cache_quantization:\n qk, qk_scale, qk_zero = key\n if self.use_cache_kernel and self.cache_kernels is not None:\n shape = query.shape[:-1] + (qk.shape[-2],)\n attn_weights = torch.zeros(shape, dtype=torch.float16, device=device)\n self.cache_kernels.vecquant8matmul_batched_faster_old(\n query.contiguous() if query.dtype == torch.float16 else query.to(torch.float16).contiguous(),\n qk.transpose(-1, -2).contiguous(),\n attn_weights,\n qk_scale.contiguous() if qk_scale.dtype == torch.float16 else qk_scale.to(torch.float16).contiguous(),\n qk_zero.contiguous()if qk_zero.dtype == torch.float16 else qk_zero.to(torch.float16).contiguous())\n # attn_weights = 
attn_weights.to(query.dtype).contiguous()\n else:\n key = dequantize_cache_torch(qk, qk_scale, qk_zero)\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n else:\n attn_weights = torch.matmul(query, key.transpose(-1, -2))\n\n if self.scale_attn_weights:\n if self.use_cache_quantization:\n size_temp = value[0].size(-1)\n else:\n size_temp = value.size(-1)\n attn_weights = attn_weights / torch.full(\n [],\n size_temp ** 0.5,\n dtype=attn_weights.dtype,\n device=attn_weights.device,\n )\n if self.use_cache_quantization:\n query_length, key_length = query.size(-2), key[0].size(-2)\n else:\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = registered_causal_mask[\n :, :, key_length - query_length: key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n mask_value = torch.full([], mask_value, dtype=attn_weights.dtype).to(\n attn_weights.device\n )\n attn_weights = torch.where(\n causal_mask, attn_weights.to(attn_weights.dtype), mask_value\n )\n\n if attention_mask is not None:\n attn_weights = attn_weights + attention_mask\n\n if self.softmax_in_fp32:\n attn_weights = nn.functional.softmax(attn_weights.float(), dim=-1)\n else:\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n attn_weights = attn_weights.type(query.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n if self.use_cache_quantization:\n qv, qv_scale, qv_zero = value\n if self.use_cache_kernel and self.cache_kernels is not None:\n shape = attn_weights.shape[:-1] + (query.shape[-1],)\n attn_output = torch.zeros(shape, dtype=torch.float16, device=device)\n self.cache_kernels.vecquant8matmul_batched_column_compression_faster_old(\n attn_weights.contiguous() if attn_weights.dtype == torch.float16 else attn_weights.to(torch.float16).contiguous(),\n qv.contiguous(), # dtype: int32\n attn_output,\n qv_scale.contiguous() if qv_scale.dtype == torch.float16 else qv_scale.to(torch.float16).contiguous(),\n qv_zero.contiguous() if qv_zero.dtype == torch.float16 else qv_zero.to(torch.float16).contiguous())\n if attn_output.dtype != query.dtype:\n attn_output = attn_output.to(query.dtype)\n attn_weights = attn_weights.to(query.dtype)\n else:\n value = dequantize_cache_torch(qv, qv_scale, qv_zero)\n attn_output = torch.matmul(attn_weights, value)\n else:\n attn_output = torch.matmul(attn_weights, value)\n\n attn_output = attn_output.transpose(1, 2)\n\n return attn_output, attn_weights\n\n def _upcast_and_reordered_attn(\n self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None\n ):\n bsz, num_heads, q_seq_len, dk = query.size()\n _, _, k_seq_len, _ = key.size()\n\n attn_weights = torch.empty(\n bsz * num_heads,\n q_seq_len,\n k_seq_len,\n dtype=torch.float32,\n device=query.device,\n )\n\n scale_factor = 1.0\n if self.scale_attn_weights:\n scale_factor /= float(value.size(-1)) ** 0.5\n\n with autocast(enabled=False):\n q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(\n -1, dk, k_seq_len\n )\n attn_weights = torch.baddbmm(\n attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor\n )\n attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)\n\n query_length, key_length = query.size(-2), key.size(-2)\n causal_mask = registered_causal_mask[\n :, :, key_length - query_length: key_length, :key_length\n ]\n mask_value = torch.finfo(attn_weights.dtype).min\n mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to(\n 
attn_weights.device\n )\n attn_weights = torch.where(causal_mask, attn_weights, mask_value)\n\n if attention_mask is not None:\n attn_weights = attn_weights + attention_mask\n\n attn_weights = nn.functional.softmax(attn_weights, dim=-1)\n\n if attn_weights.dtype != torch.float32:\n raise RuntimeError(\n \"Error with upcasting, attn_weights does not have dtype torch.float32\"\n )\n attn_weights = attn_weights.type(value.dtype)\n attn_weights = self.attn_dropout(attn_weights)\n\n if head_mask is not None:\n attn_weights = attn_weights * head_mask\n\n attn_output = torch.matmul(attn_weights, value)\n\n return attn_output, attn_weights\n\n def _split_heads(self, tensor, num_heads, attn_head_size):\n new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)\n tensor = tensor.view(new_shape)\n return tensor\n\n def _merge_heads(self, tensor, num_heads, attn_head_size):\n tensor = tensor.contiguous()\n new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)\n return tensor.view(new_shape)\n\n def forward(\n self,\n hidden_states: Optional[Tuple[torch.FloatTensor]],\n rotary_pos_emb_list: Optional[List[List[torch.Tensor]]] = None,\n registered_causal_mask: Optional[torch.Tensor] = None,\n layer_past: Optional[Tuple[torch.Tensor]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n output_attentions: Optional[bool] = False,\n use_cache: Optional[bool] = False,\n ):\n mixed_x_layer = self.c_attn(hidden_states)\n\n query, key, value = mixed_x_layer.split(self.split_size, dim=2)\n\n query = self._split_heads(query, self.num_heads, self.head_dim)\n key = self._split_heads(key, self.num_heads, self.head_dim)\n value = self._split_heads(value, self.num_heads, self.head_dim)\n\n if rotary_pos_emb_list is not None:\n cur_len = query.shape[1]\n if len(rotary_pos_emb_list) == 1:\n rotary_pos_emb = rotary_pos_emb_list[0]\n rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]\n rotary_pos_emb = (rotary_pos_emb,) * 2\n q_pos_emb, k_pos_emb = rotary_pos_emb\n # Slice the pos emb for current inference\n query = apply_rotary_pos_emb(query, q_pos_emb)\n key = apply_rotary_pos_emb(key, k_pos_emb)\n else:\n query_list = []\n key_list = []\n for i, rotary_pos_emb in enumerate(rotary_pos_emb_list):\n rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb]\n rotary_pos_emb = (rotary_pos_emb,) * 2\n q_pos_emb, k_pos_emb = rotary_pos_emb\n # Slice the pos emb for current inference\n query_list += [apply_rotary_pos_emb(query[i:i + 1, :, :], q_pos_emb)]\n key_list += [apply_rotary_pos_emb(key[i:i + 1, :, :], k_pos_emb)]\n query = torch.cat(query_list, dim=0)\n key = torch.cat(key_list, dim=0)\n\n if self.use_cache_quantization:\n key = quantize_cache_v(key.permute(0, 2, 1, 3),\n bits=8,\n qmin=self.cache_qmin,\n qmax=self.cache_qmax)\n value = quantize_cache_v(value.permute(0, 2, 1, 3),\n bits=8,\n qmin=self.cache_qmin,\n qmax=self.cache_qmax)\n\n if layer_past is not None:\n past_key, past_value = layer_past[0], layer_past[1]\n if self.use_cache_quantization:\n # use_cache_quantization:\n # present=((q_key,key_scale,key_zero_point),\n # (q_value,value_scale,value_zero_point))\n key = (torch.cat((past_key[0], key[0]), dim=2),\n torch.cat((past_key[1], key[1]), dim=2),\n torch.cat((past_key[2], key[2]), dim=2))\n value = (torch.cat((past_value[0], value[0]), dim=2),\n torch.cat((past_value[1], value[1]), dim=2),\n 
torch.cat((past_value[2], value[2]), dim=2))\n else:\n # not use_cache_quantization:\n # present=(key,value)\n key = torch.cat((past_key, key), dim=1)\n value = torch.cat((past_value, value), dim=1)\n\n if use_cache:\n present = (key, value)\n else:\n present = None\n\n if self.use_logn_attn and not self.training:\n if self.use_cache_quantization:\n seq_start = key[0].size(2) - query.size(1)\n seq_end = key[0].size(2)\n else:\n seq_start = key.size(1) - query.size(1)\n seq_end = key.size(1)\n logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :].type_as(query)\n query = query * logn_tensor.expand_as(query)\n\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n and query.is_cuda\n ):\n q, k, v = query, key, value\n attn_output = self.core_attention_flash(q, k, v, attention_mask=attention_mask)\n else:\n query = query.permute(0, 2, 1, 3)\n if not self.use_cache_quantization:\n key = key.permute(0, 2, 1, 3)\n value = value.permute(0, 2, 1, 3)\n if (\n registered_causal_mask is None\n and self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n and not query.is_cuda\n ):\n raise Exception(_ERROR_INPUT_CPU_QUERY_WITH_FLASH_ATTN_ACTIVATED)\n\n if not self.use_cache_quantization and SUPPORT_TORCH2:\n causal_mask = registered_causal_mask[\n :, :, key.size(-2) - query.size(-2): key.size(-2), :key.size(-2)\n ]\n if attention_mask is not None:\n attention_mask = attention_mask.expand(\n -1, -1, causal_mask.size(2), -1\n ).masked_fill(~causal_mask, torch.finfo(query.dtype).min)\n else:\n attention_mask = causal_mask\n attn_output = F.scaled_dot_product_attention(\n query, key, value, attn_mask=attention_mask\n ).transpose(1, 2)\n attn_weight = None\n else:\n attn_output, attn_weight = self._attn(\n query, key, value, registered_causal_mask, attention_mask, head_mask\n )\n context_layer = self._merge_heads(\n attn_output, self.num_heads, self.head_dim\n )\n\n attn_output = self.c_proj(context_layer)\n\n outputs = (attn_output, present)\n if output_attentions:\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and not self.is_fp32\n ):\n raise ValueError(\"Cannot output attentions while using flash-attn\")\n else:\n outputs += (attn_weight,)\n\n return outputs" }, { "identifier": "QWenModel", "path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenModel(QWenPreTrainedModel):\n _keys_to_ignore_on_load_missing = [\"attn.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.vocab_size = config.vocab_size\n self.num_hidden_layers = config.num_hidden_layers\n self.embed_dim = config.hidden_size\n self.use_cache_quantization = self.config.use_cache_quantization if hasattr(\n self.config, 'use_cache_quantization') else False\n\n self.gradient_checkpointing = False\n self.use_dynamic_ntk = config.use_dynamic_ntk\n self.seq_length = config.seq_length\n\n self.wte = nn.Embedding(self.vocab_size, self.embed_dim)\n\n self.drop = nn.Dropout(config.emb_dropout_prob)\n\n if config.rotary_pct == 1.0:\n self.rotary_ndims = None\n else:\n assert config.rotary_pct < 1\n self.rotary_ndims = int(\n config.kv_channels * config.rotary_pct\n )\n dim = (\n self.rotary_ndims\n if self.rotary_ndims is not None\n else config.kv_channels\n )\n self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base)\n\n self.use_flash_attn = config.use_flash_attn\n self.is_fp32 = not (config.bf16 or config.fp16)\n if (\n self.use_flash_attn\n and flash_attn_unpadded_func is not None\n and 
not self.is_fp32\n ):\n self.registered_causal_mask = None\n else:\n max_positions = config.max_position_embeddings\n self.register_buffer(\n \"registered_causal_mask\",\n torch.tril(\n torch.ones((max_positions, max_positions), dtype=torch.bool)\n ).view(1, 1, max_positions, max_positions),\n persistent=False,\n )\n\n self.h = nn.ModuleList(\n [\n QWenBlock(\n config\n )\n for i in range(config.num_hidden_layers)\n ]\n )\n self.ln_f = RMSNorm(\n self.embed_dim,\n eps=config.layer_norm_epsilon,\n )\n\n self.post_init()\n\n def get_input_embeddings(self):\n return self.wte\n\n def set_input_embeddings(self, new_embeddings):\n self.wte = new_embeddings\n\n def get_ntk_alpha(self, true_seq_len):\n context_value = math.log(true_seq_len / self.seq_length, 2) + 1\n ntk_alpha = 2 ** math.ceil(context_value) - 1\n ntk_alpha = max(ntk_alpha, 1)\n return ntk_alpha\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ):\n output_attentions = (\n output_attentions\n if output_attentions is not None\n else self.config.output_attentions\n )\n output_hidden_states = (\n output_hidden_states\n if output_hidden_states is not None\n else self.config.output_hidden_states\n )\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\n \"You cannot specify both input_ids and inputs_embeds at the same time\"\n )\n elif input_ids is not None:\n input_shape = input_ids.size()\n input_ids = input_ids.view(-1, input_shape[-1])\n batch_size = input_ids.shape[0]\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n batch_size = inputs_embeds.shape[0]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if token_type_ids is not None:\n token_type_ids = token_type_ids.view(-1, input_shape[-1])\n if position_ids is not None:\n position_ids = position_ids.view(-1, input_shape[-1])\n\n if past_key_values is None:\n past_length = 0\n past_key_values = tuple([None] * len(self.h))\n else:\n if self.use_cache_quantization:\n past_length = past_key_values[0][0][0].size(2)\n else:\n past_length = past_key_values[0][0].size(-2)\n if position_ids is None:\n position_ids = torch.arange(\n past_length,\n input_shape[-1] + past_length,\n dtype=torch.long,\n device=device,\n )\n position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1])\n\n if attention_mask is not None:\n if batch_size <= 0:\n raise ValueError(\"batch_size has to be defined and > 0\")\n attention_mask = attention_mask.view(batch_size, -1)\n attention_mask = attention_mask[:, None, None, :]\n attention_mask = attention_mask.to(dtype=self.dtype)\n attention_mask = (1.0 - attention_mask) * 
torch.finfo(self.dtype).min\n\n encoder_attention_mask = None\n head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)\n\n if inputs_embeds is None:\n inputs_embeds = self.wte(input_ids)\n hidden_states = inputs_embeds\n\n kv_seq_len = hidden_states.size()[1]\n if past_key_values[0] is not None:\n # past key values[0][0] shape: bs * seq_len * head_num * dim\n if self.use_cache_quantization:\n kv_seq_len += past_key_values[0][0][0].shape[2]\n else:\n kv_seq_len += past_key_values[0][0].shape[1]\n\n if self.training or not self.use_dynamic_ntk:\n ntk_alpha_list = [1.0]\n elif kv_seq_len != hidden_states.size()[1]:\n ntk_alpha_list = self.rotary_emb._ntk_alpha_cached_list\n else:\n ntk_alpha_list = []\n if attention_mask is not None and kv_seq_len > self.seq_length:\n true_seq_lens = attention_mask.squeeze(1).squeeze(1).eq(0).sum(dim=-1, dtype=torch.int32)\n for i in range(hidden_states.size()[0]):\n true_seq_len = true_seq_lens[i].item()\n ntk_alpha = self.get_ntk_alpha(true_seq_len)\n ntk_alpha_list.append(ntk_alpha)\n else:\n ntk_alpha = self.get_ntk_alpha(kv_seq_len)\n ntk_alpha_list.append(ntk_alpha)\n self.rotary_emb._ntk_alpha_cached_list = ntk_alpha_list\n rotary_pos_emb_list = [\n self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for ntk_alpha in ntk_alpha_list\n ]\n\n hidden_states = self.drop(hidden_states)\n output_shape = input_shape + (hidden_states.size(-1),)\n\n if self.gradient_checkpointing and self.training:\n if use_cache:\n logger.warning_once(\n \"`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...\"\n )\n use_cache = False\n\n presents = () if use_cache else None\n all_self_attentions = () if output_attentions else None\n all_hidden_states = () if output_hidden_states else None\n for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)):\n\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if self.gradient_checkpointing and self.training:\n\n def create_custom_forward(module):\n def custom_forward(*inputs):\n # None for past_key_value\n return module(*inputs, use_cache, output_attentions)\n\n return custom_forward\n\n outputs = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n rotary_pos_emb_list,\n self.registered_causal_mask,\n None,\n attention_mask,\n head_mask[i],\n encoder_hidden_states,\n encoder_attention_mask,\n )\n else:\n outputs = block(\n hidden_states,\n layer_past=layer_past,\n rotary_pos_emb_list=rotary_pos_emb_list,\n registered_causal_mask=self.registered_causal_mask,\n attention_mask=attention_mask,\n head_mask=head_mask[i],\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n )\n\n hidden_states = outputs[0]\n if use_cache is True:\n presents = presents + (outputs[1],)\n\n if output_attentions:\n all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],)\n\n hidden_states = self.ln_f(hidden_states)\n hidden_states = hidden_states.view(output_shape)\n # Add last hidden state\n if output_hidden_states:\n all_hidden_states = all_hidden_states + (hidden_states,)\n\n if not return_dict:\n return tuple(\n v for v in [hidden_states, presents, all_hidden_states] if v is not None\n )\n\n return BaseModelOutputWithPast(\n last_hidden_state=hidden_states,\n past_key_values=presents,\n hidden_states=all_hidden_states,\n attentions=all_self_attentions,\n )" }, { "identifier": "QWenLMHeadModel", 
"path": "llm/models/hf_models/qwen/modeling_qwen.py", "snippet": "class QWenLMHeadModel(QWenPreTrainedModel):\n _keys_to_ignore_on_load_missing = [r\"h\\.\\d+\\.attn\\.rotary_emb\\.inv_freq\"]\n _keys_to_ignore_on_load_unexpected = [r\"h\\.\\d+\\.attn\\.masked_bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n assert (\n config.bf16 + config.fp16 + config.fp32 <= 1\n ), \"Only one of \\\"bf16\\\", \\\"fp16\\\", \\\"fp32\\\" can be true\"\n logger.warn(\n \"Warning: please make sure that you are using the latest codes and checkpoints, \"\n \"especially if you used Qwen-7B before 09.25.2023.\"\n \"请使用最新模型和代码,尤其如果你在9月25日前已经开始使用Qwen-7B,千万注意不要使用错误代码和模型。\"\n )\n\n autoset_precision = config.bf16 + config.fp16 + config.fp32 == 0\n\n if autoset_precision:\n if SUPPORT_BF16:\n logger.warn(\n \"The model is automatically converting to bf16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\" # noqa\n )\n config.bf16 = True\n elif SUPPORT_FP16:\n logger.warn(\n \"The model is automatically converting to fp16 for faster inference. \"\n \"If you want to disable the automatic precision, please manually add bf16/fp16/fp32=True to \\\"AutoModelForCausalLM.from_pretrained\\\".\" # noqa\n )\n config.fp16 = True\n else:\n config.fp32 = True\n\n if config.bf16 and SUPPORT_CUDA and not SUPPORT_BF16:\n logger.warn(\n \"Your device does NOT seem to support bf16, you can switch to fp16 or fp32 by by passing fp16/fp32=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\") # noqa\n if config.fp16 and SUPPORT_CUDA and not SUPPORT_FP16:\n logger.warn(\n \"Your device does NOT support faster inference with fp16, please switch to fp32 which is likely to be faster\")\n if config.fp32:\n if SUPPORT_BF16:\n logger.warn(\n \"Your device support faster inference by passing bf16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n elif SUPPORT_FP16:\n logger.warn(\n \"Your device support faster inference by passing fp16=True in \\\"AutoModelForCausalLM.from_pretrained\\\".\")\n\n if config.use_flash_attn == \"auto\":\n if config.bf16 or config.fp16:\n logger.warn(\"Try importing flash-attention for faster inference...\")\n config.use_flash_attn = True\n else:\n config.use_flash_attn = False\n if config.use_flash_attn and config.fp32:\n logger.warn(\"Flash attention will be disabled because it does NOT support fp32.\")\n\n if config.use_flash_attn:\n _import_flash_attn()\n\n self.transformer = QWenModel(config)\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n if config.bf16:\n self.transformer.bfloat16()\n self.lm_head.bfloat16()\n if config.fp16:\n self.transformer.half()\n self.lm_head.half()\n self.post_init()\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, inputs_embeds=None, **kwargs\n ):\n token_type_ids = kwargs.get(\"token_type_ids\", None)\n if past_key_values:\n input_ids = input_ids[:, -1].unsqueeze(-1)\n if token_type_ids is not None:\n token_type_ids = token_type_ids[:, -1].unsqueeze(-1)\n\n attention_mask = kwargs.get(\"attention_mask\", None)\n position_ids = kwargs.get(\"position_ids\", None)\n\n if attention_mask is not None and position_ids is None:\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if 
past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n else:\n position_ids = None\n\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"position_ids\": position_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids,\n }\n )\n return model_inputs\n\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n token_type_ids: Optional[torch.LongTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n\n return_dict = (\n return_dict if return_dict is not None else self.config.use_return_dict\n )\n\n transformer_outputs = self.transformer(\n input_ids,\n past_key_values=past_key_values,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n hidden_states = transformer_outputs[0]\n\n lm_logits = self.lm_head(hidden_states)\n\n loss = None\n if labels is not None:\n labels = labels.to(lm_logits.device)\n shift_logits = lm_logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n loss = loss_fct(\n shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1)\n )\n\n if not return_dict:\n output = (lm_logits,) + transformer_outputs[1:]\n return ((loss,) + output) if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n logits=lm_logits,\n past_key_values=transformer_outputs.past_key_values,\n hidden_states=transformer_outputs.hidden_states,\n attentions=transformer_outputs.attentions,\n )\n\n @staticmethod\n def _reorder_cache(\n past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor\n ) -> Tuple[Tuple[torch.Tensor]]:\n\n return tuple(\n tuple(\n past_state.index_select(0, beam_idx.to(past_state.device))\n for past_state in layer_past\n )\n for layer_past in past_key_values\n )\n\n def chat(\n self,\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: Optional[HistoryType],\n system: str = \"You are a helpful assistant.\",\n append_history: bool = True,\n stream: Optional[bool] = _SENTINEL,\n stop_words_ids: Optional[List[List[int]]] = None,\n generation_config: Optional[GenerationConfig] = None,\n **kwargs,\n ) -> Tuple[str, HistoryType]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n assert stream is _SENTINEL, _ERROR_STREAM_IN_CHAT\n assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT\n if history is None:\n history = 
[]\n if stop_words_ids is None:\n stop_words_ids = []\n\n max_window_size = kwargs.get('max_window_size', None)\n if max_window_size is None:\n max_window_size = generation_config.max_window_size\n raw_text, context_tokens = make_context(\n tokenizer,\n query,\n history=history,\n system=system,\n max_window_size=max_window_size,\n chat_format=generation_config.chat_format,\n )\n\n stop_words_ids.extend(get_stop_words_ids(\n generation_config.chat_format, tokenizer\n ))\n input_ids = torch.tensor([context_tokens]).to(self.device)\n outputs = self.generate(\n input_ids,\n stop_words_ids=stop_words_ids,\n return_dict_in_generate=False,\n generation_config=generation_config,\n **kwargs,\n )\n\n response = decode_tokens(\n outputs[0],\n tokenizer,\n raw_text_len=len(raw_text),\n context_length=len(context_tokens),\n chat_format=generation_config.chat_format,\n verbose=False,\n errors='replace'\n )\n\n if append_history:\n history.append((query, response))\n\n return response, history\n\n def chat_stream(\n self,\n tokenizer: PreTrainedTokenizer,\n query: str,\n history: Optional[HistoryType],\n system: str = \"You are a helpful assistant.\",\n stop_words_ids: Optional[List[List[int]]] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n generation_config: Optional[GenerationConfig] = None,\n **kwargs,\n ) -> Generator[str, Any, None]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n assert generation_config.chat_format == 'chatml', _ERROR_BAD_CHAT_FORMAT\n if history is None:\n history = []\n if stop_words_ids is None:\n stop_words_ids = []\n\n max_window_size = kwargs.get('max_window_size', None)\n if max_window_size is None:\n max_window_size = generation_config.max_window_size\n raw_text, context_tokens = make_context(\n tokenizer,\n query,\n history=history,\n system=system,\n max_window_size=max_window_size,\n chat_format=generation_config.chat_format,\n )\n\n stop_words_ids.extend(get_stop_words_ids(\n generation_config.chat_format, tokenizer\n ))\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n input_ids = torch.tensor([context_tokens]).to(self.device)\n\n from transformers_stream_generator.main import NewGenerationMixin, StreamGenerationConfig\n self.__class__.generate_stream = NewGenerationMixin.generate\n self.__class__.sample_stream = NewGenerationMixin.sample_stream\n stream_config = StreamGenerationConfig(**generation_config.to_dict(), do_stream=True)\n\n def stream_generator():\n outputs = []\n for token in self.generate_stream(\n input_ids,\n return_dict_in_generate=False,\n generation_config=stream_config,\n logits_processor=logits_processor,\n seed=-1,\n **kwargs):\n outputs.append(token.item())\n yield tokenizer.decode(outputs, skip_special_tokens=True, errors='ignore')\n\n return stream_generator()\n\n def generate(\n self,\n inputs: Optional[torch.Tensor] = None,\n generation_config: Optional[GenerationConfig] = None,\n logits_processor: Optional[LogitsProcessorList] = None,\n stopping_criteria: Optional[StoppingCriteriaList] = None,\n prefix_allowed_tokens_fn: Optional[\n Callable[[int, torch.Tensor], List[int]]\n ] = None,\n synced_gpus: Optional[bool] = None,\n assistant_model: Optional[\"PreTrainedModel\"] = 
None,\n streamer: Optional[\"BaseStreamer\"] = None,\n **kwargs,\n ) -> Union[GenerateOutput, torch.LongTensor]:\n generation_config = generation_config if generation_config is not None else self.generation_config\n\n # Process stop_words_ids.\n stop_words_ids = kwargs.pop(\"stop_words_ids\", None)\n if stop_words_ids is None and generation_config is not None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n if stop_words_ids is None:\n stop_words_ids = getattr(generation_config, \"stop_words_ids\", None)\n\n if stop_words_ids is not None:\n stop_words_logits_processor = StopWordsLogitsProcessor(\n stop_words_ids=stop_words_ids,\n eos_token_id=generation_config.eos_token_id,\n )\n if logits_processor is None:\n logits_processor = LogitsProcessorList([stop_words_logits_processor])\n else:\n logits_processor.append(stop_words_logits_processor)\n\n return super().generate(\n inputs,\n generation_config=generation_config,\n logits_processor=logits_processor,\n stopping_criteria=stopping_criteria,\n prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,\n synced_gpus=synced_gpus,\n assistant_model=assistant_model,\n streamer=streamer,\n **kwargs,\n )" } ]
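The context snippets above center on Qwen's ChatML prompt assembly (make_context) and stop-word handling during generate. A minimal sketch of the raw-text layout only, with a toy system/history/query — the <|im_start|>/<|im_end|> marker strings are an assumption mirroring the snippet's im_start/im_end variables, and the token-level bookkeeping (max_window_size trimming, context_tokens) is omitted:

# Sketch, not the repo's function: the string that make_context builds for chat_format == "chatml".
im_start, im_end = "<|im_start|>", "<|im_end|>"

def chatml_layout(system, history, query):
    # System turn first, then each (user, assistant) pair, then an open assistant turn.
    text = f"{im_start}system\n{system}{im_end}"
    for q, r in history:
        text += f"\n{im_start}user\n{q}{im_end}\n{im_start}assistant\n{r}{im_end}"
    text += f"\n{im_start}user\n{query}{im_end}\n{im_start}assistant\n"
    return text

print(chatml_layout("You are a helpful assistant.", [("hi", "Hello!")], "Who are you?"))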
import importlib
import math
import torch  # noqa
import torch.nn.functional as F  # noqa
import torch.utils.checkpoint  # noqa
from typing import TYPE_CHECKING, Dict, Optional, Tuple, Union, Callable, List, Any, Generator  # noqa
from torch.cuda.amp import autocast  # noqa
from torch.nn import CrossEntropyLoss
from transformers import PreTrainedTokenizer, GenerationConfig, StoppingCriteriaList  # noqa
from transformers.generation.logits_process import LogitsProcessorList  # noqa
from transformers.generation.streamers import BaseStreamer  # noqa
from transformers.generation.utils import GenerateOutput  # noqa
from transformers.modeling_outputs import (
    BaseModelOutputWithPast,
    CausalLMOutputWithPast,
)
from transformers.modeling_utils import PreTrainedModel  # noqa
from transformers.utils import logging
from einops import rearrange
from torch import nn

from .configuration_qwen import QWenConfig  # noqa
from .qwen_generation_utils import (
    make_context,
)  # noqa
from llm.models.hf_models.qwen.qwen_generation_utils import (
    HistoryType,
    decode_tokens,
    get_stop_words_ids,
)
from .visual import VisionTransformer
from llm.models.hf_models.qwen.modeling_qwen import RMSNorm, apply_rotary_pos_emb, QWenMLP
from llm.models.hf_models.qwen.modeling_qwen import QWenAttention as QWenAttention_chat
from llm.models.hf_models.qwen.modeling_qwen import QWenModel as QWenModel_chat
from llm.models.hf_models.qwen.modeling_qwen import QWenLMHeadModel as QWenLMHeadModel_chat
15993
input_shape[-1] + past_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_length ) hidden_states = inputs_embeds kv_seq_len = hidden_states.size()[1] if past_key_values[0] is not None: # past key values[0][0] shape: bs * seq_len * head_num * dim kv_seq_len += past_key_values[0][0].shape[1] if ( self.use_dynamic_ntk and kv_seq_len == hidden_states.size()[1] and not self.training ): context_value = math.log(kv_seq_len / self.seq_length, 2) + 1 ntk_alpha = 2 ** math.ceil(context_value) - 1 ntk_alpha = max(ntk_alpha, 1) else: ntk_alpha = self.rotary_emb._ntk_alpha_cached rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for idx in range(len(rotary_pos_emb)): rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device) hidden_states = self.drop(hidden_states).clone() if fake_images is not None: hidden_states = hidden_states + images.mean() * 0 elif images is not None: for idx, (i, a, b) in enumerate(img_pos): hidden_states[i][a + 1 : b] = images[idx] output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." ) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache, output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, rotary_pos_emb, self.registered_causal_mask, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, ) else: outputs = block( hidden_states, layer_past=layer_past, rotary_pos_emb=rotary_pos_emb, registered_causal_mask=self.registered_causal_mask, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
# Copyright (c) Alibaba Cloud.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.

if TYPE_CHECKING:
    from transformers.generation.streamers import BaseStreamer

try:
    from einops import rearrange
except ImportError:
    rearrange = None

SUPPORT_CUDA = torch.cuda.is_available()
SUPPORT_BF16 = SUPPORT_CUDA and torch.cuda.is_bf16_supported()
SUPPORT_FP16 = SUPPORT_CUDA and torch.cuda.get_device_capability(0)[0] >= 7

logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "qwen"
_CONFIG_FOR_DOC = "QWenConfig"

QWen_PRETRAINED_MODEL_ARCHIVE_LIST = ["qwen-7b"]

_ERROR_BAD_CHAT_FORMAT = """\
We detect you are probably using the pretrained model (rather than chat model) for chatting, since the chat_format in generation_config is not "chatml".
If you are directly using the model downloaded from Huggingface, please make sure you are using our "Qwen/Qwen-7B-Chat" Huggingface model (rather than "Qwen/Qwen-7B") when you call model.chat().
我们检测到您可能在使用预训练模型(而非chat模型)进行多轮chat,因为您当前在generation_config指定的chat_format,并未设置为我们在对话中所支持的"chatml"格式。
如果您在直接使用我们从Huggingface提供的模型,请确保您在调用model.chat()时,使用的是"Qwen/Qwen-7B-Chat"模型(而非"Qwen/Qwen-7B"预训练模型)。
"""

_SENTINEL = object()
_ERROR_STREAM_IN_CHAT = """\
Pass argument `stream` to model.chat() is buggy, deprecated, and marked for removal. Please use model.chat_stream(...) instead of model.chat(..., stream=True).
向model.chat()传入参数stream的用法可能存在Bug,该用法已被废弃,将在未来被移除。请使用model.chat_stream(...)代替model.chat(..., stream=True)。
"""

apply_rotary_emb_func = None
rms_norm = None


# Copied from transformers.models.bart.modeling_bart._make_causal_mask
def _make_causal_mask(
    input_ids_shape: torch.Size, dtype: torch.dtype, device: torch.device, past_key_values_length: int = 0
):
    """
    Make causal mask used for bi-directional self-attention.
    """
    bsz, tgt_len = input_ids_shape
    mask = torch.full((tgt_len, tgt_len), torch.finfo(dtype).min, device=device)
    mask_cond = torch.arange(mask.size(-1), device=device)
    mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
    mask = mask.to(dtype)

    if past_key_values_length > 0:
        mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
    return mask[None, None, :, :].expand(bsz, 1, tgt_len, tgt_len + past_key_values_length)


# Copied from transformers.models.bart.modeling_bart._expand_mask
def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
    """
    Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
""" bsz, src_len = mask.size() tgt_len = tgt_len if tgt_len is not None else src_len expanded_mask = mask[:, None, None, :].expand(bsz, 1, tgt_len, src_len).to(dtype) inverted_mask = 1.0 - expanded_mask return inverted_mask.masked_fill(inverted_mask.to(torch.bool), torch.finfo(dtype).min) class QWenAttention(QWenAttention_chat): def __init__(self, config): super().__init__(config) def _attn(self, query, key, value, registered_causal_mask, attention_mask=None, head_mask=None): attn_weights = torch.matmul(query, key.transpose(-1, -2)) if self.scale_attn_weights: attn_weights = attn_weights / torch.full( [], value.size(-1) ** 0.5, dtype=attn_weights.dtype, device=attn_weights.device, ) attn_weights = attn_weights + attention_mask attn_weights = nn.functional.softmax(attn_weights, dim=-1) attn_weights = attn_weights.type(value.dtype) attn_weights = self.attn_dropout(attn_weights) if head_mask is not None: attn_weights = attn_weights * head_mask attn_output = torch.matmul(attn_weights, value) attn_output = attn_output.transpose(1, 2) return attn_output, attn_weights def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, output_attentions: Optional[bool] = False, use_cache: Optional[bool] = False, ): mixed_x_layer = self.c_attn(hidden_states) query, key, value = mixed_x_layer.split(self.split_size, dim=2) query = self._split_heads(query, self.num_heads, self.head_dim) key = self._split_heads(key, self.num_heads, self.head_dim) value = self._split_heads(value, self.num_heads, self.head_dim) if rotary_pos_emb is not None: cur_len = query.shape[1] rotary_pos_emb = [i[:, -cur_len:, :, :] for i in rotary_pos_emb] rotary_pos_emb = (rotary_pos_emb,) * 2 q_pos_emb, k_pos_emb = rotary_pos_emb # Slice the pos emb for current inference query = apply_rotary_pos_emb(query, q_pos_emb) key = apply_rotary_pos_emb(key, k_pos_emb) if layer_past is not None: past_key, past_value = layer_past[0], layer_past[1] key = torch.cat((past_key, key), dim=1) value = torch.cat((past_value, value), dim=1) if use_cache: present = (key, value) else: present = None if self.use_logn_attn and not self.training: if self.logn_tensor.device != query.device or self.logn_tensor.dtype != query.dtype: self.logn_tensor = self.logn_tensor.to(query.device).type_as(query) seq_start = key.size(1) - query.size(1) seq_end = key.size(1) logn_tensor = self.logn_tensor[:, seq_start:seq_end, :, :] query = query * logn_tensor.expand_as(query) query = query.permute(0, 2, 1, 3) key = key.permute(0, 2, 1, 3) value = value.permute(0, 2, 1, 3) attn_output, attn_weight = self._attn( query, key, value, registered_causal_mask, attention_mask, head_mask ) context_layer = self._merge_heads( attn_output, self.num_heads, self.head_dim ) attn_output = self.c_proj(context_layer) outputs = (attn_output, present) if output_attentions: outputs += (attn_weight,) return outputs class QWenBlock(nn.Module): def __init__(self, config): super().__init__() hidden_size = config.hidden_size self.bf16 = config.bf16 self.ln_1 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.attn = QWenAttention(config) self.ln_2 = RMSNorm( hidden_size, eps=config.layer_norm_epsilon, ) self.mlp = 
QWenMLP(config) def forward( self, hidden_states: Optional[Tuple[torch.FloatTensor]], rotary_pos_emb: Optional[List[torch.Tensor]] = None, registered_causal_mask: Optional[torch.Tensor] = None, layer_past: Optional[Tuple[torch.Tensor]] = None, attention_mask: Optional[torch.FloatTensor] = None, head_mask: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = False, output_attentions: Optional[bool] = False, ): layernorm_output = self.ln_1(hidden_states) attn_outputs = self.attn( layernorm_output, rotary_pos_emb, registered_causal_mask=registered_causal_mask, layer_past=layer_past, attention_mask=attention_mask, head_mask=head_mask, use_cache=use_cache, output_attentions=output_attentions, ) attn_output = attn_outputs[0] outputs = attn_outputs[1:] residual = hidden_states layernorm_input = attn_output + residual layernorm_output = self.ln_2(layernorm_input) residual = layernorm_input mlp_output = self.mlp(layernorm_output) hidden_states = residual + mlp_output if use_cache: outputs = (hidden_states,) + outputs else: outputs = (hidden_states,) + outputs[1:] return outputs class QWenModel(QWenModel_chat): _keys_to_ignore_on_load_missing = ["attn.masked_bias"] def __init__(self, config): super().__init__(config) dim = ( self.rotary_ndims if self.rotary_ndims is not None else config.kv_channels ) self.rotary_emb = RotaryEmbedding(dim, base=config.rotary_emb_base) self.registered_causal_mask = None self.h = nn.ModuleList( [ QWenBlock( config ) for i in range(config.num_hidden_layers) ] ) self.visual = VisionTransformer(**config.visual) def _prepare_decoder_attention_mask(self, attention_mask, input_shape, inputs_embeds, past_key_values_length): combined_attention_mask = None if input_shape[-1] > 1: combined_attention_mask = _make_causal_mask( input_shape, inputs_embeds.dtype, device=inputs_embeds.device, past_key_values_length=past_key_values_length, ) if attention_mask is not None: # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len] expanded_attn_mask = _expand_mask(attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]).to( inputs_embeds.device ) combined_attention_mask = ( expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask ) return combined_attention_mask def forward( self, input_ids: Optional[torch.LongTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, token_type_ids: Optional[torch.LongTensor] = None, position_ids: Optional[torch.LongTensor] = None, head_mask: Optional[torch.FloatTensor] = None, inputs_embeds: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.Tensor] = None, encoder_attention_mask: Optional[torch.FloatTensor] = None, use_cache: Optional[bool] = None, output_attentions: Optional[bool] = None, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None, ): if past_key_values is None and torch.any(input_ids == self.config.visual['image_start_id']): bos_pos = torch.where(input_ids == self.config.visual['image_start_id']) eos_pos = torch.where(input_ids == self.config.visual['image_start_id'] + 1) assert (bos_pos[0] == eos_pos[0]).all() img_pos = torch.stack((bos_pos[0], bos_pos[1], eos_pos[1]), dim=1) images = [] for i, a, b in img_pos: image = input_ids[i][a + 1 : b - 1].tolist() image = image[: image.index(self.config.visual['image_start_id'] + 2)] 
images.append(bytes(image).decode('utf-8')) images = self.visual.encode(images) assert images.shape[0] == len(images) fake_images = None elif self.training: fake_images = torch.zeros(1, 3, 224, 224).to( dtype=self.visual.conv1.weight.dtype, device=self.visual.conv1.weight.device) images = self.visual(fake_images) else: fake_images = None images = None output_attentions = ( output_attentions if output_attentions is not None else self.config.output_attentions ) output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) use_cache = use_cache if use_cache is not None else self.config.use_cache return_dict = ( return_dict if return_dict is not None else self.config.use_return_dict ) if input_ids is not None and inputs_embeds is not None: raise ValueError( "You cannot specify both input_ids and inputs_embeds at the same time" ) elif input_ids is not None: input_shape = input_ids.size() input_ids = input_ids.view(-1, input_shape[-1]) batch_size = input_ids.shape[0] elif inputs_embeds is not None: input_shape = inputs_embeds.size()[:-1] batch_size = inputs_embeds.shape[0] else: raise ValueError("You have to specify either input_ids or inputs_embeds") device = input_ids.device if input_ids is not None else inputs_embeds.device if token_type_ids is not None: token_type_ids = token_type_ids.view(-1, input_shape[-1]) if position_ids is not None: position_ids = position_ids.view(-1, input_shape[-1]) if past_key_values is None: past_length = 0 past_key_values = tuple([None] * len(self.h)) else: past_length = past_key_values[0][0].size(-2) if position_ids is None: position_ids = torch.arange( past_length, input_shape[-1] + past_length, dtype=torch.long, device=device, ) position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) encoder_attention_mask = None head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers) if inputs_embeds is None: inputs_embeds = self.wte(input_ids) if batch_size <= 0: raise ValueError("batch_size has to be defined and > 0") attention_mask = self._prepare_decoder_attention_mask( attention_mask, input_shape, inputs_embeds, past_length ) hidden_states = inputs_embeds kv_seq_len = hidden_states.size()[1] if past_key_values[0] is not None: # past key values[0][0] shape: bs * seq_len * head_num * dim kv_seq_len += past_key_values[0][0].shape[1] if ( self.use_dynamic_ntk and kv_seq_len == hidden_states.size()[1] and not self.training ): context_value = math.log(kv_seq_len / self.seq_length, 2) + 1 ntk_alpha = 2 ** math.ceil(context_value) - 1 ntk_alpha = max(ntk_alpha, 1) else: ntk_alpha = self.rotary_emb._ntk_alpha_cached rotary_pos_emb = self.rotary_emb(kv_seq_len, ntk_alpha=ntk_alpha) for idx in range(len(rotary_pos_emb)): rotary_pos_emb[idx] = rotary_pos_emb[idx].to(hidden_states.device) hidden_states = self.drop(hidden_states).clone() if fake_images is not None: hidden_states = hidden_states + images.mean() * 0 elif images is not None: for idx, (i, a, b) in enumerate(img_pos): hidden_states[i][a + 1 : b] = images[idx] output_shape = input_shape + (hidden_states.size(-1),) if self.gradient_checkpointing and self.training: if use_cache: logger.warning_once( "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." 
) use_cache = False presents = () if use_cache else None all_self_attentions = () if output_attentions else None all_hidden_states = () if output_hidden_states else None for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if self.gradient_checkpointing and self.training: def create_custom_forward(module): def custom_forward(*inputs): # None for past_key_value return module(*inputs, use_cache, output_attentions) return custom_forward outputs = torch.utils.checkpoint.checkpoint( create_custom_forward(block), hidden_states, rotary_pos_emb, self.registered_causal_mask, None, attention_mask, head_mask[i], encoder_hidden_states, encoder_attention_mask, ) else: outputs = block( hidden_states, layer_past=layer_past, rotary_pos_emb=rotary_pos_emb, registered_causal_mask=self.registered_causal_mask, attention_mask=attention_mask, head_mask=head_mask[i], encoder_hidden_states=encoder_hidden_states, encoder_attention_mask=encoder_attention_mask, use_cache=use_cache, output_attentions=output_attentions, ) hidden_states = outputs[0] if use_cache is True: presents = presents + (outputs[1],) if output_attentions: all_self_attentions = all_self_attentions + (outputs[2 if use_cache else 1],) hidden_states = self.ln_f(hidden_states) hidden_states = hidden_states.view(output_shape) # Add last hidden state if output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if not return_dict: return tuple( v for v in [hidden_states, presents, all_hidden_states] if v is not None ) return BaseModelOutputWithPast( last_hidden_state=hidden_states, past_key_values=presents, hidden_states=all_hidden_states, attentions=all_self_attentions, )
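In the forward pass above, each image is delimited by marker tokens image_start_id and image_start_id + 1; the bytes between them encode the image path, which is decoded, run through the ViT, and written over the placeholder positions (hidden_states[i][a + 1 : b] = images[idx]). A toy sketch of just that splicing step, with hypothetical ids and shapes (the real span holds the resampler's 256 query tokens):

# Sketch with made-up ids/dims; mirrors the bos_pos/eos_pos/img_pos logic above.
import torch

image_start_id = 100                                    # hypothetical; the real id comes from config.visual
seq = torch.tensor([[1, 2, 100, 7, 7, 7, 101, 3]])      # 100 = <img>, 101 = </img>, 7s = placeholder slots
hidden = torch.zeros(1, 8, 4)                           # [batch, seq, dim] token embeddings
vision_feats = torch.ones(1, 3, 4)                      # one feature vector per placeholder slot

bos = torch.where(seq == image_start_id)
eos = torch.where(seq == image_start_id + 1)
img_pos = torch.stack((bos[0], bos[1], eos[1]), dim=1)  # rows of (batch_idx, start, end)
for idx, (i, a, b) in enumerate(img_pos):
    hidden[i][a + 1 : b] = vision_feats[idx]            # overwrite the placeholder span with ViT features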
class QWenLMHeadModel(QWenLMHeadModel_chat):
6
2023-11-26 10:12:52+00:00
24k
wangermeng2021/llm-webui
main.py
[ { "identifier": "login_huggingface", "path": "src/utils/common.py", "snippet": "def login_huggingface(token,base_model_name_dropdown):\n if base_model_name_dropdown.lower().find(\"llama\") >= 0:\n if token:\n HUGGINGFACE_HUB_TOKEN = token\n print(\"d1:\",HUGGINGFACE_HUB_TOKEN)\n else:\n env_file_path = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),\"token.env\")\n load_dotenv(env_file_path)\n HUGGINGFACE_HUB_TOKEN = os.getenv('HUGGINGFACE_HUB_TOKEN')\n print(\"d2:\", HUGGINGFACE_HUB_TOKEN)\n login(token=HUGGINGFACE_HUB_TOKEN)\n os.environ[\"HUGGING_FACE_HUB_TOKEN\"] = HUGGINGFACE_HUB_TOKEN" }, { "identifier": "HuggingfaceInference", "path": "src/finetune/huggingface_inference.py", "snippet": "class HuggingfaceInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,using_4bit_quantization=True,low_cpu_mem_usage=False):\n self.model = None\n self.tokenizer = None\n self.hg_model = None\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n self.bnb_config = None\n if using_4bit_quantization:\n self.bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n self.low_cpu_mem_usage = low_cpu_mem_usage\n def load_model(self):\n try:\n \n if self.model_path.split(os.sep)[-1].rfind(\"llama\") >=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = LlamaForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.tokenizer = AutoTokenizer.from_pretrained(self.model_path)\n if self.bnb_config:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\":0},quantization_config=self.bnb_config,torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n else:\n self.hg_model = AutoModelForCausalLM.from_pretrained(self.model_path, device_map={\"\": 0},torch_dtype=torch.bfloat16, low_cpu_mem_usage=True,trust_remote_code=True)\n if not self.tokenizer.pad_token:\n if self.model_path.split(os.sep)[-1].lower().rfind(\"gpt2\")>=0:\n self.tokenizer.pad_token = self.tokenizer.eos_token\n else:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.hg_model.resize_token_embeddings(len(self.tokenizer))\n\n except Exception as e:\n return -1, e\n self.model = pipeline(\n \"text-generation\",\n model=self.hg_model,\n tokenizer=self.tokenizer,\n max_new_tokens = self.max_new_tokens,\n temperature=self.temperature,\n top_p=self.top_p,top_k=self.top_k,do_sample=True,\n return_full_text=False,\n repetition_penalty=self.repetition_penalty,\n # return_dict_in_generate = True\n )\n return 0, \"\"\n def infer(self ,input):\n output = self.model(input)\n return output[0]['generated_text'] if output else None\n def free_memory(self):\n if self.hg_model:\n del self.hg_model\n self.hg_model = None\n if self.tokenizer:\n del self.tokenizer\n self.tokenizer = None\n if self.model:\n del 
self.model\n self.model = None" }, { "identifier": "LlamaCppInference", "path": "src/finetune/llama_cpp_inference.py", "snippet": "class LlamaCppInference(Inference):\n def __init__(self,model_path,max_new_tokens=256,temperature=0.7 ,top_p=0.95 ,top_k=1,repetition_penalty=1.15,n_gpu_layers=35, n_ctx=4048,verbose=False):\n self.model_path = model_path\n self.max_new_tokens = max_new_tokens\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.repetition_penalty = repetition_penalty\n self.prefix1 = \"\"\n self.prefix2 = \"\"\n self.model = None\n\n def load_model(self):\n load_model_status = 0\n msg = None\n try:\n self.model = LlamaCpp(model_path=self.model_path, n_gpu_layers=35, n_ctx=4096,max_tokens=self.max_new_tokens, temperature=self.temperature,\n verbose=False, top_k=self.top_k, top_p=self.top_p,repeat_penalty=self.repetition_penalty)\n except Exception as e:\n load_model_status = -1\n msg = e\n return load_model_status, msg\n def infer(self ,input):\n return self.model(input)\n\n\n def free_memory(self):\n if self.model:\n del self.model\n self.model = None" }, { "identifier": "QAWithRAG", "path": "src/rag/qa_with_rag.py", "snippet": "class QAWithRAG():\n def __init__(self ,config: dict ={}):\n self.text_splitter = None\n self.embedding_function = None\n self.vectorstore = None\n self.retriever = None\n self.chat_llm = None\n\n self.chat_history =[]\n # self.persist_directory = \"./chroma_db\"\n self.persist_directory = None\n self.qa = None\n self.langchain_llm = None\n def free_memory(self):\n if self.chat_llm:\n self.chat_llm.free_memory()\n del self.chat_llm\n self.chat_llm = None\n if self.langchain_llm:\n del self.langchain_llm\n self.langchain_llm = None\n if self.qa:\n del self.qa\n self.qa = None\n\n\n def get_text_splitter(self ,chunk_size ,chunk_overlap ,separators):\n self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=chunk_size, chunk_overlap=chunk_overlap, length_function=len,\n separators=separators)\n def load_embedding_model(self ,model_path=\"\"):\n self.embedding_function = HuggingFaceEmbeddings(model_name=model_path ,model_kwargs = {'device': 'cpu'})\n def load_chat_model(self ,model_path,using_4bit_quantization,low_cpu_mem_usage,\n max_new_tokens, temperature, top_k, top_p, repeat_penalty\n ):\n self.set_prompt_template(model_path)\n load_model_status = 0\n if model_path.split('.')[-1] == \"gguf\":\n self.chat_llm = LlamaCppInference(model_path=model_path, max_new_tokens=max_new_tokens, temperature=temperature,\n top_k=top_k, top_p=top_p, repetition_penalty=repeat_penalty)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = self.chat_llm.model\n else:\n self.chat_llm = HuggingfaceInference(model_path, max_new_tokens, temperature, top_p, top_k, repeat_penalty, using_4bit_quantization,low_cpu_mem_usage)\n load_model_status, msg = self.chat_llm.load_model()\n self.langchain_llm = HuggingFacePipeline(pipeline=self.chat_llm.model)\n\n return load_model_status, msg\n\n #\n def get_document_data(self ,doc_path):\n self.chat_history = []\n self.chat_history.clear()\n self.doc_ext = doc_path.split('.')[-1]\n if self.doc_ext == \"txt\":\n loader = TextLoader(doc_path, encoding='utf8')\n elif self.doc_ext == \"pdf\":\n loader = PyPDFLoader(doc_path)\n elif self.doc_ext == \"docx\":\n loader = Docx2txtLoader(doc_path)\n else:\n raise ValueError(f\"Unsupported format: {self.doc_ext}\")\n data = loader.load()\n return data\n def add_document_to_vector_store(self, doc_path ,search_top_k 
,search_score_threshold):\n data = self.get_document_data(doc_path)\n data = self.text_splitter.split_documents(data)\n try:\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n except InvalidDimensionException:\n Chroma().delete_collection()\n self.vectorstore = Chroma.from_documents(data, self.embedding_function\n ,collection_metadata={\"hnsw:space\": \"cosine\"}\n ,persist_directory=self.persist_directory)\n # self.vectorstore = FAISS.from_documents(data, self.embedding_function) \n self.set_retriever(search_top_k ,search_score_threshold)\n\n def set_retriever(self ,search_top_k ,score_threshold):\n self.retriever = self.vectorstore.as_retriever(search_type='similarity_score_threshold',\n search_kwargs={'k': search_top_k, \"score_threshold\": score_threshold})\n def set_prompt_template(self ,chat_model_path):\n\n if chat_model_path.lower().find(\"mistral\") >= 0 and chat_model_path.lower().find(\"instruct\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"llama\") >= 0 and chat_model_path.lower().find(\"chat\") >= 0:\n prompt_template = \"\"\"<s>[INST] Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: [/INST]\"\"\"\n elif chat_model_path.lower().find(\"zephyr\") >= 0:\n prompt_template = \"\"\"<|user|>\\n Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer: </s><|assistant|>\\n\"\"\"\n else:\n prompt_template = \"\"\"Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.\\n\\n\n Context: {context}\\n\n Question: {question}\\n\n Answer:\"\"\"\n\n self.prompt_template = PromptTemplate(\n template=prompt_template, input_variables=[\"context\", \"question\"]\n )\n def generate(self, question):\n self.chat_history = []\n if self.retriever:\n\n chain_type_kwargs = {\"prompt\": self.prompt_template ,\"verbose\": False}\n self.qa = RetrievalQA.from_chain_type(llm=self.langchain_llm, chain_type=\"stuff\", retriever=self.retriever,\n return_source_documents=True,\n chain_type_kwargs=chain_type_kwargs)\n result = self.qa({\"query\": question}, return_only_outputs=True)\n retrieved_txt_list = []\n if len(result['source_documents'] ) >0:\n if self.doc_ext == \"txt\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"pdf\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n elif self.doc_ext == \"docx\":\n for doc_text in result['source_documents']:\n retrieved_txt_list.append(list(doc_text)[0][1])\n answer = result['result']\n else:\n answer = \"Sorry, I can't find any relevant information in document. 
\" + result['result']\n return answer, retrieved_txt_list\n else:\n return \"\", retrieved_txt_list" }, { "identifier": "read_yaml", "path": "src/utils/common.py", "snippet": "def read_yaml(yaml_path):\n with open(yaml_path) as f1:\n try:\n data = yaml.safe_load(f1)\n return data\n except yaml.YAMLError as e:\n raise ValueError(f'Error loading yaml file: {e}')" }, { "identifier": "get_first_row_from_dataset", "path": "src/utils/common.py", "snippet": "def get_first_row_from_dataset(dataset_path):\n if os.path.exists(os.path.join(dataset_path, \"dataset_dict.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_infos.json\")):\n dataset = datasets.load_dataset(dataset_path)\n elif os.path.exists(os.path.join(dataset_path, \"dataset_info.json\")):\n dataset = datasets.load_from_disk(dataset_path)\n else:\n raise ValueError(\n f'Invalid Dataset format {dataset_path}.')\n try:\n split_list = list(dataset.keys())\n except:\n split_list = [\"train\"]\n new_split_list= [\"\",\"\",\"\"]\n for split in split_list:\n if split.find(\"train\") >= 0:\n new_split_list[0] = split\n elif split.find(\"val\") >= 0:\n new_split_list[1] = split\n elif split.find(\"test\") >= 0:\n new_split_list[2] = split\n\n return dataset[new_split_list[0]][0],new_split_list" }, { "identifier": "get_runs_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_runs_model_names_from_dir(root_dir):\n\n run_names = os.listdir(root_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(root_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n model_bin_path = os.path.exists(\n os.path.join(root_dir,\n run_name, \"output_model\", run_output_model_name, \"ori\",\n \"pytorch_model.bin\"))\n if run_output_model_name.find(\"merged_\") >= 0 and model_bin_path:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n return runs_output_model" }, { "identifier": "get_hg_model_names_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_from_dir(root_dir):\n model_names = os.listdir(root_dir)\n model_names.sort(key=lambda file: os.path.getmtime(os.path.join(root_dir, file)),reverse=True)\n return model_names" }, { "identifier": "get_hg_model_names_and_gguf_from_dir", "path": "src/utils/common.py", "snippet": "def get_hg_model_names_and_gguf_from_dir(hg_model_root_dir,runs_model_root_dir):\n output = []\n runs_gguf_files = glob.glob(os.path.join(runs_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"*.gguf\"),recursive=False)\n root_model_gguf_files1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"*.gguf\"), recursive=False)\n root_model_hg_dir0 = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"config.json\"),recursive=False)\n root_model_hg_dir1 = glob.glob(os.path.join(hg_model_root_dir, \"**\",\"**\", \"config.json\"), recursive=False)\n runs_hg_dir = glob.glob(os.path.join(hg_model_root_dir,\"**\",\"**\",\"**\",\"**\",\"config.json\"),recursive=False)\n runs_gguf_files.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_gguf_files.sort(key=lambda file: 
os.path.getmtime(file), reverse=True)\n root_model_gguf_files1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir0.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n root_model_hg_dir1.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n runs_hg_dir.sort(key=lambda file: os.path.getmtime(file), reverse=True)\n\n for file in runs_gguf_files:\n file_pos = file.find(\"runs\")\n output.append(file[file_pos:])\n for file in root_model_gguf_files:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_gguf_files1:\n output.append(file[file.find(\"models\")+len(\"models\")+1:])\n for file in root_model_hg_dir0:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in root_model_hg_dir1:\n file_pos1 = file.find(\"models\")\n file_pos2 = file.find(\"config.json\")\n output.append(file[file_pos1+len(\"models\")+1:file_pos2-1])\n for file in runs_hg_dir:\n file_pos = file.find(\"runs\")+len(\"runs\")+1\n output.append(file[file_pos:])\n return output" }, { "identifier": "validate_model_path", "path": "src/utils/common.py", "snippet": "def validate_model_path(model_name):\n if not model_name:\n return False,\"\"\n home_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n base_model_config_path1 = os.path.join(home_dir, \"models\", model_name)\n base_model_config_path2 = os.path.join(base_model_config_path1, \"config.json\")\n run_model_config_path1 = os.path.join(home_dir, \"runs\", model_name)\n run_model_config_path2 = os.path.join(run_model_config_path1, \"config.json\")\n if os.path.exists(base_model_config_path1) and base_model_config_path1.endswith(\".gguf\"):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path1) and run_model_config_path1.endswith(\".gguf\") :\n return True,run_model_config_path1\n if os.path.exists(base_model_config_path2):\n return True,base_model_config_path1\n if os.path.exists(run_model_config_path2):\n return True,run_model_config_path1\n return False,\"\"" }, { "identifier": "get_runs_models", "path": "src/utils/common.py", "snippet": "def get_runs_models():\n training_runs_dir = os.path.join(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))), 'runs')\n run_names = os.listdir(training_runs_dir)\n run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file)))\n runs_output_model = []\n for run_name in run_names:\n run_name_dir = os.path.join(training_runs_dir, run_name)\n run_output_model = os.path.join(run_name_dir, \"output_model\")\n if os.path.exists(run_output_model):\n run_output_model_names = os.listdir(run_output_model)\n for run_output_model_name in run_output_model_names:\n if run_output_model_name.find(\"merged_\") >= 0:\n runs_output_model.append(os.path.join(run_name, \"output_model\", run_output_model_name, \"ori\"))\n runs_output_model = runs_output_model[::-1]\n return runs_output_model" }, { "identifier": "get_model_type", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_type(model_path):\n if model_path:\n if model_path.lower().find(\"mistral\") >= 0 and model_path.lower().find(\"instruct\") >= 0:\n model_type = \"mistral\"\n elif model_path.lower().find(\"llama\") >= 0 and model_path.lower().find(\"chat\") >= 0:\n model_type = \"llama2\"\n elif model_path.lower().find(\"zephyr\") >= 0:\n model_type = \"zephyr\"\n else:\n model_type = \"other model\"\n 
else:\n model_type = \"other model\"\n return model_type" }, { "identifier": "get_chat_history_prompt", "path": "src/utils/chat_prompts.py", "snippet": "def get_chat_history_prompt(chat_history,model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt = ','.join(chat_history[:-2])\n prompt = prompt + chat_history[-2]\n elif model_type == \"llama2\":\n prompt = format_chat_history_prompt_for_llama2_7b_chat(chat_history)\n elif model_type == \"zephyr\":\n prompt = format_chat_history_prompt_for_zephyr_7b_instruct(chat_history)\n elif model_type == \"mistral\":\n prompt = format_chat_history_prompt_for_mistral_7b_instruct(chat_history)\n return prompt" }, { "identifier": "get_model_prompt_template", "path": "src/utils/chat_prompts.py", "snippet": "def get_model_prompt_template(model_type=\"llama2\"):\n if model_type == \"other model\":\n prompt_template = PromptTemplate.from_template(\n \"{question}\"\n )\n elif model_type == \"llama2\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n elif model_type == \"zephyr\":\n prompt_template = PromptTemplate.from_template(\n \"<|user|>\\n{question}</s><|assistant|>\\n\"\n )\n elif model_type == \"mistral\":\n prompt_template = PromptTemplate.from_template(\n \"<s>[INST] {question} [/INST]\"\n )\n return prompt_template" }, { "identifier": "download_model", "path": "src/utils/download_model.py", "snippet": "class ModelDownloader:\n def __init__(self, max_retries=5):\n def sanitize_model_and_branch_names(self, model, branch):\n def get_download_links_from_huggingface(self, model, branch, text_only=False, specific_file=None):\n def get_output_folder(self, model, branch, is_lora, is_llamacpp=False, base_folder=None):\n def get_single_file(self, url, output_folder, start_from_scratch=False):\n def start_download_threads(self, file_list, output_folder, start_from_scratch=False, threads=4):\n def download_model_files(self, model, branch, links, sha256, output_folder, progress_bar=None, start_from_scratch=False, threads=1, specific_file=None, is_llamacpp=False):\n def check_model_files(self, model, branch, links, sha256, output_folder):" }, { "identifier": "QloraTrainer", "path": "src/finetune/qlora_trainer.py", "snippet": "class QloraTrainer(PeftTrainer):\n\n def __init__(self, config: dict):\n self.config = config\n self.tokenizer = None\n self.base_model = None\n self.merged_model = None\n self.dataset = None\n self.fused_model = None\n self.train_dataset = None\n self.val_dataset = None\n self.logging_callback = self.LoggingCallbacks()\n print(\"config:\",config)\n def load_dataset(self):\n if self.config[\"dataset\"][\"hg_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_infos.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset= datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"hg_dataset_dir\"],split=self.config[\"dataset\"][\"hg_val_dataset\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"hg_dataset_dir\"],\"dataset_dict.json\")):\n if self.config[\"dataset\"][\"hg_train_dataset\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_train_dataset\"])\n if self.config[\"dataset\"][\"hg_val_dataset\"]:\n self.val_dataset = 
datasets.load_from_disk(\n self.config[\"dataset\"][\"hg_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"hg_val_dataset\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"hg_dataset_dir\"]}.')\n else:\n\n if self.config[\"dataset\"][\"local_dataset_dir\"]:\n if os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_infos.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_dataset(self.config[\"dataset\"][\"local_dataset_dir\"],\n split=self.config[\"dataset\"][\"local_val_set\"])\n elif os.path.exists(os.path.join(self.config[\"dataset\"][\"local_dataset_dir\"], \"dataset_dict.json\")):\n if self.config[\"dataset\"][\"local_train_set\"]:\n self.train_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_train_set\"])\n if self.config[\"dataset\"][\"local_val_set\"]:\n self.val_dataset = datasets.load_from_disk(\n self.config[\"dataset\"][\"local_dataset_dir\"] + \"/\" + self.config[\"dataset\"][\"local_val_set\"])\n else:\n raise ValueError(\n f'Invalid Dataset format {self.config[\"dataset\"][\"local_dataset_dir\"]}.')\n\n\n if self.config[\"dataset\"][\"max_length\"] == \"Model Max Length\":\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"mistral\") >= 0:\n context_window = 1024*4\n elif self.config[\"model\"][\"base_model_name\"].rfind(\"zephyr\") >= 0:\n context_window = 1024*4\n else:\n context_window = self.tokenizer.model_max_length\n if self.tokenizer.model_max_length == int(1e30):\n context_window = 1024\n else:\n context_window = self.config[\"dataset\"][\"max_length\"]\n print(\"context_window:\",context_window)\n self.train_dataset = self.train_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n # padding=True\n ))\n if self.val_dataset:\n self.val_dataset = self.val_dataset.map(lambda sample: self.tokenizer(\n self.generate_prompt(\n sample,\n self.tokenizer.eos_token),\n max_length=context_window,\n truncation=True,\n padding=True\n ))\n def generate_prompt(self,sample,eos_token):\n\n prompt = self.config[\"dataset\"][\"prefix1\"]+sample[self.config[\"dataset\"][\"datatset_col1\"]]+\\\n self.config[\"dataset\"][\"prefix2\"] + sample[self.config[\"dataset\"][\"datatset_col2\"]]+eos_token\n # print(\"prompt:\",prompt)\n return prompt\n\n def load_model(self):\n\n if self.config[\"model\"][\"fine_tuning_type\"] == \"QLoRA\":\n bnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n )\n elif self.config[\"model\"][\"fine_tuning_type\"] == \"LoRA\":\n bnb_config = None\n try:\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n self.tokenizer = LlamaTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n else:\n self.tokenizer = 
AutoTokenizer.from_pretrained(self.config[\"model\"][\"base_model_path\"])\n self.base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], quantization_config=bnb_config, device_map={\"\":0},trust_remote_code=True)\n except Exception as e:\n return -1,e\n if not self.tokenizer.pad_token:\n self.tokenizer.add_special_tokens({'pad_token': '[PAD]'})\n self.base_model.resize_token_embeddings(len(self.tokenizer))\n if self.config[\"training\"][\"gradient_checkpointing\"] and not self.config[\"model\"][\"base_model_name\"].rfind(\"phi\")>=0:\n # self.base_model.gradient_checkpointing_enable()\n self.base_model = prepare_model_for_kbit_training(self.base_model,use_gradient_checkpointing=True,gradient_checkpointing_kwargs={'use_reentrant':False})\n else:\n self.base_model = prepare_model_for_kbit_training(self.base_model, use_gradient_checkpointing=False,gradient_checkpointing_kwargs={'use_reentrant':False})\n if self.config[\"model\"][\"base_model_name\"].lower().rfind(\"llama\")>=0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"mistral\") >= 0 or \\\n self.config[\"model\"][\"base_model_name\"].lower().rfind(\"zephyr\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"llama\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"falcon\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"falcon\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"gpt2\") >= 0:\n target_modules = LORA_TARGET_MODULES[\"gpt2\"]\n task_type = \"CAUSAL_LM\"\n elif self.config[\"model\"][\"base_model_name\"].lower().find(\"phi\") >= 0:\n target_modules = [\"Wqkv\", \"out_proj\"]\n task_type = \"CAUSAL_LM\"\n else:\n raise ValueError(f'{self.config[\"model\"][\"base_model_name\"]} is not yet supported.')\n #T5,bart, task_type = \"SEQ_2_SEQ_LM\" ,AutoModelForSeq2SeqLM\n \n lora_config = LoraConfig(\n r=self.config[\"model\"][\"lora_r\"],\n lora_alpha=self.config[\"model\"][\"lora_alpha\"],\n target_modules=target_modules,\n lora_dropout=self.config[\"model\"][\"lora_dropout\"],\n bias=self.config[\"model\"][\"lora_bias\"],\n task_type=task_type,\n )\n self.fused_model = get_peft_model(self.base_model, lora_config)\n # self.fused_model.gradient_checkpointing = True\n return 0,\"\"\n def train(self):\n self.run_name = datetime.now().strftime(\"run_%Y-%m-%d_%H-%M-%S\")\n logging_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"tensorboard\")\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name,\"output_model\", run_output_model_name + \"_adapter\")\n checkpoint_dir = os.path.join(self.config[\"training\"][\"root_dir\"],\"runs\", self.run_name)\n self.trainer = transformers.Trainer(\n model=self.fused_model,\n train_dataset=self.train_dataset,\n eval_dataset= self.val_dataset if self.val_dataset else None,\n args=transformers.TrainingArguments(\n per_device_train_batch_size=self.config[\"training\"][\"batch_size\"],\n gradient_accumulation_steps=self.config[\"training\"][\"gradient_accumulation_steps\"],\n warmup_steps=self.config[\"training\"][\"warmup_steps\"],\n num_train_epochs=self.config[\"training\"][\"epochs\"],\n learning_rate=self.config[\"training\"][\"learning_rate\"],\n fp16=True,\n output_dir=checkpoint_dir,\n report_to=\"tensorboard\",\n optim=self.config[\"training\"][\"optimizer\"],\n 
lr_scheduler_type=self.config[\"training\"][\"lr_scheduler_type\"],\n load_best_model_at_end=True if self.val_dataset else False,\n save_strategy=\"steps\",\n save_steps = self.config[\"training\"][\"eval_steps\"],\n save_total_limit=1,\n evaluation_strategy=\"steps\" if self.val_dataset else \"no\",\n eval_steps=self.config[\"training\"][\"eval_steps\"], # eval interval\n per_device_eval_batch_size=1,\n # eval_steps=10, # eval interval\n logging_steps=100,#self.config[\"training\"][\"eval_steps\"]\n # run_name=self.run_name,\n logging_dir=logging_dir,\n ),\n\n callbacks=[self.logging_callback,transformers.EarlyStoppingCallback(early_stopping_patience=self.config[\"training\"][\"early_stopping_patience\"]) ] if self.config[\"training\"][\"early_stopping_patience\"]>0 else [self.logging_callback],\n data_collator=transformers.DataCollatorForLanguageModeling(self.tokenizer, mlm=False),\n\n )\n\n self.fused_model.config.use_cache = False # silence the warnings. Please re-enable for inference!\n try:\n self.trainer.train()\n except Exception as e:\n return -1,e\n # model_save_path = f\"{self.config['training']['output_dir']}/{self.config['model']['base_model_name']}_adapter\"\n self.trainer.save_model(output_model_dir)\n return 0,\"\"\n def merge_and_save(self):\n\n if self.config[\"model\"][\"base_model_name\"].rfind(\"llama\")>=0:\n base_model = LlamaForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n else:\n base_model = AutoModelForCausalLM.from_pretrained(self.config[\"model\"][\"base_model_path\"], device_map=\"cpu\",trust_remote_code=True)\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_adapter_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\n run_output_model_name + \"_adapter\")\n\n model = PeftModel.from_pretrained(base_model, output_adapter_model_dir)\n\n merged_model = model.merge_and_unload()\n run_output_model_name = self.config['model']['base_model_name'].replace('/', '_')\n output_merged_model_dir = os.path.join(self.config[\"training\"][\"root_dir\"], \"runs\", self.run_name, \"output_model\",\"merged_\"+run_output_model_name,\"ori\")\n merged_model.save_pretrained(output_merged_model_dir)\n self.tokenizer.save_pretrained(output_merged_model_dir)\n\n def _print_trainable_parameters(self, model):\n \"\"\"\n Prints the number of trainable parameters in the model.\n \"\"\"\n trainable_params = 0\n all_param = 0\n for _, param in model.named_parameters():\n all_param += param.numel()\n if param.requires_grad:\n trainable_params += param.numel()\n print(\n f\"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}\"\n )\n\n\n class LoggingCallbacks(transformers.TrainerCallback):\n # current_step = 0\n # max_steps = 0\n\n def on_step_begin(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n pass\n\n def on_step_end(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: transformers.TrainerControl, **kwargs):\n global TRAINING_STATUS\n if TRAINING_STATUS.status == 1:\n control.should_epoch_stop = True\n control.should_training_stop = True\n else:\n self.max_steps = state.max_steps\n self.current_step = state.global_step\n\n def on_log(self, args: transformers.TrainingArguments, state: transformers.TrainerState,\n control: 
transformers.TrainerControl, logs, **kwargs):\n pass\n\n def free_memroy(self):\n try:\n del self.fused_model\n del self.tokenizer\n del self.base_model\n del self.trainer\n torch.cuda.empty_cache()\n except Exception as e:\n print(\"Free memory error:\",e)" }, { "identifier": "TRAINING_STATUS", "path": "src/finetune/qlora_trainer.py", "snippet": "TRAINING_STATUS = TrainingStatus()" }, { "identifier": "download_model_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_model_wrapper(repo_id,local_model_root_dir, specific_file=None, return_links=False, check=False,progress = gr.Progress()):\n if repo_id.endswith(\".gguf\"):\n try:\n model_dir = os.path.join(local_model_root_dir, '/'.join(repo_id.split('/')[0:-1]))\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {repo_id.split('/')[-1]} to `{model_dir}/...`</span>\"\n hf_hub_download(repo_id='/'.join(repo_id.split('/')[0:-1]), filename=repo_id.split('/')[-1], local_dir=model_dir, resume_download=True,\n force_download=False)\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n else:\n if repo_id == \"\" or repo_id == \"None\":\n # return gr.update(value=\"Model's name is empty!\",visible=True)\n yield f\"Model's name is empty!\"\n else:\n model_dir = os.path.join(local_model_root_dir, repo_id)\n\n model_config_path = os.path.join(model_dir, \"config.json\")\n model_config_path1 = os.path.join(model_dir, \"pytorch_model.bin\")\n model_config_path2 = os.path.join(model_dir, \"model.safetensors\")\n if os.path.exists(model_config_path1) or os.path.exists(model_config_path2):\n yield '<span style=\"color:green\">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded.</span>'\n else:\n\n try:\n progress(0.0)\n # download_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),\"download-model.py\")\n # downloader = importlib.import_module(download_model_path).ModelDownloader()\n downloader = download_model.ModelDownloader()\n model, branch = downloader.sanitize_model_and_branch_names(repo_id, None)\n yield (\"Getting the download links from Hugging Face\")\n links, sha256, is_lora, is_llamacpp, link_file_size_list = downloader.get_download_links_from_huggingface(model,\n branch,\n text_only=False,\n specific_file=specific_file\n )\n if return_links:\n yield '\\n\\n'.join([f\"`{Path(link).name}`\" for link in links])\n yield (\"Getting the output folder\")\n # base_folder = shared.args.lora_dir if is_lora else shared.args.model_dir\n base_folder = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"models\")\n output_folder = downloader.get_output_folder(model, branch, is_lora, is_llamacpp=is_llamacpp,\n base_folder=base_folder)\n link_file_size_list = np.array(link_file_size_list)\n links = np.array(links)\n sorted_index = np.argsort(link_file_size_list)\n link_file_size_list = link_file_size_list[sorted_index]\n links = links[sorted_index]\n total_file_size = sum(link_file_size_list)\n copyed_file_size = 0\n for link, link_file_size in zip(links, link_file_size_list):\n model_file_name = link.split('/')[-1]\n if model_file_name.find(\"Pooling\")>=0:\n model_file_name = model_file_name+\"/config.json\"\n # yield (f\"Downloading file {model_file_name} to `{output_folder}/...`\")\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading file {model_file_name} to `{output_folder}/...`</span>\"\n hf_hub_download(repo_id=repo_id, 
filename=model_file_name, local_dir=model_dir, resume_download=True,\n force_download=False)\n copyed_file_size += link_file_size\n progress(copyed_file_size / total_file_size)\n # yield (\"Download successful!\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" }, { "identifier": "download_dataset_wrapper", "path": "src/utils/download_huggingface_repo.py", "snippet": "def download_dataset_wrapper(repo_id,local_dataset_root_dir,progress = gr.Progress()):\n repo_id = repo_id.strip()\n if repo_id == \"\":\n yield \"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset's name is empty!</span>\"\n else:\n dataset_dir = os.path.join(local_dataset_root_dir, repo_id)\n # dataset_config_path1 = os.path.join(dataset_dir, \"config.json\")\n dataset_config_path1 = os.path.join(dataset_dir, \"dataset_infos.json\")\n dataset_config_path2 = os.path.join(dataset_dir, \"dataset_dict.json\")\n\n if os.path.exists(dataset_config_path1) or os.path.exists(dataset_config_path2):\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;This Dataset has already been downloaded.</span>\"\n else:\n try:\n\n progress(0.3)\n yield f\"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Downloading dataset to `{dataset_dir}/...`</span>\"\n datasets = load_dataset(repo_id)\n progress(0.8)\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n datasets.save_to_disk(dataset_dir)\n # datasets = load_from_disk(\"dddd\")\n yield \"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;Download successful!</span>\"\n except:\n progress(1.0)\n yield traceback.format_exc().replace('\\n', '\\n\\n')" } ]
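A minimal usage sketch tying the inference snippets above together, mirroring the GGUF-vs-transformers dispatch that QAWithRAG.load_chat_model performs. The model path is hypothetical; the sketch assumes the src/ package from this repo is importable and that the model file has already been downloaded.

from src.finetune.huggingface_inference import HuggingfaceInference
from src.finetune.llama_cpp_inference import LlamaCppInference
from src.utils.chat_prompts import get_model_type, get_model_prompt_template

model_path = "models/mistral-7b-instruct-v0.1.Q4_K_M.gguf"  # hypothetical path

# Pick a backend by file extension, as load_chat_model does above.
if model_path.split('.')[-1] == "gguf":
    # llama.cpp backend for quantized GGUF files
    infer_backend = LlamaCppInference(model_path=model_path, max_new_tokens=256)
else:
    # transformers pipeline backend (optionally 4-bit quantized)
    infer_backend = HuggingfaceInference(model_path=model_path, max_new_tokens=256)

status, msg = infer_backend.load_model()
if status == 0:
    # Wrap the question in the chat template matching the model family.
    template = get_model_prompt_template(get_model_type(model_path))
    print(infer_backend.infer(template.format(question="What is QLoRA?")))
    infer_backend.free_memory()
else:
    print("Model load failed:", msg)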
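The same snippets also define the full RAG pipeline; a sketch of the end-to-end flow follows, under the assumption that the embedding model and document paths below exist (both are made up for illustration).

from src.rag.qa_with_rag import QAWithRAG

qa = QAWithRAG()
# Chunking settings matching the UI defaults further down.
qa.get_text_splitter(chunk_size=256, chunk_overlap=20,
                     separators=["\n\n", "\n", ".", " ", ""])
qa.load_embedding_model(model_path="rag/embedding_models/bge-small-en-v1.5")  # hypothetical
status, msg = qa.load_chat_model("models/llama-2-7b-chat.Q4_K_M.gguf",  # hypothetical
                                 using_4bit_quantization=True, low_cpu_mem_usage=False,
                                 max_new_tokens=256, temperature=0.7,
                                 top_k=50, top_p=0.95, repeat_penalty=1.15)
if status == 0:
    # Embed, index, and retrieve with the thresholded similarity search.
    qa.add_document_to_vector_store("docs/manual.pdf", search_top_k=3,
                                    search_score_threshold=0.5)
    answer, sources = qa.generate("How is chunk overlap configured?")
    print(answer)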
import gc
import glob
import math
import os
import shutil
import socket
import subprocess
import threading
import time
import traceback
from pathlib import Path

import gradio as gr
import numpy as np
import pandas as pd
import requests
import torch
from huggingface_hub import hf_hub_download
from transformers.training_args import OptimizerNames

from src.finetune.huggingface_inference import HuggingfaceInference
from src.finetune.llama_cpp_inference import LlamaCppInference
from src.finetune.qlora_trainer import QloraTrainer, TRAINING_STATUS
from src.rag.qa_with_rag import QAWithRAG
from src.utils import download_model
from src.utils.chat_prompts import get_model_type, get_chat_history_prompt, get_model_prompt_template
from src.utils.common import login_huggingface, read_yaml, get_first_row_from_dataset, \
    get_runs_model_names_from_dir, get_hg_model_names_from_dir, \
    get_hg_model_names_and_gguf_from_dir, validate_model_path, get_runs_models
from src.utils.download_huggingface_repo import download_model_wrapper, download_dataset_wrapper
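Among the imports above, OptimizerNames is a plain Python Enum; init() below flattens it into dropdown choices through the private _value2member_map_ attribute. Iterating the enum class directly gives the same list without touching internals — a small sketch:

from transformers.training_args import OptimizerNames

# Equivalent to list(vars(OptimizerNames)["_value2member_map_"].keys()),
# but using the public Enum protocol.
optimizer_choices = [opt.value for opt in OptimizerNames]
print(optimizer_choices[:3])  # e.g. ['adamw_hf', 'adamw_torch', ...]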
14,501
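The "Chunking" controls in the UI code that follows map directly onto LangChain's RecursiveCharacterTextSplitter — the same class QAWithRAG.get_text_splitter instantiates. A standalone sketch with the UI's default values; the sample text is invented:

from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=256,       # "Chunk Size" slider default
    chunk_overlap=20,     # "Chunk Overlap" slider default
    length_function=len,
    separators=["\n\n", "\n", ".", " ", ""],  # "Separators" textbox default
)
chunks = splitter.split_text(("A paragraph about fine-tuning. " * 8 + "\n\n") * 3)
print(len(chunks), repr(chunks[0][:60]))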
gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") # local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg 
format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=0.1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low Cpu Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try:
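The module source below launches TensorBoard only when nothing is already listening on port 6006, by attempting a TCP connect before spawning the process. The probe pattern in isolation looks like this; the port mirrors the hard-coded value in start_tensorboard_server, while the host and logdir here are illustrative assumptions:

import os
import socket
import subprocess

def port_in_use(host: str, port: int) -> bool:
    # A successful connect means a server is already bound there.
    try:
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            s.settimeout(1.0)
            s.connect((host, port))
        return True
    except OSError:
        return False

if not port_in_use("127.0.0.1", 6006):
    # Same launch style as start_tensorboard_server() below.
    logdir = os.path.join(os.getcwd(), "runs")  # assumed runs directory
    subprocess.Popen(f"tensorboard --logdir {logdir} --reload_multifile True",
                     shell=True, stdout=subprocess.PIPE,
                     stderr=subprocess.STDOUT, close_fds=True)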
# os.environ['HTTP_PROXY'] = 'http://127.0.0.1:8889' # os.environ['HTTPS_PROXY'] = 'http://127.0.0.1:8889' LOCAL_HOST_IP = "0.0.0.0" TENSORBOARD_URL = "http://" + LOCAL_HOST_IP + ":6006/" INIT_DATASET_NAME = "test_python_code_instructions_5000_rows" RAG_DATA_LIST_DROPDOWN = "" TEXT_SPLITTER_DROPDOWN = "" CHUNK_SIZE_SLIDER = 0 CHUNK_OVERLAP_SLIDER = -1 SEPARATORS_TEXTBOX = "" EMBEDDING_MODEL_SOURCE_RADIO = "" HUB_EMBEDDING_MODEL_NAMES_DROPDOWN = "" LOCAL_EMBEDDING_MODEL_NAMES_DROPDOWN = "" CHAT_MODEL_SOURCE_RADIO = "" HUB_CHAT_MODEL_NAMES_DROPDOWN = "" LOCAL_CHAT_MODEL_NAMES_DROPDOWN = "" SEARCH_TOP_K_SLIDER = "" SEARCH_SCORE_THRESHOLD_SLIDER = "" training_ret_val = -1 error_msg = "" current_running_model_name = "" infer_model = None stop_generation_status = False chatbot_history=[] chatbot_height = 500 rag_chatbot_history=[] rag_stop_generation_status = False qa_with_rag = QAWithRAG() train_param_config = {} train_param_config["dataset"]={} train_param_config["model"]={} train_param_config["training"]={} model_zoo_config = {} transformer_optimizer_list = [] model_context_window = 0 init_train_file_path = None init_val_file_path = None INIT_PREFIX1 = "" INIT_PREFIX2 = "" INIT_PREFIX3 = "" INIT_PREFIX4 = "" INIT_COL1_TEXT = "" INIT_COL2_TEXT = "" INIT_COL3_TEXT = "" INIT_COL4_TEXT = "" col_names = [] DATASET_FIRST_ROW = None local_model_list = "" local_model_root_dir = "" base_model_names = [] training_base_model_names = [] embedding_model_names = [] base_model_context_window = [] local_dataset_list = [] local_dataset_root_dir = "" def get_local_embedding_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_model_list(): local_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") local_model_root_files = os.listdir(local_model_root_dir) local_model_list = [] for model_dir in local_model_root_files: if os.path.isdir(os.path.join(local_model_root_dir, model_dir)): local_model_list.append(model_dir) return local_model_list,local_model_root_dir def get_local_dataset_list(): local_dataset_list = [] local_dataset_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets") matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_infos.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_infos.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) matched_dataset_file_path_list = glob.glob(os.path.join(local_dataset_root_dir,"**","dataset_dict.json"),recursive=False) for matched_file_path in matched_dataset_file_path_list: matched_pos1 = matched_file_path.rfind("datasets") matched_pos2 = matched_file_path.rfind("dataset_dict.json") local_dataset_list.append(matched_file_path[matched_pos1 + 9:matched_pos2-1]) return local_dataset_list,local_dataset_root_dir def start_tensorboard_server(): try: s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((LOCAL_HOST_IP, 6006)) s.close() except Exception as e: tensorboard_cmd = f"tensorboard --logdir {os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs')} 
--reload_multifile True" tensorboard_proc = subprocess.Popen(tensorboard_cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True, close_fds=True) # bufsize=0, close_fds=True def init(): global config_dict,transformer_optimizer_list,model_context_window,init_train_file_path,init_val_file_path global INIT_PREFIX1,INIT_COL1_TEXT,INIT_PREFIX2,INIT_COL2_TEXT,INIT_PREFIX3,INIT_COL3_TEXT,INIT_PREFIX4,INIT_COL4_TEXT,col_names,DATASET_FIRST_ROW global local_model_list,local_model_root_dir global base_model_names,base_model_context_window,embedding_model_names,training_base_model_names global local_dataset_list, local_dataset_root_dir start_tensorboard_server() model_zoo_config = read_yaml(os.path.join(os.path.dirname(os.path.abspath(__file__)),"config","model_zoo.yaml")) transformer_optimizer_list = list(vars(OptimizerNames)["_value2member_map_"].keys()) #get dynamic context window from selected model model_context_window = [2048,1024,512] init_train_file_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "datasets", INIT_DATASET_NAME) DATASET_FIRST_ROW,split_list = get_first_row_from_dataset(init_train_file_path) col_names = list(DATASET_FIRST_ROW) col_names.insert(0,"") INIT_PREFIX1 = "<s>[INST] " INIT_PREFIX2 = "here are the inputs " INIT_PREFIX3 = " [/INST]" INIT_PREFIX4 = "</s>" INIT_COL1_TEXT = str(DATASET_FIRST_ROW[col_names[1]]) INIT_COL2_TEXT = str(DATASET_FIRST_ROW[col_names[2]]) INIT_COL3_TEXT = str(DATASET_FIRST_ROW[col_names[3]]) INIT_COL4_TEXT = "" local_model_list,local_model_root_dir = get_local_model_list() base_model_names = [model_name for model_name in model_zoo_config["model_list"]] training_base_model_names = [model_name for model_name in base_model_names if not model_name.endswith(".gguf")] # base_model_context_window = [model_name[1] for model_name in model_zoo_config["model_list"]] embedding_model_names = [model_name for model_name in model_zoo_config["embedding_model_list"]] local_dataset_list, local_dataset_root_dir = get_local_dataset_list() with gr.Blocks(title="FINETUNE",css="#vertical_center_align_markdown { position:absolute; top:30%;background-color:white;} .white_background {background-color: #ffffff} .none_border {border: none;border-collapse:collapse;}") as demo: init() local_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_dataset_root_dir_textbox = gr.Textbox(label="",value=local_dataset_root_dir, visible=False) local_embedding_model_root_dir_textbox = gr.Textbox(label="", value=os.path.join(os.path.dirname(os.path.abspath(__file__)), "rag", "embedding_models"), visible=False) local_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) local_home_chat_model_root_dir_textbox = gr.Textbox(label="", value=local_model_root_dir, visible=False) session_state = gr.State(value={}) # html = gr.HTML("<p align='center';>llm-web-ui</p>",elem_id="header") with gr.Tab("Home"): with gr.Row(): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;Chat Model", elem_classes="white_background") local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") local_home_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir, runs_model_root_dir) home_chat_model_source_radio_choices = ["Download From 
Huggingface Hub", f"From Local Dir(hg format:{local_home_chat_model_dir})"] home_chat_model_source_radio = gr.Radio(home_chat_model_source_radio_choices, label="Chat Model source", show_label=False, value=home_chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_home_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model", show_label=False, allow_custom_value=True, value=base_model_names[ 0] if base_model_names else None, interactive=True, scale=4, min_width=1) local_home_chat_model_names_dropdown = gr.Dropdown(local_home_chat_model_names, label=f"Chat Model", show_label=False, value=local_home_chat_model_names[ 0] if local_home_chat_model_names else None, interactive=True, scale=4, min_width=1, visible=False) download_hub_home_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_home_chat_model_names_btn = gr.Button("Stop", scale=1, visible=False) refresh_local_home_chat_model_names_btn = gr.Button("Refresh", scale=1, visible=False) load_home_chat_model_btn = gr.Button("Load Model", scale=1, visible=True) using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local,click load model to run.</span>') else: download_hub_home_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') # home_chat_model_running_status_markdown = gr.Markdown( # '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=3 ) generate_btn = gr.Button("Generate", scale=1) stop_btn = gr.Button("Stop", scale=1) # clear_btn = gr.Button("Clear",scale=1) with gr.Tab("Fine-Tuning"): with gr.Tabs() as tensorboard_tab: with gr.TabItem("Training", id=0): with gr.Row(): with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;1.Training", elem_classes="white_background") with gr.Group(): gr.Markdown("### &nbsp;1).Model", elem_classes="white_background") with gr.Group(): # gr.Markdown("<br> &nbsp;&nbsp;&nbsp; Base Model") base_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_model_root_dir})"] base_model_source_radio = gr.Radio(base_model_source_radio_choices, label="Base Model", value=base_model_source_radio_choices[0], interactive=True) with gr.Row(elem_classes="white_background"): base_model_name_dropdown = gr.Dropdown(training_base_model_names, label="Model Name", value=training_base_model_names[0] if training_base_model_names else None, interactive=True, visible=True, scale=5, allow_custom_value=True) download_local_model_btn = gr.Button("Download", scale=1, visible=True) stop_download_local_model_btn = gr.Button("Stop", scale=1, visible=False) # model_download_status = gr.Markdown("<div id='vertical_center_align_markdown'><p style='text-align: center;'>Not downloaded</p></div>", 
elem_classes="white_background",scale=1,full_width=True,visible=False) if validate_model_path(training_base_model_names[0])[0]: download_model_status_markdown = gr.Markdown('<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_model_status_markdown = gr.Markdown('<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): # local_home_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "models") # runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_model_list = get_hg_model_names_and_gguf_from_dir(local_home_chat_model_dir,runs_model_root_dir) local_model_list = get_hg_model_names_from_dir(os.path.dirname(os.path.abspath(__file__)), "models") local_model_dropdown = gr.Dropdown(local_model_list, label="Local Model", info="", value=local_model_list[0] if len(local_model_list) > 0 else None, interactive=True, elem_classes="white_background", scale=5, visible=False) refresh_local_model_list_btn = gr.Button("Refresh", scale=1, visible=False) fine_tuning_type_dropdown = gr.Dropdown(["QLoRA", "LoRA"], label="Fine-Tuning Type", info="", value="QLoRA", interactive=True) with gr.Group(): with gr.Row(elem_classes="white_background"): # gr.Markdown("### &nbsp;&nbsp;&nbsp; LoRA Config", elem_classes="white_background") lora_r_list = [str(ri) for ri in range(8, 65, 8)] lora_r_slider = gr.Slider(8, 64, value=8, step=8, label="lora_r", interactive=True) # lora_r_dropdown = gr.Dropdown(lora_r_list,label="lora_r", value=lora_r_list[0],interactive=True,allow_custom_value=True) lora_alpha_slider = gr.Slider(8, 96, value=32, step=8, label="lora_alpha", interactive=True) # lora_alpha_list = [str(ri) for ri in range(8, 97, 8)] # lora_alpha_dropdown = gr.Dropdown(lora_alpha_list,label="lora_alpha", value=lora_alpha_list[3],interactive=True,allow_custom_value=True) with gr.Row(elem_classes="white_background"): lora_dropout_slider = gr.Slider(0, 1, value=0.05, step=0.01, label="lora_dropout", interactive=True) lora_bias_dropdown = gr.Dropdown(["none", "all", "lora_only"], label="lora_bias", info="", value="none", interactive=True) with gr.Group(): gr.Markdown("### &nbsp;2).Dataset",elem_classes="white_background") dataset_source_radio_choices = ["Download From Huggingface Hub", f"From Local HG Dataset In {local_dataset_root_dir})"] dataset_source_radio = gr.Radio(dataset_source_radio_choices, label="Dataset Source", value=dataset_source_radio_choices[1], interactive=True) with gr.Row(equal_height=True): hg_dataset_path_textbox = gr.Textbox(label="Dataset Name:",elem_classes="none_border",visible=False, interactive=True, scale=4, value="iamtarun/python_code_instructions_18k_alpaca") download_local_dataset_btn = gr.Button("Download", scale=1, visible=False) stop_download_local_dataset_btn = gr.Button("Stop", scale=1, visible=False) download_dataset_status_markdown = gr.Markdown('') with gr.Row(): hg_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1,value="train") hg_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=False,visible=False, elem_classes="white_background", scale=1) with gr.Row(): local_dataset_list.pop( local_dataset_list.index(INIT_DATASET_NAME)) local_dataset_list.insert(0, INIT_DATASET_NAME) local_train_path_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Train Dataset", info="", 
value=local_dataset_list[0] if len(local_dataset_list)>0 else None, interactive=True, elem_classes="white_background", scale=5, visible=True) refresh_local_train_path_dataset_list_btn = gr.Button("Refresh", scale=1, visible=True) with gr.Row(): local_train_dataset_dropdown = gr.Dropdown(["train"], label="Train set", info="", interactive=True, elem_classes="white_background", scale=1,value="train",visible=True) local_val_dataset_dropdown = gr.Dropdown([], label="Val set", info="", interactive=True, elem_classes="white_background", scale=1,visible=True) with gr.Group(elem_classes="white_background"): # gr.Markdown("<h4><br> &nbsp;&nbsp;Prompt Template: (Prefix1 + ColumnName1 + Prefix2 + ColumnName2)</h4>",elem_classes="white_background") gr.Markdown("<br> &nbsp;&nbsp;&nbsp;&nbsp;**Prompt Template: (Prefix1+ColumnName1+Prefix2+ColumnName2+Prefix3+ColumnName3+Prefix4+ColumnName4)**",elem_classes="white_background") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>",elem_classes="white_background") # using_llama2_chat_template_checkbox = gr.Checkbox(True, label="Using Llama2/Mistral chat template",interactive=True,visible=False) with gr.Row(elem_classes="white_background"): # prompt_template prefix1_textbox = gr.Textbox(label="Prefix1:",value=INIT_PREFIX1,lines=2,interactive=True,elem_classes="white_background") datatset_col1_dropdown = gr.Dropdown(col_names, label="ColumnName1:", info="",value=col_names[1],interactive=True,elem_classes="white_background") prefix2_textbox = gr.Textbox(label="Prefix2:",value=INIT_PREFIX2,lines=2,interactive=True,elem_classes="white_background") datatset_col2_dropdown = gr.Dropdown(col_names, label="ColumnName2:", info="",value=col_names[2],interactive=True,elem_classes="white_background") with gr.Row(elem_classes="white_background"): prefix3_textbox = gr.Textbox(label="Prefix3:",value=INIT_PREFIX3,lines=2,interactive=True,elem_classes="white_background") datatset_col3_dropdown = gr.Dropdown(col_names, label="ColumnName3:", info="",value=col_names[3],interactive=True,elem_classes="white_background") prefix4_textbox = gr.Textbox(label="Prefix4:",value=INIT_PREFIX4,lines=2,interactive=True,elem_classes="white_background") datatset_col4_dropdown = gr.Dropdown(col_names, label="ColumnName4:", info="",value=col_names[0],interactive=True,elem_classes="white_background") # print("") prompt_sample = INIT_PREFIX1 + INIT_COL1_TEXT + INIT_PREFIX2 + INIT_COL2_TEXT + INIT_PREFIX3 + INIT_COL3_TEXT + INIT_PREFIX4 + INIT_COL4_TEXT prompt_sample_textbox = gr.Textbox(label="Prompt Sample:",interactive=False,value=prompt_sample,lines=4) max_length_dropdown = gr.Dropdown(["Model Max Length"]+model_context_window, label="Max Length",value="Model Max Length", interactive=True,allow_custom_value=True) with gr.Group(): gr.Markdown("### &nbsp;3).Training Arguments",elem_classes="white_background") with gr.Row(elem_classes="white_background"): epochs_slider = gr.Slider(1, 100, value=10, step=1, label="Epochs", interactive=True) # epochs_dropdown = gr.Dropdown([1]+[bi for bi in range(10,101,10)], label="Epochs",value=1, interactive=True,allow_custom_value=True) batch_size_list = [1,2,3]+[bi for bi in range(4,32+1,4)] batch_size_slider = gr.Slider(1, 100, value=1, step=1, label="Batch Size", interactive=True) # batch_size_dropdown = gr.Dropdown(batch_size_list,label="Batch Size", info="",value=batch_size_list[0],interactive=True,allow_custom_value=True) # learning_rate_textbox = 
gr.Textbox(label="Learning Rate", value=2e-4,interactive=True) with gr.Row(elem_classes="white_background"): learning_rate_slider = gr.Slider(0, 0.01, value=2e-4, step=0.0001, label="Learning Rate", interactive=True) warmup_steps_slider = gr.Slider(0, 400, value=100, step=10, label="Warmup Steps", interactive=True) with gr.Row(elem_classes="white_background"): optimizer_dropdown = gr.Dropdown(transformer_optimizer_list, label="Optimizer", info="", value=transformer_optimizer_list[1], interactive=True) lr_scheduler_list = ["linear","cosine","cosine_with_hard_restarts","polynomial_decay","constant","constant_with_warmup","inverse_sqrt","reduce_on_plateau"] lr_scheduler_type_dropdown = gr.Dropdown(lr_scheduler_list, label="LR Scheduler Type", info="", value=lr_scheduler_list[0], interactive=True) with gr.Row(elem_classes="white_background"): early_stopping_patience_slider = gr.Slider(0, 50+1, value=0, step=5, label="Early Stopping Patience", interactive=True) gradient_accumulation_steps_slider = gr.Slider(1, 50, value=1, step=1, label="Gradient Accumulation Steps") with gr.Row(elem_classes="white_background"): eval_steps_slider = gr.Slider(0, 1000, value=100, step=100, label="eval_steps", interactive=True) gradient_checkpointing_checkbox = gr.Checkbox(True,label="Gradient Checkpointing",interactive=True) train_btn = gr.Button("Start Training") with gr.Column(scale=1, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;2.Test",elem_classes="white_background") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file:os.path.getmtime(os.path.join(training_runs_dir,file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir,run_name) run_output_model = os.path.join(run_name_dir,"output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_")>=0: runs_output_model.append(os.path.join(run_name,"output_model",run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[0] if runs_output_model else None, interactive=True) gr.Markdown("") gr.Markdown( "<span> &nbsp;&nbsp;&nbsp;&nbsp;**Note**:&nbsp;&nbsp;Llama2/Mistral Chat Template:<s\>[INST] instruction+input [/INST] output</s\> </span>", elem_classes="white_background") with gr.Row(): test_input_textbox = gr.Textbox(label="Input:", interactive=True, value="", lines=4, scale=4) generate_text_btn = gr.Button("Generate",scale=1) finetune_test_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) # test_prompt = gr.Textbox(label="Prompt:", interactive=False, lines=2, scale=1) test_output = gr.Textbox(label="Output:", interactive=False,lines=4, scale=1) # def change_test_input_textbox(test_prefix1_textbox,test_input_textbox,test_prefix2_textbox): # return gr.update(value=test_prefix1_textbox+test_input_textbox+test_prefix2_textbox) # test_input_textbox.change(change_test_input_textbox,[test_prefix1_textbox,test_input_textbox,test_prefix2_textbox],test_prompt) with gr.Group(): gr.Markdown("## &nbsp;3.Quantization",elem_classes="white_background") with gr.Row(): quantization_type_list = ["gguf"] quantization_type_dropdown = 
gr.Dropdown(quantization_type_list, label="Quantization Type",value=quantization_type_list[0], interactive=True,scale=3) local_quantization_dataset_dropdown = gr.Dropdown(local_dataset_list, label="Dataset for quantization", value=local_dataset_list[0] if len( local_dataset_list) > 0 else None, interactive=True, elem_classes="white_background", scale=7, visible=False) refresh_local_quantization_dataset_btn = gr.Button("Refresh", scale=2, visible=False) def click_refresh_local_quantization_dataset_btn(): local_dataset_list, _ = get_local_dataset_list() return gr.update(choices=local_dataset_list, value=local_dataset_list[0] if len(local_dataset_list) > 0 else "") refresh_local_quantization_dataset_btn.click(click_refresh_local_quantization_dataset_btn,[],local_quantization_dataset_dropdown) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: if run_output_model_name.find("merged_") >= 0: runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) runs_output_model = runs_output_model[::-1] quantization_runs_output_model_dropdown = gr.Dropdown(runs_output_model, label="runs_output_model", value=runs_output_model[ 0] if runs_output_model else None, interactive=True, scale=6) quantize_btn = gr.Button("Quantize", scale=1,visible=False) if runs_output_model: model_name = runs_output_model[0].split(os.sep)[-2].split('_')[-1] quantized_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.sep.join(runs_output_model[0].split(os.sep)[0:-1]), "quantized_" + quantization_type_list[0] + "_" + model_name) if not os.path.exists(quantized_model_dir): os.makedirs(quantized_model_dir) quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''&nbsp;&nbsp;&nbsp;&nbsp;1.Follow the instructions in the llama.cpp repository to generate a GGUF:[https://github.com/ggerganov/llama.cpp#prepare-data--run](https://github.com/ggerganov/llama.cpp#prepare-data--run),<span style="color:red">&nbsp;&nbsp;Q4_K_M is recommended</span>''',visible=True) if runs_output_model: gguf_quantization_markdown2 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;2.Convert {runs_output_model[0]} to gguf model",visible=True) else: gguf_quantization_markdown2 = gr.Markdown( f"", visible=True) gguf_quantization_markdown3 = gr.Markdown(f"&nbsp;&nbsp;&nbsp;&nbsp;3.Deploy gguf model", visible=False) else: quantization_logging_markdown = gr.Markdown("") gguf_quantization_markdown0 = gr.Markdown("### &nbsp;&nbsp;&nbsp;&nbsp;GGUF Quantization Instruction:", elem_classes="white_background", visible=True) gguf_quantization_markdown1 = gr.Markdown('''''',visible=True) gguf_quantization_markdown2 = gr.Markdown(f"",visible=True) gguf_quantization_markdown3 = gr.Markdown(f"", visible=True) with gr.Group(visible=False): gr.Markdown("## &nbsp;4.Deploy",elem_classes="white_background") with gr.Row(): deployment_framework_dropdown = 
gr.Dropdown(["TGI","llama-cpp-python"], label="Deployment Framework",value="TGI", interactive=True) with gr.Row(): training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names.sort(key=lambda file: os.path.getmtime(os.path.join(training_runs_dir, file))) # ori_model_runs_output_model = [] tgi_model_format_runs_output_model = [] gguf_model_format_runs_output_model = [] for run_name in run_names: run_name_dir = os.path.join(training_runs_dir, run_name) run_output_model = os.path.join(run_name_dir, "output_model") if os.path.exists(run_output_model): run_output_model_names = os.listdir(run_output_model) for run_output_model_name in run_output_model_names: model_bin_path = os.path.exists( os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "ori", "pytorch_model.bin")) if run_output_model_name.find("merged_") >= 0 and model_bin_path: tgi_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "ori")) gptq_model_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs',run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1], "pytorch_model.bin") if os.path.exists(gptq_model_path): tgi_model_format_runs_output_model.append(os.path.join(run_name, "output_model", run_output_model_name, "quantized_gptq_"+run_output_model_name.split('_')[-1])) gguf_model_dir = os.path.join( os.path.dirname(os.path.abspath(__file__)), 'runs', run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1]) if os.path.exists(gguf_model_dir): gguf_model_names = os.listdir(gguf_model_dir) for gguf_model_name in gguf_model_names: if gguf_model_name.split('.')[-1] == "gguf": gguf_model_format_runs_output_model.append( os.path.join(run_name, "output_model", run_output_model_name, "quantized_gguf_" + run_output_model_name.split('_')[-1], gguf_model_name)) tgi_model_format_runs_output_model = tgi_model_format_runs_output_model[::-1] gguf_model_format_runs_output_model = gguf_model_format_runs_output_model[::-1] deployment_runs_output_model_dropdown = gr.Dropdown(tgi_model_format_runs_output_model, label="runs_output_model", value=tgi_model_format_runs_output_model[ 0] if tgi_model_format_runs_output_model else None, interactive=True,scale=6) refresh_deployment_runs_output_model_btn = gr.Button("Refresh", scale=1, visible=True) if tgi_model_format_runs_output_model: model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs', os.path.dirname(tgi_model_format_runs_output_model[0])) model_name = os.path.basename(tgi_model_format_runs_output_model[0]) if model_name.rfind("quantized_gptq_") >= 0: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name} --quantize gptq''' else: run_server_value = f'''docker run --gpus all --shm-size 1g -p 8080:80 -v {model_dir}:/data ghcr.io/huggingface/text-generation-inference:latest --model-id /data/{model_name}''' run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value=run_server_value) run_client_value = '''Command-Line Interface(CLI):\ncurl 127.0.0.1:8080/generate -X POST -d '{"inputs":"What is Deep Learning?","parameters":{"max_new_tokens":20}}' -H 'Content-Type: application/json'\n\nPython:\nfrom huggingface_hub import InferenceClient 
\nclient = InferenceClient(model="http://127.0.0.1:8080")\noutput = client.text_generation(prompt="What is Deep Learning?",max_new_tokens=512) ''' run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6,scale=1,value=run_client_value) else: run_server_script_textbox = gr.Textbox(label="Run Server:", interactive=False,lines=2, scale=1,value="") run_client_script_textbox = gr.Textbox(label="Run Client:", interactive=False, lines=6, scale=1, value="") # deploy_llm_code = gr.Code(code_str, language="shell", lines=5, label="Install Requirements:") install_requirements_value = ''' ### &nbsp;&nbsp; 1.Install Docker ### &nbsp;&nbsp; 2.Install NVIDIA Container Toolkit <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.1 Configure the repository: </h4> <p> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg \ && curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | \ sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | \ sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list \ && \ sudo apt-get update </p> <h4> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;2.2 Install the NVIDIA Container Toolkit packages: </h4> <p>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp; sudo apt-get install -y nvidia-container-toolkit </p> ''' with gr.Accordion("Install Requirements",open=False) as install_requirements_accordion: install_requirements_markdown = gr.Markdown(install_requirements_value) run_llama_cpp_python_code = gr.Code("", language="python", lines=10, label="run_model_using_llama_cpp_python.py",visible=False) # run_script_textbox = gr.Textbox(label="Install Requirements:", interactive=False, scale=1,value=install_requirements_value) #dependencies with gr.TabItem("Tensorboard", id=1) as fdddd: # training_log_markdown = gr.Markdown('',every=mytestfun) with gr.Row(): # training_log_textbox = gr.Textbox(label="logging:",value="", interactive=True, lines=2, scale=1) with gr.Group(): training_log_markdown = gr.Markdown('') stop_training_btn = gr.Button("Stop Training") training_runs_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'runs') run_names = os.listdir(training_runs_dir) run_names = [run_name for run_name in run_names if os.path.isdir(os.path.join(training_runs_dir,run_name))] run_names.sort(key=lambda f: os.path.getmtime(os.path.join(training_runs_dir, f))) with gr.Group(): # with gr.Row(): training_runs_dropdown = gr.Dropdown(run_names, label="Training Runs",value=run_names[0] if run_names else None, interactive=True, scale=1) delete_text_btn = gr.Button("Delete Run", scale=1) iframe = f'<iframe src={TENSORBOARD_URL} style="border:none;height:1024px;width:100%"></iframe>' tensorboard_html = gr.HTML(iframe) with gr.Tab("RAG"): with gr.Row(): with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;ChatBot", elem_classes="white_background") rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf","*.txt","*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file),reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: 
matched_file_name_list.append(os.path.basename(matched_file)) # chat_data_source_radio_choices = ["Chat With Document", # f"Chat With Image"] gr.Markdown("### &nbsp;Chat With Document", elem_classes="white_background") # chat_data_source_radio = gr.Radio(chat_data_source_radio_choices, # label="", # value=chat_data_source_radio_choices[0], # interactive=True) with gr.Row(): rag_data_list_dropdown = gr.Dropdown(matched_file_name_list, label=f"Local Documents In {rag_data_dir}", value=matched_file_name_list[0] if matched_file_name_list else None, interactive=True,scale=4, min_width=1) refresh_rag_data_list_btn = gr.Button("Refresh", scale=1, min_width=1) # if not current_running_model_name: # model_running_status_markdown = gr.Markdown(f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;No model is running!</span>") # else: # model_running_status_markdown = gr.Markdown(f"<span style='color:green'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Model is running: {current_running_model_name}.</span>") def click_refresh_rag_data_list_btn(): rag_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'rag', 'data') matched_file_list = [] supported_doc_type = ["*.pdf", "*.txt", "*.docx"] for doc_type in supported_doc_type: matched_file_list += glob.glob(os.path.join(rag_data_dir, doc_type), recursive=False) matched_file_list.sort(key=lambda file: os.path.getmtime(file), reverse=True) matched_file_name_list = [] for matched_file in matched_file_list: matched_file_name_list.append(os.path.basename(matched_file)) return gr.update(choices=matched_file_name_list,value=matched_file_name_list[0] if matched_file_name_list else None) refresh_rag_data_list_btn.click(click_refresh_rag_data_list_btn,[],rag_data_list_dropdown) # def update_model_running_status(): # return gr.update(value=f"<span style='color:red'>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{current_running_model_name} is running!</span>") # # load_model_btn.click(click_load_model_btn,model_list_dropdown,[model_list_dropdown]).success(update_model_running_status,[],model_running_status_markdown) with gr.Row(): rag_chatbot = gr.Chatbot(value=[],bubble_full_width=False,rtl=False,layout="panel",height=chatbot_height, avatar_images=((os.path.join(os.path.abspath(''),"pics", "user1.png")), (os.path.join(os.path.abspath(''),"pics", "bot4.png"))), ) with gr.Row(): rag_input_txtbox = gr.Textbox( show_label=False,autofocus=True, placeholder="Enter text and press enter",scale=6) rag_generate_btn = gr.Button("Generate", scale=1) rag_stop_btn = gr.Button("Stop", scale=1) # rag_clear_btn = gr.Button("Clear", scale=1) rag_model_running_status_markdown = gr.Markdown( f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) # retrieved_document_chunks_markdown = gr.Markdown( # f"### &nbsp;&nbsp;Retrieved Document Chunks",visible=True) retrieved_document_chunks_dataframe = gr.Dataframe( headers=["ID", "Chunk"], datatype=["str", "str"], show_label=False, value=None ) with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Group(): gr.Markdown("### &nbsp;&nbsp;1.Chunking", elem_classes="white_background") with gr.Row(): text_splitter_dropdown = gr.Dropdown(["RecursiveCharacterTextSplitter"], label=f"Text Splitter", value="RecursiveCharacterTextSplitter", interactive=True, scale=1, min_width=1) with gr.Row(): chunk_size_slider = gr.Slider(32, 1024, value=256, step=32, label="Chunk Size", interactive=True, scale=1) chunk_overlap_slider = gr.Slider(0, 500, value=20, step=10, 
label="Chunk Overlap", interactive=True) Separators_textbox = gr.Textbox(label="Separators", value='''["\n\n", "\n", ".", " ", ""]''', interactive=True,visible=False) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;2.Vector Store Retriever", elem_classes="white_background") # local_embedding_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"rag","embedding_models") local_embedding_model_names = get_hg_model_names_from_dir(local_embedding_model_dir,"embedding_models") embedding_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_embedding_model_dir})"] embedding_model_source_radio = gr.Radio(embedding_model_source_radio_choices, label="Embedding Model Source", value=embedding_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_embedding_model_names_dropdown = gr.Dropdown(embedding_model_names, label=f"",show_label=False, value=embedding_model_names[0] if embedding_model_names else None, interactive=True, scale=4, min_width=1) download_hub_embedding_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_embedding_model_names_btn = gr.Button("Stop", scale=1, visible=False) local_embedding_model_names_dropdown = gr.Dropdown(local_embedding_model_names, label=f"Embedding Model",show_label=False, value=local_embedding_model_names[0] if local_embedding_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_embedding_model_names_btn = gr.Button("Refresh", scale=1,visible=False) # model_config_path1 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "pytorch_model.bin") # model_config_path2 = os.path.join(local_embedding_model_dir, # embedding_model_names[0], "model.safetensors") model_config_path = os.path.join(local_embedding_model_dir, embedding_model_names[0], "config.json") if os.path.exists(model_config_path): download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_embedding_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Row(): search_top_k_slider = gr.Slider(1, 10, value=3, step=1, label="Search Top K", interactive=True) search_score_threshold_slider = gr.Slider(0, 1, value=0.5, step=0.1, label="Search Score Threshold",interactive=True) with gr.Group(): gr.Markdown("### &nbsp;&nbsp;3.Chat Model", elem_classes="white_background") local_chat_model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),"models") runs_model_root_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "runs") # local_chat_model_names = get_hg_model_names_from_dir(local_chat_model_dir) local_chat_model_names = get_hg_model_names_and_gguf_from_dir(local_chat_model_dir,runs_model_root_dir) chat_model_source_radio_choices = ["Download From Huggingface Hub", f"From Local Dir(hg format:{local_chat_model_dir})"] chat_model_source_radio = gr.Radio(chat_model_source_radio_choices, label="Chat Model source",show_label=False, value=chat_model_source_radio_choices[0], interactive=True) with gr.Row(): hub_chat_model_names_dropdown = gr.Dropdown(base_model_names, label=f"Chat Model",show_label=False,allow_custom_value=True, value=base_model_names[0] if base_model_names else None, interactive=True, scale=4, min_width=1) download_hub_chat_model_names_btn = gr.Button("Download", scale=1) stop_download_hub_chat_model_names_btn = gr.Button("Stop", scale=1, 
visible=False) local_chat_model_names_dropdown = gr.Dropdown(local_chat_model_names, label=f"Chat Model",show_label=False, value=local_chat_model_names[0] if local_chat_model_names else None, interactive=True, scale=4, min_width=1,visible=False) refresh_local_chat_model_names_btn = gr.Button("Refresh", scale=1,visible=False) rag_using_4bit_quantization_checkbox = gr.Checkbox(True, label="Using 4-bit quantization", interactive=True, visible=True, info="Less memory but slower", scale=1 ) if validate_model_path(base_model_names[0])[0]: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:green">&nbsp;&nbsp;&nbsp;&nbsp;This model has already been downloaded to local.</span>') else: download_hub_chat_model_status_markdown = gr.Markdown( '<span style="color:red">&nbsp;&nbsp;&nbsp;&nbsp;This model has not been downloaded.</span>') with gr.Tab("Setting"): # with gr.Column(scale=4, min_width=1): with gr.Group(): gr.Markdown("## &nbsp;Setting", elem_classes="white_background") with gr.Group(): with gr.Row(): max_new_tokens_slider = gr.Slider(1, 4096, value=256, step=1, label="Max New Tokens", interactive=True) temperature_slider = gr.Slider(0, 5, value=1, step=0.1, label="Temperature", interactive=True) with gr.Row(): top_k_slider = gr.Slider(1, 100, value=50, step=1, label="Top_k", interactive=True) top_p_slider = gr.Slider(0, 1, value=1, step=0.1, label="Top_p", interactive=True) with gr.Row(): repeat_penalty_slider = gr.Slider(1, 5, value=1, step=0.1, label="Repeat Penalty", interactive=True) with gr.Row(): chat_history_window_slider = gr.Slider(1, 20, value=3, step=1, label="Chat History Window", interactive=True) low_cpu_mem_usage_checkbox = gr.Checkbox(False, label="Low CPU Mem Usage",interactive=True,visible=False) Huggingface_hub_token = gr.Textbox(label="Huggingface Hub Token", value="") def check_local_model_or_dataset_is_empty1(base_model_name_dropdown,Huggingface_hub_token): if len(base_model_name_dropdown.strip()) == 0: raise gr.Error("Name is empty!") try:
login_huggingface(Huggingface_hub_token,base_model_name_dropdown)
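For orientation, a minimal sketch of how the four-part prompt template in the training tab above (Prefix1+ColumnName1+Prefix2+ColumnName2+Prefix3+ColumnName3+Prefix4+ColumnName4) and the noted Llama2/Mistral chat format compose; `build_prompt` and `to_llama2_chat` are hypothetical helper names, not functions from this repository.

```python
# Hypothetical helpers mirroring the UI's prompt assembly; not part of the source.
def build_prompt(prefix1, col1, prefix2, col2, prefix3, col3, prefix4, col4):
    # Mirrors: prompt_sample = INIT_PREFIX1 + INIT_COL1_TEXT + ... + INIT_COL4_TEXT
    return prefix1 + col1 + prefix2 + col2 + prefix3 + col3 + prefix4 + col4

def to_llama2_chat(instruction_and_input: str, output: str) -> str:
    # "<s>[INST] instruction+input [/INST] output</s>" per the note in the UI.
    return f"<s>[INST] {instruction_and_input} [/INST] {output}</s>"
```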
0
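The GGUF Quantization Instruction in the UI above defers to llama.cpp. A sketch of that conversion under stated assumptions: the script and binary names (`convert.py`, `quantize`) follow the llama.cpp README of that period, and the run directory path is purely illustrative.

```python
import subprocess

merged_dir = "runs/my_run/output_model/merged_model/ori"  # hypothetical path
# Convert the merged HF checkpoint to GGUF (assumed to emit ggml-model-f16.gguf).
subprocess.run(["python", "convert.py", merged_dir], check=True)
# Quantize to Q4_K_M, the level recommended in the UI text.
subprocess.run([
    "./quantize",
    f"{merged_dir}/ggml-model-f16.gguf",
    f"{merged_dir}/ggml-model-Q4_K_M.gguf",
    "Q4_K_M",
], check=True)
```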
2023-11-25 12:37:21+00:00
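The Training Arguments group above exposes epochs, batch size, learning rate, warmup steps, scheduler type, gradient accumulation, eval steps, and gradient checkpointing. One plausible mapping of those UI defaults onto `transformers.TrainingArguments` (the field names are the real transformers API; the output directory is an assumption):

```python
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="runs/my_run",       # hypothetical run directory
    num_train_epochs=10,            # Epochs slider default
    per_device_train_batch_size=1,  # Batch Size slider default
    learning_rate=2e-4,             # Learning Rate slider default
    warmup_steps=100,               # Warmup Steps slider default
    lr_scheduler_type="linear",     # first entry of lr_scheduler_list
    gradient_accumulation_steps=1,
    evaluation_strategy="steps",
    eval_steps=100,
    gradient_checkpointing=True,
)
```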
24k
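Closing out the sample above: its RAG tab configures a RecursiveCharacterTextSplitter with Chunk Size 256, Chunk Overlap 20, and Separators ["\n\n", "\n", ".", " ", ""]. A standalone sketch of the same chunking via LangChain; the input file path is illustrative.

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter

splitter = RecursiveCharacterTextSplitter(
    chunk_size=256,
    chunk_overlap=20,
    separators=["\n\n", "\n", ".", " ", ""],
)
with open("rag/data/example.txt") as f:  # hypothetical document
    chunks = splitter.split_text(f.read())
print(len(chunks), chunks[:2])
```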
danilonumeroso/conar
models/tsp_reasoner.py
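The context list that follows references `beam_search_rollout(start_route, W, num_nodes, beam_width)` with `BEAM_WIDTH = 128` from baselines/beam_search.py. Below is a plain-Python toy sketch of such a rollout over a TSP cost matrix, included only to make the signature concrete; the repository's version is a vectorized (vmapped) implementation, and this reading of its semantics is an assumption.

```python
import numpy as np

def beam_search_rollout(start, W, num_nodes, beam_width=128):
    """Toy beam search over a TSP cost matrix W (illustrative only)."""
    # Each beam entry: (cost so far, route so far, visited node set).
    beams = [(0.0, [start], frozenset([start]))]
    for _ in range(num_nodes - 1):
        candidates = []
        for cost, route, visited in beams:
            last = route[-1]
            for nxt in range(num_nodes):
                if nxt not in visited:
                    candidates.append((cost + float(W[last, nxt]),
                                       route + [nxt],
                                       visited | {nxt}))
        candidates.sort(key=lambda c: c[0])  # keep the beam_width cheapest partial tours
        beams = candidates[:beam_width]
    # Close the tour back to the start node and keep the cheapest route.
    cost, route, _ = min(beams, key=lambda c: c[0] + float(W[c[1][-1], start]))
    return route + [start], cost + float(W[route[-1], start])

W = np.random.rand(6, 6)  # toy cost matrix for demonstration
print(beam_search_rollout(0, W, num_nodes=6, beam_width=4))
```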
[ { "identifier": "vmapped_beam_search_rollout", "path": "baselines/beam_search.py", "snippet": "BEAM_WIDTH = 128\ndef expand_single(beam_vis, beam_last, beam_cost, beam_par, W):\ndef beam_search_rollout_step(W, beam_width, i, tpl):\ndef beam_search_rollout(start_route, W, num_nodes, beam_width):\ndef beam_search_baseline(data, return_ratio=True):" }, { "identifier": "AlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class AlgorithmReasoner(nn.Module):\n @staticmethod\n def prepare_batch(batch):\n batch = batch.clone()\n for name, tensor in batch.items():\n if not torch.is_tensor(tensor):\n continue\n if name.endswith('_temporal') and 'index' not in name:\n tensor = tensor.transpose(1, 0)\n batch[name] = tensor\n return batch\n\n @staticmethod\n def get_masks(train, batch, continue_logits, enforced_mask):\n mask = continue_logits[batch.batch] > 0\n mask_cp = (continue_logits > 0.0).bool()\n mask_edges = mask[batch.edge_index[0]]\n if not train and enforced_mask is not None:\n enforced_mask_ids = enforced_mask[batch.batch]\n mask &= enforced_mask_ids\n mask_cp &= enforced_mask\n return mask_cp, mask, mask_edges\n\n def add_encoder(self, stage, name, loc, data_type, data_sample, bias):\n if name == 'adj': # we use edge indices\n return\n if data_type == Type.SCALAR or data_type == Type.MASK or data_type == Type.MASK_ONE:\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n self.encoders[stage][name] = nn.Linear(in_shape, self.latent_features, bias=bias)\n\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are 1-hot encoded on the edges\n self.encoders[stage][name] = nn.Linear(1, self.latent_features, bias=bias)\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.encoders[stage][name] = nn.ModuleList([\n nn.Linear(1, self.latent_features, bias=bias),\n nn.Linear(1, self.latent_features, bias=bias)\n ])\n\n def add_decoder(self, stage, name, loc, data_type, data_sample, bias):\n assert name != 'adj', 'Adjacency matrix should not be decoded'\n dec = None\n if loc == Location.NODE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.Linear(2*self.latent_features, 1, bias=bias)\n\n if data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.Linear(2*self.latent_features, in_shape, bias=bias)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]: # pointers are decoded from both node and edge information\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if loc == Location.GRAPH:\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n in_shape = data_sample.shape[-1] if data_type == Type.CATEGORICAL else 1\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n\n if loc == Location.EDGE:\n if data_type in (Type.SCALAR, Type.MASK, Type.MASK_ONE):\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(2*self.latent_features, 1, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n if 
data_type == Type.CATEGORICAL:\n in_shape = data_sample.shape[-1]\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(2*self.latent_features, in_shape, bias=bias),\n nn.Linear(self.latent_features, in_shape, bias=bias),\n ])\n if data_type == Type.POINTER:\n dec = nn.ModuleList([\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, self.latent_features, bias=bias),\n nn.Linear(2*self.latent_features, self.latent_features, bias=bias),\n nn.Linear(self.latent_features, 1, bias=bias),\n ])\n assert dec is not None, breakpoint()\n self.decoders[stage][name] = dec\n\n\n\n\n def __init__(self,\n spec,\n data,\n latent_features,\n algo_processor,\n bias=True,\n use_TF=False,\n use_sinkhorn=True,\n L1_loss=False,\n xavier_on_scalars=True,\n global_termination_pool='max', #'predinet',\n get_attention=False,\n use_batch_norm=False,\n transferring=False,\n timeit=True,\n **kwargs):\n\n super().__init__()\n self.step_idx = 0\n self.latent_features = latent_features\n self.assert_checks = False\n self.timeit = timeit\n self.debug = False\n self.debug_epoch_threshold = 1e9\n self.L1_loss = L1_loss\n self.global_termination_pool = global_termination_pool\n self.next_step_pool = True\n self.processor = algo_processor\n self.triplet_reasoning = False\n if isinstance(self.processor.processors[0].processor, TripletMPNN):\n self.triplet_reasoning = True\n self.triplet_reductor = nn.Linear(2*latent_features, latent_features, bias=bias)\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.get_attention = get_attention\n self.lambda_mul = 1 # 0.0001\n self.transferring = transferring\n self.node_encoder = nn.Sequential(\n nn.Linear(2*latent_features, latent_features, bias=bias),\n )\n self.encoders = nn.ModuleDict({\n 'input': nn.ModuleDict({\n }),\n 'hint': nn.ModuleDict({\n }),\n })\n self.decoders = nn.ModuleDict({\n 'hint': nn.ModuleDict({\n }),\n 'output': nn.ModuleDict({\n })\n })\n for name, (stage, loc, datatype) in spec.items():\n if name == 'adj': # we use edge indices\n continue\n if stage == 'input':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'output':\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n if stage == 'hint':\n self.add_encoder(stage, name, loc, datatype, getattr(data, name), bias)\n self.add_decoder(stage, name, loc, datatype, getattr(data, name), bias)\n\n self.node_pointer_vec = nn.Parameter(torch.randn(latent_features))\n if xavier_on_scalars:\n assert False, \"NEEDS REFACTORING\"\n torch.nn.init.trunc_normal_(self.encoders['input']['edge_attr'].weight, std=1/torch.sqrt(torch.tensor(latent_features)))\n\n if global_termination_pool == 'attention':\n inp_dim = latent_features\n self.global_attn = GlobalAttentionPlusCoef(\n nn.Sequential(\n nn.Linear(inp_dim, latent_features, bias=bias),\n nn.LeakyReLU(),\n nn.Linear(latent_features, 1, bias=bias)\n ),\n nn=None)\n\n if global_termination_pool == 'predinet':\n lf = latent_features\n self.predinet = PrediNet(lf, 1, lf, lf, flatten_pooling=torch_geometric.nn.glob.global_max_pool)\n\n self.termination_network = nn.Sequential(\n nn.BatchNorm1d(latent_features) if use_batch_norm else nn.Identity(),\n nn.Linear(latent_features, 1, bias=bias),\n )\n\n def get_continue_logits(self, batch_ids, latent_nodes, sth_else=None):\n if self.global_termination_pool == 'mean':\n graph_latent = 
torch_geometric.nn.global_mean_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'max':\n graph_latent = torch_geometric.nn.global_max_pool(latent_nodes, batch_ids)\n if self.global_termination_pool == 'attention':\n graph_latent, coef = self.global_attn(latent_nodes, batch_ids)\n if self.get_attention:\n self.attentions[self.step_idx] = coef.clone().detach()\n self.per_step_latent[self.step_idx] = sth_else\n\n if self.global_termination_pool == 'predinet':\n assert not torch.isnan(latent_nodes).any()\n graph_latent = self.predinet(latent_nodes, batch_ids)\n\n if self.get_attention:\n self.attentions[self.step_idx] = latent_nodes\n continue_logits = self.termination_network(graph_latent).view(-1)\n return continue_logits\n\n def zero_termination(self):\n self.true_positive = 0\n self.false_positive = 0\n self.false_negative = 0\n self.true_negative = 0\n\n def zero_steps(self):\n self.sum_of_processed_nodes = 0\n self.sum_of_processed_edges = 0\n self.step_idx = 0\n self.sum_of_steps = 0\n self.cnt = 0\n\n @staticmethod\n def convert_logits_to_outputs(spec,\n logits,\n fr,\n to,\n num_nodes,\n batch_ids,\n include_probabilities=True,\n dbg=False):\n outs = defaultdict(dict)\n\n for stage in logits.keys():\n for name in logits[stage].keys():\n if name not in logits[stage] or name not in spec:\n continue\n stage, loc, data_type = spec[name]\n assert stage != Stage.INPUT\n if data_type == Type.SOFT_POINTER:\n assert False, f\"Not yet added, please add {name}\"\n if data_type in [Type.CATEGORICAL]:\n indices = logits[stage][name].argmax(-1)\n outshape = logits[stage][name].shape[-1]\n outs[stage][name] = F.one_hot(indices, num_classes=outshape).float()\n if data_type == Type.MASK_ONE:\n _, amax = torch_scatter.scatter_max(logits[stage][name], batch_ids, dim=0)\n amax = amax.squeeze(-1)\n outs[stage][name] = torch.zeros_like(logits[stage][name])\n outs[stage][name][amax] = 1\n if data_type == Type.MASK:\n outs[stage][name] = (logits[stage][name] > 0).float()\n if data_type == Type.SCALAR:\n outs[stage][name] = logits[stage][name]\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n _, pointers = torch_scatter.scatter_max(pointer_logits, fr, dim_size=num_nodes)\n pointers = to[pointers]\n pointer_probabilities = torch_geometric.utils.softmax(pointer_logits, fr, num_nodes=num_nodes)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pointer_logits = logits[stage][name]\n pointers = pointer_logits.argmax(-1)\n pointer_probabilities = F.softmax(pointer_logits, dim=-1)\n outs[stage][name] = pointers\n if include_probabilities:\n outs[stage][f'{name}_probabilities'] = pointer_probabilities\n return outs\n\n def set_initial_states(self, batch, init_last_latent=None):\n self.processor.zero_lstm(batch.num_nodes) # NO-OP if processor(s) don't use LSTM\n self.last_latent = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n if init_last_latent is not None:\n self.last_latent = init_last_latent\n self.last_latent_edges = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n self.last_continue_logits = torch.ones(batch.num_graphs, device=batch.edge_index.device)\n self.last_logits = defaultdict(dict)\n\n\n for name, (stage, loc, data_type) in 
self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n if name not in self.decoders[stage]:\n continue\n if stage == Stage.OUTPUT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n if data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name).unsqueeze(-1)\n if data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name).bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name).int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.last_logits[stage][name] = torch.full((batch.edge_index.shape[1], int(ptrs.max().item())+1), -1e9).to(batch.edge_index.device)\n self.last_logits[stage][name][torch.arange(ptrs.shape[0]), ptrs] = 1e9\n else:\n assert False, breakpoint()\n\n if stage == Stage.HINT:\n\n if loc in [Location.NODE, Location.GRAPH]:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0].unsqueeze(-1)\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n self.last_logits[stage][name] = torch.where(batch.edge_index[0, :] == batch.edge_index[1, :], 1e9, -1e9).to(batch.edge_index.device) # self-loops\n else:\n assert False, breakpoint()\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n self.last_logits[stage][name] = getattr(batch, name)[0]\n elif data_type in [Type.MASK, Type.MASK_ONE]:\n self.last_logits[stage][name] = torch.where(getattr(batch, name)[0, :].bool(), 1e9, -1e9).unsqueeze(-1)\n elif data_type == Type.SCALAR:\n self.last_logits[stage][name] = getattr(batch, name)[0, :].unsqueeze(-1)\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n ptrs = getattr(batch, name)[0, :].int()\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n ptrs = ptrs - starts_edge\n self.max_nodes_in_graph = int(ptrs.max().item())+1 # FIXME try another way to infer\n self.last_logits[stage][name] = torch.where(edge_one_hot_encode_pointers_edge(ptrs, batch, self.max_nodes_in_graph).bool(), 1e9, -1e9).to(batch.edge_index.device)\n else:\n assert False, breakpoint()\n\n self.all_hint_logits = []\n self.all_masks_graph = []\n\n def update_per_mask(self, before, after, mask=None):\n # NOTE: this does expansion of the mask, if you do\n # NOT use expansion, use torch.where\n if mask is None:\n mask = self.mask\n mask = mask.unsqueeze(-1).expand_as(before)\n return torch.where(mask, after, before)\n\n def update_state_dict(self, before, after):\n new_before = defaultdict(dict)\n for stage in after.keys():\n for name in 
after[stage].keys():\n _, loc, data_type = self.dataset_spec[name]\n if loc == Location.GRAPH:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_cp)\n if loc == Location.EDGE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL, Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name], mask=self.mask_edges)\n else:\n assert False, \"Please implement\"\n if loc == Location.NODE:\n if data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n new_before[stage][name] = self.update_per_mask(before[stage][name], after[stage][name])\n elif data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n new_before[stage][name] = torch.where(self.mask_edges, after[stage][name], before[stage][name])\n else:\n assert False, breakpoint()\n return new_before\n\n def update_states(self, batch, current_latent, edges_current_latent,\n logits, continue_logits):\n self.last_continue_logits = torch.where(self.mask_cp, continue_logits,\n self.last_continue_logits)\n self.last_latent = self.update_per_mask(self.last_latent, current_latent)\n self.last_latent_edges = self.update_per_mask(self.last_latent_edges, edges_current_latent, mask=self.mask_edges)\n self.last_logits = self.update_state_dict(self.last_logits, logits)\n self.all_hint_logits.append(self.last_logits['hint'])\n self.all_masks_graph.append(self.mask_cp)\n preds = type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)\n self.last_hint = preds['hint']\n self.last_output = preds['output']\n\n def prepare_initial_masks(self, batch):\n self.mask = torch.ones_like(batch.batch, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_cp = torch.ones(batch.num_graphs, dtype=torch.bool, device=batch.edge_index.device)\n self.mask_edges = torch.ones_like(batch.edge_index[0], dtype=torch.bool, device=batch.edge_index.device)\n\n def loop_condition(self, termination, STEPS_SIZE):\n return (((not self.training and termination.any()) or\n (self.training and termination.any())) and\n self.step_idx+1 < STEPS_SIZE)\n\n def loop_body(self,\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=1000):\n\n current_latent, edges_current_latent, preds, continue_logits =\\\n self.forward(\n batch,\n node_fts,\n edge_fts,\n graph_fts,\n first_n_processors=first_n_processors,\n )\n termination = continue_logits\n\n self.debug_batch = batch\n self.debug_hint_out_curr = hint_out_curr\n if self.timeit:\n st = time.time()\n self.update_states(batch, current_latent, edges_current_latent, preds, termination)\n if self.timeit:\n print(f'updating states: {time.time()-st}')\n\n def get_step_input(self, x_curr, batch):\n if self.training and self.use_TF or self.hardcode_outputs:\n return x_curr\n return type(self).convert_logits_to_outputs(\n self.dataset_spec, self.last_logits, batch.edge_index[0],\n batch.edge_index[1], batch.num_nodes, batch.batch,\n self.epoch > self.debug_epoch_threshold)['hint']\n\n def encode_inputs(self, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n for name, (stage, loc, data_type) 
in self.dataset_spec.items():\n if stage != Stage.INPUT:\n continue\n if name not in self.encoders[stage]:\n continue\n data = getattr(batch, name)\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n assert False, breakpoint() # we don't have it for now (B-F/MST), will figure out later\n if data_type != Type.CATEGORICAL:\n data = data.unsqueeze(-1)\n if loc == Location.EDGE:\n edge_fts += self.encoders[stage][name](data)\n if loc == Location.NODE:\n node_fts += self.encoders[stage][name](data)\n return node_fts, edge_fts\n\n def encode_hints(self, hints, batch):\n node_fts = torch.zeros(batch.num_nodes, self.latent_features, device=batch.edge_index.device)\n edge_fts = torch.zeros(batch.num_edges, self.latent_features, device=batch.edge_index.device)\n graph_fts = torch.zeros(batch.num_graphs, self.latent_features, device=batch.edge_index.device)\n\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n if name not in self.encoders[stage]:\n continue\n hint = hints[name]\n if loc == Location.NODE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n node_fts = node_fts + self.encoders['hint'][name](hint)\n if loc == Location.EDGE and data_type in [Type.MASK, Type.MASK_ONE, Type.SCALAR, Type.CATEGORICAL]:\n edge_fts = edge_fts + self.encoders['hint'][name](hint)\n if loc == Location.NODE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(hint, batch.edge_index)\n edge_fts = edge_fts + self.encoders['hint'][name](pred_gt_one_hot.unsqueeze(-1))\n if loc == Location.EDGE and data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers_edge(hint, batch, self.max_nodes_in_graph)\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n encoding = self.encoders['hint'][name][0](pred_gt_one_hot.unsqueeze(-1))\n encoding_2 = self.encoders['hint'][name][1](pred_gt_one_hot.unsqueeze(-1))\n encoding_sparse = SparseTensor(row=batch.edge_index[0], col=batch.edge_index[1], value=encoding)\n res_1 = encoding_sparse.mean(1)[batch.edge_index[0], batch.edge_index[1]-starts_edge]\n res_2 = encoding_2.mean(1)\n edge_fts += res_1 + res_2 # INPLACE\n if loc == Location.GRAPH and data_type in [Type.CATEGORICAL, Type.SCALAR, Type.MASK]:\n graph_fts = graph_fts + self.encoders['hint'][name](hint)\n return node_fts, edge_fts, graph_fts\n\n def get_input_output_hints(self, batch):\n hint_inp_curr = {}\n hint_out_curr = {}\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage != Stage.HINT:\n continue\n hint_inp_curr[name] = getattr(batch, name)[self.step_idx]\n hint_out_curr[name] = getattr(batch, name)[self.step_idx+1]\n if 'mask' in data_type or data_type == Type.SCALAR:\n hint_inp_curr[name] = hint_inp_curr[name].unsqueeze(-1)\n hint_out_curr[name] = hint_out_curr[name].unsqueeze(-1)\n return hint_inp_curr, hint_out_curr\n\n def process(\n self,\n batch,\n EPSILON=0,\n enforced_mask=None,\n hardcode_outputs=False,\n debug=False,\n first_n_processors=1000,\n init_last_latent=None,\n **kwargs):\n\n SIZE, STEPS_SIZE = prepare_constants(batch)\n self.hardcode_outputs = hardcode_outputs\n\n # Pytorch Geometric batches along the node dimension, but we execute\n # along the temporal (step) dimension, hence we need to transpose\n # a few tensors. 
Done by `prepare_batch`.\n if self.assert_checks:\n check_edge_index_sorted(batch.edge_index)\n if self.epoch > self.debug_epoch_threshold:\n breakpoint()\n self.zero_steps()\n batch = type(self).prepare_batch(batch)\n # When we want to calculate last step metrics/accuracies\n # we need to take into account again different termination per graph\n # hence we save last step tensors (e.g. outputs) into their\n # corresponding tensor. The function below prepares these tensors\n # (all set to zeros, except masking for computation, which are ones)\n self.set_initial_states(batch, init_last_latent=init_last_latent)\n # Prepare masking tensors (each graph does at least 1 iteration of the algo)\n self.prepare_initial_masks(batch)\n # A flag if we had a wrong graph in the batch. Used for visualisation\n # of what went wrong\n self.wrong_flag = False\n assert self.mask_cp.all(), self.mask_cp\n if self.timeit:\n st = time.time()\n node_fts_inp, edge_fts_inp = self.encode_inputs(batch)\n if self.timeit:\n print(f'encoding inputs: {time.time()-st}')\n\n while True:\n hint_inp_curr, hint_out_curr = self.get_input_output_hints(batch)\n if not self.training:\n assert (self.last_continue_logits > 0).any() or True\n\n # Some algorithms output fewer values than they take\n # so if we reuse our last step outputs, they need to be fed back in.\n if self.timeit:\n st = time.time()\n hint_inp_curr = self.get_step_input(hint_inp_curr, batch)\n if self.timeit:\n print(f'getting step input : {time.time()-st}')\n st = time.time()\n node_fts_hint, edge_fts_hint, graph_fts = self.encode_hints(hint_inp_curr, batch)\n node_fts = node_fts_inp + node_fts_hint\n edge_fts = edge_fts_inp + edge_fts_hint\n if self.timeit:\n print(f'encoding hints: {time.time()-st}')\n\n true_termination = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n\n # Does one iteration of the algo and accumulates statistics\n self.loop_body(batch,\n node_fts,\n edge_fts,\n graph_fts,\n hint_inp_curr,\n hint_out_curr,\n true_termination,\n first_n_processors=first_n_processors)\n # And calculate what graphs would execute on the next step.\n self.mask_cp, self.mask, self.mask_edges = type(self).get_masks(self.training, batch, true_termination if self.training else self.last_continue_logits, enforced_mask)\n if not self.loop_condition(\n self.mask_cp,\n STEPS_SIZE):\n break\n assert self.mask_cp.any()\n self.step_idx += 1\n\n return self.all_hint_logits, self.last_logits, self.all_masks_graph\n\n def decode(self, batch, encoded_nodes, hidden, edge_fts, graph_fts):\n catted = torch.cat((encoded_nodes, hidden), dim=1)\n outs = defaultdict(dict)\n for name, (stage, loc, data_type) in self.dataset_spec.items():\n if stage == Stage.INPUT:\n continue\n\n if loc == Location.NODE:\n\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name](catted)\n\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n prod = self.decoders[stage][name][3](to.max(fr+edge)).squeeze(-1)\n if data_type in [Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION] and self.use_sinkhorn:\n prod = torch.maximum(prod, self.decoders[stage][name][3](fr.max(to+edge)).squeeze(-1))\n prod = sinkhorn_normalize(batch, prod, temperature=0.1, steps=10 if self.training else 60, add_noise=self.training)\n 
outs[stage][name] = prod\n\n if loc == Location.GRAPH:\n aggr_node_fts = torch_scatter.scatter_max(catted, batch.batch, dim=0)[0]\n if data_type in [Type.MASK, Type.SCALAR, Type.CATEGORICAL, Type.MASK_ONE]:\n outs[stage][name] = self.decoders[stage][name][0](aggr_node_fts) + self.decoders[stage][name][1](graph_fts)\n else:\n assert False\n\n if loc == Location.EDGE:\n fr = self.decoders[stage][name][0](catted[batch.edge_index[0]])\n to = self.decoders[stage][name][1](catted[batch.edge_index[1]])\n edge = self.decoders[stage][name][2](edge_fts)\n if data_type in (Type.CATEGORICAL, Type.MASK, Type.SCALAR):\n outs[stage][name] = fr + to + edge\n elif data_type == Type.POINTER:\n pred = fr + to + edge\n pred_2 = self.decoders[stage][name][3](catted)\n ebatch = batch.edge_index_batch\n st = batch.ptr[ebatch]\n en = batch.ptr[ebatch+1]\n dense_pred_2, mask_pred_2 = tg_utils.to_dense_batch(pred_2, batch=batch.batch)\n edge_pred_2 = dense_pred_2[ebatch]\n mask_edge_pred_2 = mask_pred_2[ebatch]\n probs_logits = self.decoders[stage][name][4](torch.maximum(pred[:, None, :], edge_pred_2)).squeeze(-1)\n probs_logits[~mask_edge_pred_2] = -1e9\n outs[stage][name] = probs_logits\n else:\n assert False\n\n return outs\n\n def encode_nodes(self, current_input, last_latent):\n return torch.cat((current_input, last_latent), dim=1)\n\n def forward(self, batch, node_fts, edge_fts, graph_fts, first_n_processors=1000):\n if torch.isnan(node_fts).any():\n breakpoint()\n assert not torch.isnan(self.last_latent).any()\n assert not torch.isnan(node_fts).any()\n if self.timeit:\n st = time.time()\n if self.timeit:\n print(f'projecting nodes: {time.time()-st}')\n\n if self.timeit:\n st = time.time()\n edge_index = batch.edge_index\n hidden, edges_hidden = self.processor(node_fts, edge_fts, graph_fts, edge_index, self.last_latent, self.last_latent_edges, first_n_processors=first_n_processors, batch=batch)\n if self.timeit:\n print(f'message passing: {time.time()-st}')\n assert not torch.isnan(hidden).any()\n if self.timeit:\n st = time.time()\n if self.triplet_reasoning:\n edge_fts = self.triplet_reductor(torch.cat([edge_fts, edges_hidden], dim=-1))\n outs = self.decode(batch, node_fts, hidden, edge_fts, graph_fts)\n if self.timeit:\n print(f'decoding hints: {time.time()-st}')\n continue_logits = torch.where(self.step_idx+1 >= batch.lengths-1, -1e9, 1e9)\n return hidden, edges_hidden, outs, continue_logits" }, { "identifier": "LitAlgorithmReasoner", "path": "models/algorithm_reasoner.py", "snippet": "class LitAlgorithmReasoner(pl.LightningModule):\n def __init__(self,\n hidden_dim,\n algo_processor,\n dataset_class,\n dataset_root,\n dataset_kwargs,\n algorithm='mst_prim',\n update_edges_hidden=False,\n use_TF=False,\n use_sinkhorn=True,\n xavier_on_scalars=True,\n learning_rate=get_hyperparameters()['lr'],\n weight_decay=get_hyperparameters()['weight_decay'],\n test_with_val=False,\n test_with_val_every_n_epoch=20,\n test_train_every_n_epoch=20,\n **algorithm_base_kwargs):\n super().__init__()\n self.hidden_dim = hidden_dim\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.dataset_class = dataset_class\n self.dataset_root = dataset_root\n self.dataset_kwargs = dataset_kwargs\n self.learning_rate = learning_rate\n self.weight_decay = weight_decay\n self.timeit = False\n self.update_edges_hidden = update_edges_hidden\n self.use_TF = use_TF\n self.use_sinkhorn = use_sinkhorn\n self.algorithm_base_kwargs = algorithm_base_kwargs\n self.algorithm = algorithm\n self.xavier_on_scalars = xavier_on_scalars\n 
self.test_with_val = test_with_val\n self.test_with_val_every_n_epoch = test_with_val_every_n_epoch\n self.test_train_every_n_epoch = test_train_every_n_epoch\n self._datasets = {}\n if self.test_with_val:\n self.val_dataloader = self.val_dataloader_alt\n self.validation_step = self.validation_step_alt\n self._current_epoch = 0\n self.load_dataset('train')\n\n self.algorithm_module = AlgorithmReasoner(self.dataset.spec,\n self.dataset[0],\n hidden_dim,\n algo_processor,\n update_edges_hidden=update_edges_hidden,\n use_TF=use_TF,\n use_sinkhorn=use_sinkhorn,\n timeit=self.timeit,\n xavier_on_scalars=xavier_on_scalars,\n **algorithm_base_kwargs)\n self.save_hyperparameters(ignore=['algo_processor'])\n\n @property\n def current_epoch(self) -> int:\n \"\"\"The current epoch in the ``Trainer``, or 0 if not attached.\"\"\"\n return self.trainer.current_epoch if self._trainer else self._current_epoch\n\n @current_epoch.setter\n def current_epoch(self, epoch) -> int:\n self._current_epoch = epoch\n\n def prepare_for_transfer(self):\n algo_processor = copy.deepcopy(self.algorithm_module.processor)\n self.algorithm_module = AlgorithmReasoner(self.hidden_dim,\n self.node_features,\n self.edge_features,\n self.output_features,\n algo_processor,\n use_TF=False,\n timeit=self.timeit,\n **self.algorithm_base_kwargs)\n for p in self.algorithm_module.processor.parameters():\n p.requires_grad = False\n\n @staticmethod\n def pointer_loss(predecessor_pred, predecessor_gt_edge_1h,\n softmax_idx, num_nodes):\n loss_unreduced = cross_entropy(predecessor_pred, softmax_idx, predecessor_gt_edge_1h, num_nodes)\n sum_loss = loss_unreduced.flatten().sum()\n cnt_loss = predecessor_gt_edge_1h.count_nonzero()\n return sum_loss / cnt_loss\n\n def single_prediction_loss(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n loss = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[graph_mask], pred_gt[graph_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[graph_mask].squeeze(-1),\n pred_gt[graph_mask])\n\n if loc == Location.NODE:\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n pred_gt_one_hot = edge_one_hot_encode_pointers(pred_gt, batch.edge_index)\n loss = type(self).pointer_loss(\n pred[edge_mask],\n pred_gt_one_hot[edge_mask],\n batch.edge_index[0][edge_mask], batch.num_nodes)\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.MASK_ONE:\n lsms = torch_scatter.scatter_log_softmax(pred[node_mask], batch.batch[node_mask].unsqueeze(-1), dim=0)\n loss = (-lsms[(pred_gt[node_mask] == 1.)]).mean()\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[node_mask].squeeze(-1),\n pred_gt[node_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[node_mask], pred_gt[node_mask].argmax(-1))\n if loc == Location.EDGE:\n if data_type == Type.MASK:\n loss = F.binary_cross_entropy_with_logits(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type == Type.CATEGORICAL:\n loss = F.cross_entropy(pred[edge_mask], pred_gt[edge_mask].argmax(-1))\n if data_type == Type.SCALAR:\n loss = F.mse_loss(\n pred[edge_mask].squeeze(-1),\n pred_gt[edge_mask])\n if data_type in [Type.POINTER, 
Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n loss = F.cross_entropy(\n pred[edge_mask],\n pred_gt[edge_mask])\n assert loss is not None, f'{stage}/{name}/{loc}/{data_type}'\n return loss\n\n def get_step_loss(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n if self.timeit:\n st = time.time()\n batch = self.algorithm_module.prepare_batch(batch)\n losses_dict = defaultdict(list)\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n assert graph_mask.any()\n for name in pred:\n stage, loc, data_type = self.dataset.spec[name]\n pred_gt = getattr(batch, name)[i+1]\n losses_dict[name].append(\n self.single_prediction_loss(name, pred[name], pred_gt,\n batch, graph_mask, node_mask,\n edge_mask))\n\n for name in output_logits:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n losses_dict[name].append(\n self.single_prediction_loss(name, output_logits[name],\n getattr(batch, name), batch,\n graph_mask, node_mask, edge_mask))\n\n for k, v in losses_dict.items():\n losses_dict[k] = torch.stack(v).mean()\n if self.timeit:\n print(f'loss calculation: {time.time()-st}')\n input()\n\n return losses_dict\n\n def single_prediction_acc(self, name, pred, pred_gt, batch, graph_mask,\n node_mask, edge_mask):\n acc = None\n stage, loc, data_type = self.dataset.spec[name]\n if loc == Location.NODE:\n if data_type == Type.MASK_ONE:\n # try:\n acc = (pred[node_mask].squeeze(-1).nonzero() == pred_gt[node_mask].nonzero()).float().mean()\n # except Exception as e:\n # breakpoint()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION, Type.MASK]:\n acc = (pred[node_mask].squeeze(-1) == pred_gt[node_mask]).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[node_mask].squeeze(-1) - pred_gt[node_mask])**2).mean()\n if data_type == Type.CATEGORICAL:\n acc = (pred[node_mask].argmax(-1) == pred_gt[node_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[node_mask].squeeze(-1), pred_gt[node_mask])\n\n if loc == Location.GRAPH:\n if data_type == Type.CATEGORICAL:\n acc = (pred[graph_mask].argmax(-1) == pred_gt[graph_mask].argmax(-1)).float().mean()\n if data_type == Type.SCALAR:\n acc = ((pred[graph_mask].squeeze(-1) - pred_gt[graph_mask])**2).mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[graph_mask].squeeze(-1), pred_gt[graph_mask])\n\n if loc == Location.EDGE:\n if data_type == Type.CATEGORICAL:\n acc = (pred[edge_mask].argmax(-1) == pred_gt[edge_mask].argmax(-1)).float().mean()\n if data_type == Type.MASK:\n acc = multiclass_f1_score(pred[edge_mask].squeeze(-1), pred_gt[edge_mask])\n if data_type == Type.SCALAR:\n acc = ((pred[edge_mask].squeeze(-1) - pred_gt[edge_mask])**2).mean()\n if data_type in [Type.POINTER, Type.PERMUTATION_POINTER, Type.SHOULD_BE_PERMUTATION]:\n starts_edge = batch.ptr[:-1][batch.batch[batch.edge_index[0]]]\n pred_gt = pred_gt.int() - starts_edge\n acc = (pred[edge_mask] == pred_gt[edge_mask]).float().mean()\n assert acc is not None, f\"Please implement {name}\"\n return acc\n\n def get_metrics(self,\n batch,\n all_hint_logits,\n output_logits,\n all_masks_graph):\n\n batch = self.algorithm_module.prepare_batch(batch)\n 
accs_dict = defaultdict(list)\n\n for i, (pred, graph_mask) in enumerate(zip(all_hint_logits, all_masks_graph)):\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec, {'hint': pred},\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['hint']\n\n for name in outputs:\n acc = self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name)[i+1],\n batch,\n graph_mask,\n node_mask,\n edge_mask)\n accs_dict[name].append(acc)\n\n outputs = type(self.algorithm_module).convert_logits_to_outputs(\n self.dataset.spec,\n output_logits,\n batch.edge_index[0],\n batch.edge_index[1],\n batch.num_nodes,\n batch.batch,\n include_probabilities=False)['output']\n for name in outputs:\n graph_mask = torch.ones(batch.num_graphs, dtype=torch.bool, device=self.device)\n node_mask = graph_mask[batch.batch]\n edge_mask = node_mask[batch.edge_index[0]]\n accs_dict[name].append(\n self.single_prediction_acc(\n name,\n outputs[name],\n getattr(batch, name),\n batch,\n graph_mask,\n node_mask,\n edge_mask))\n\n for k, v in accs_dict.items():\n accs_dict[k] = torch.stack(v).mean()\n\n return accs_dict\n\n def fwd_step(self, batch, batch_idx):\n if self.timeit:\n st = time.time()\n self.algorithm_module.epoch = self.current_epoch\n all_hint_logits, output_logits, masks = self.algorithm_module.process(batch)\n if self.timeit:\n print(f'forward step: {time.time()-st}')\n input()\n return all_hint_logits, output_logits, masks\n\n def training_step(self, batch, batch_idx):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'train/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs)\n total_loss = sum(losses_dict.values()) / len(losses_dict)\n self.log('train/loss/average_loss', total_loss, prog_bar=False, on_step=True, on_epoch=True, batch_size=batch.num_graphs)\n accs_dict = {}\n if self.current_epoch % self.test_train_every_n_epoch == 0:\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'train/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n # if sum(losses_dict.values()) > 1e5:\n # breakpoint()\n return {'loss': total_loss, 'losses_dict': losses_dict, 'accuracies': accs_dict}\n\n def valtest_step(self, batch, batch_idx, mode):\n all_hint_logits, output_logits, masks = self.fwd_step(batch, batch_idx)\n losses_dict = self.get_step_loss(batch, all_hint_logits, output_logits['output'], masks)\n self.log_dict(dict((f'{mode}/loss/{k}', v) for k, v in losses_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n if torch.isnan(sum(losses_dict.values())).any():\n breakpoint()\n self.log(f'{mode}/loss/average_loss', sum(losses_dict.values()) / len(losses_dict), batch_size=batch.num_graphs, add_dataloader_idx=False)\n accs_dict = self.get_metrics(batch, all_hint_logits, output_logits, masks)\n self.log_dict(dict((f'{mode}/acc/{k}', v) for k, v in accs_dict.items()), batch_size=batch.num_graphs, add_dataloader_idx=False)\n return {'losses': losses_dict, 'accuracies': accs_dict}\n\n def validation_step_alt(self, batch, batch_idx, dataloader_idx):\n if dataloader_idx == 1 and not self.trainer.state.stage == 'sanity_check' and self.current_epoch % 
self.test_with_val_every_n_epoch == 0:\n return self.valtest_step(batch, batch_idx, 'periodic_test')\n if dataloader_idx == 0:\n return self.valtest_step(batch, batch_idx, 'val')\n\n def validation_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'val')\n\n def test_step(self, batch, batch_idx):\n return self.valtest_step(batch, batch_idx, 'test')\n\n def predict_step(self, batch, batch_idx):\n return self.fwd_step(batch, batch_idx)\n\n def load_dataset(self, split, suffix=''):\n split = split+suffix\n nn = CONFIGS[self.algorithm][split]['num_nodes']\n self.dataset_kwargs['split'] = split\n if (split, nn) not in self._datasets:\n self._datasets[(split, nn)] = self.dataset_class(\n self.dataset_root,\n nn,\n CONFIGS[self.algorithm][split]['num_samples'],\n algorithm=self.algorithm,\n **self.dataset_kwargs)\n self.dataset = self._datasets[(split, nn)]\n print(f'Loading {self.dataset=} (num nodes: {nn}) with kwargs')\n pprint(self.dataset_kwargs)\n print()\n\n def get_a_loader(self, split, suffix=''):\n self.load_dataset(split, suffix='')\n self.algorithm_module.dataset_spec = self.dataset.spec\n dl = DataLoader(self.dataset,\n batch_size=get_hyperparameters()['batch_size'],\n shuffle=True if split == 'train' else False,\n drop_last=False,\n follow_batch=['edge_index'],\n num_workers=1,\n persistent_workers=True)\n return dl\n\n def train_dataloader(self):\n return self.get_a_loader('train')\n\n def val_dataloader_alt(self):\n return [self.get_a_loader('val'), self.get_a_loader('test')]\n\n def val_dataloader(self):\n return self.get_a_loader('val')\n\n def test_dataloader(self, suffix=''):\n return self.get_a_loader('test'+suffix)\n\n def configure_optimizers(self):\n lr = self.learning_rate\n wd = self.weight_decay\n optimizer = optim.Adam(self.parameters(),\n weight_decay=wd,\n lr=lr)\n return optimizer" }, { "identifier": "get_hyperparameters", "path": "hyperparameters.py", "snippet": "def get_hyperparameters():\n return {\n 'dim_latent': 128,\n 'num_bits': 8,\n 'weight_decay': 0,\n 'lr': 0.0003,\n 'nee_warmup_steps': 4000,\n 'dim_nodes_mst_prim': 1,\n 'dim_target_mst_prim': 1,\n 'device': 'cuda',\n 'batch_size': 64,\n 'bias': True,\n 'seed': 47, # for dataset generation\n 'calculate_termination_statistics': False,\n }" }, { "identifier": "CONFIGS", "path": "datasets/_configs.py", "snippet": "CONFIGS = defaultdict(lambda: _DEFAULT_CONFIG)" }, { "identifier": "cross_entropy", "path": "utils_execution.py", "snippet": "def cross_entropy(pred, softmax_idx, truth_1h, num_nodes):\n lsm_pred = torch.log(torch_geometric.utils.softmax(pred, softmax_idx, num_nodes=num_nodes)+1e-9)\n # truth_1h = F.one_hot(truth, num_nodes)\n return (-truth_1h*lsm_pred)" }, { "identifier": "check_edge_index_sorted", "path": "utils_execution.py", "snippet": "def check_edge_index_sorted(ei):\n for i in range(ei.shape[1]-1):\n assert ei[0][i] <= ei[0][i+1]\n if ei[0][i] == ei[0][i+1]:\n assert ei[1][i] < ei[1][i+1]" }, { "identifier": "prepare_constants", "path": "utils_execution.py", "snippet": "def prepare_constants(batch):\n SIZE = batch.num_nodes\n STEPS_SIZE = batch.lengths.max()-1\n return SIZE, STEPS_SIZE" }, { "identifier": "edge_one_hot_encode_pointers", "path": "utils_execution.py", "snippet": "def edge_one_hot_encode_pointers(pred, edge_index):\n pred_ei = torch.stack((torch.arange(pred.shape[0]).to(pred), pred))\n amat = torch_geometric.utils.to_dense_adj(pred_ei)\n return amat[0, edge_index[0], edge_index[1]]" }, { "identifier": "get_number_of_nodes", "path": "utils_execution.py", 
"snippet": "def get_number_of_nodes(algorithm, split):\n nns = CONFIGS[algorithm][split]['num_nodes']\n if isinstance(nns, int):\n nns = [nns]\n return nns" } ]
from collections import defaultdict from pprint import pprint from torch_geometric.loader import DataLoader from pytorch_lightning.trainer.supporters import CombinedLoader from baselines.beam_search import vmapped_beam_search_rollout, BEAM_WIDTH from models.algorithm_reasoner import AlgorithmReasoner, LitAlgorithmReasoner from hyperparameters import get_hyperparameters from torch_geometric.utils import k_hop_subgraph from datasets._configs import CONFIGS from utils_execution import cross_entropy, check_edge_index_sorted, prepare_constants, edge_one_hot_encode_pointers, get_number_of_nodes from clrs import Type, Location, Stage import copy import itertools import time import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import torch_scatter import torch_geometric import pytorch_lightning as pl
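Among the names imported above, `edge_one_hot_encode_pointers` (its snippet appears in the context list) turns per-node pointer predictions into a per-edge 0/1 signal by building a dense adjacency from the pointer edges and reading it back at the batch's edge positions. A self-contained check of that behaviour, with toy tensors of my own choosing (assumes torch_geometric is installed):

import torch
from torch_geometric.utils import to_dense_adj

def edge_one_hot_encode_pointers(pred, edge_index):
    # pred[i] is the node that node i points to.
    pred_ei = torch.stack((torch.arange(pred.shape[0]).to(pred), pred))
    amat = to_dense_adj(pred_ei)
    return amat[0, edge_index[0], edge_index[1]]

pred = torch.tensor([1, 2, 0])  # cycle 0 -> 1 -> 2 -> 0
edge_index = torch.tensor([[0, 0, 1, 1, 2, 2],
                           [1, 2, 0, 2, 0, 1]])
print(edge_one_hot_encode_pointers(pred, edge_index))
# tensor([1., 0., 0., 1., 1., 0.]) -- 1 exactly on the pointed-to edges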
15275
u1, v1 = batch.edge_index[:, i] u2, v2 = edges[:, j] if u1 == u2 and v1 == v2: mask[i] = 1 j += 1 if j == edges.shape[1]: break assert j == edges.shape[1] return mask def get_mask_v2(edges): dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool() dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool() edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1)) mask = mask - 1 return mask acc = None # st = time.time() outputs = type(self.algorithm_module).convert_logits_to_outputs( self.dataset.spec, output_logits, batch.edge_index[0], batch.edge_index[1], batch.num_nodes, batch.batch, include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, 
batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(), num_nodes, BEAM_WIDTH)), device=start_route.device) # print('tours took', time.time()-st) # st = time.time() dens_logits_o = torch.full_like(dens_logits, -1e9) arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device) fr = tours[arranged, 0] to = tours[arranged, 1] batch_id = arranged.unsqueeze(1).expand_as(fr) fr = fr.flatten() to = to.flatten() batch_id = batch_id.flatten() dens_logits_o[batch_id, fr, to] = 1e9 edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o) sparse_logits = sparse_logits.to(batch.edge_index.device) assert (edge_index == batch.edge_index).all() output_logits['output']['predecessor_index'] = sparse_logits # print('rest took', time.time()-st) return output_logits def process_TSP_tour(self, batch, output_logits): if self.ensure_permutation == "greedy": return self.process_TSP_tour_greedy(batch, output_logits) return self.process_TSP_tour_BS(batch, output_logits) def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): output_logits = self.process_TSP_tour(batch, output_logits) accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_tour_metrics(output_logits, batch)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix
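The NOTE inside process_TSP_tour_greedy above warns that the `preds.reshape(-1, num_nodes_per_graph).argmax(-1)` pattern silently assumes every graph in the batch has the same node count. A size-agnostic sketch of the `torch_scatter.scatter_max` alternative the NOTE suggests (hypothetical helper and toy tensors; `edge_batch` is assumed to hold each edge's graph id):

import torch
import torch_scatter

def per_graph_best_dst(preds, edge_batch, edge_dst):
    # scatter_max returns (max values, argmax indices into preds) per
    # segment, so no equal-size reshape trick is needed.
    _, best_edge = torch_scatter.scatter_max(preds, edge_batch)
    return edge_dst[best_edge]

preds = torch.tensor([0.1, 0.9, 0.3, 0.2, 0.8])   # edge scores
edge_batch = torch.tensor([0, 0, 0, 1, 1])        # 3 + 2 candidate edges
edge_dst = torch.tensor([5, 6, 7, 8, 9])          # destination node ids
print(per_graph_best_dst(preds, edge_batch, edge_dst))  # tensor([6, 9])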
class TSPReasoner(AlgorithmReasoner):
    def __init__(self,
                 spec,
                 data,
                 latent_features,
                 algo_processor,
                 bias=True,
                 use_TF=False,
                 L1_loss=False,
                 global_termination_pool='max', #'predinet',
                 get_attention=False,
                 use_batch_norm=False,
                 transferring=False,
                 timeit=True,
                 double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(
            spec,
            data,
            latent_features,
            algo_processor,
            use_TF=use_TF,
            timeit=timeit,
            L1_loss=L1_loss,
            global_termination_pool=global_termination_pool,
            get_attention=get_attention,
            use_batch_norm=use_batch_norm,
            transferring=transferring,
            **algo_reasoner_kwargs,
        )
        self.step_idx = 0
        self.assert_checks = False
        self.debug = False
        self.debug_epoch_threshold = 1e9
        self.next_step_pool = True
        self.double_process = double_process
        self.lambda_mul = 1  # 0.0001
        self.transferring = transferring

    def get_input_output_hints(self, batch):
        hint_inp_curr = dict()
        hint_out_curr = dict()
        return hint_inp_curr, hint_out_curr

    def process(self, *args, **kwargs):
        self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
            *args,
            first_n_processors=1000 if not self.double_process else 1,
            **kwargs)
        if self.double_process:
            self.all_hint_logits, self.last_logits, self.all_masks_graph = super().process(
                *args,
                init_last_latent=self.last_latent,
                **kwargs)
        return self.all_hint_logits, self.last_logits, self.all_masks_graph

class LitTSPReasoner(LitAlgorithmReasoner):

    def __init__(self,
                 hidden_dim,
                 algo_processor,
                 dataset_class,
                 dataset_root,
                 dataset_kwargs,
                 bias=True,
                 use_TF=False,
                 ensure_permutation='greedy',
                 transferring=False,
                 learning_rate=get_hyperparameters()['lr'],
                 double_process=False,
                 **algo_reasoner_kwargs):
        super().__init__(hidden_dim,
                         algo_processor,
                         dataset_class,
                         dataset_root,
                         dataset_kwargs,
                         bias=bias,
                         use_TF=use_TF,
                         transferring=transferring,
                         learning_rate=learning_rate,
                         **algo_reasoner_kwargs)
        self.algorithm_module = TSPReasoner(self.dataset.spec,
                                            self.dataset[0],
                                            hidden_dim,
                                            algo_processor,
                                            bias=bias,
                                            use_TF=use_TF,
                                            transferring=transferring,
                                            timeit=self.timeit,
                                            double_process=double_process,
                                            **algo_reasoner_kwargs)
        self.ensure_permutation = ensure_permutation
        self.double_process = double_process
        self.save_hyperparameters(ignore=['algo_processor'])

    def training_step(self, batch, batch_idx):
        ret = {'loss': 0, 'losses_dict': defaultdict(list), 'accuracies': defaultdict(list)}
        for bb in batch:
            ans = super().training_step(bb, batch_idx)
            ret['loss'] += ans['loss']
            for name in ['losses_dict', 'accuracies']:
                for k, v in ans[name].items():
                    ret[name][k].append(v)
        ret['loss'] /= len(batch)
        for name in ['losses_dict', 'accuracies']:
            # average the values accumulated over all sub-batches
            for k, v in ret[name].items():
                ret[name][k] = torch.stack(v).mean()
        return ret

    def get_tour_metrics(self, output_logits, batch):

        def get_mask(edges):
            mask = torch.zeros_like(batch.edge_index[0])
            j = 0
            for i in range(batch.edge_index.shape[1]):
                u1, v1 = batch.edge_index[:, i]
                u2, v2 = edges[:, j]
                if u1 == u2 and v1 == v2:
                    mask[i] = 1
                    j += 1
                    if j == edges.shape[1]:
                        break
            assert j == edges.shape[1]
            return mask

        def get_mask_v2(edges):
            dense_edges = torch_geometric.utils.to_dense_adj(edges, batch=batch.batch).bool()
            dense_edges_batch = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch).bool()
            edge_index, mask = torch_geometric.utils.dense_to_sparse(((dense_edges & dense_edges_batch).float()+1))
            mask = mask - 1
            return mask

        acc = None

        # st = time.time()
        outputs = type(self.algorithm_module).convert_logits_to_outputs(
            self.dataset.spec,
            output_logits,
            batch.edge_index[0],
            batch.edge_index[1],
            batch.num_nodes,
            batch.batch,
include_probabilities=False)['output'] for name in outputs: pred = outputs[name] pred_gt = getattr(batch, name) stage, loc, data_type = self.dataset.spec[name] if loc == Location.NODE: if name == 'predecessor_index': tours = torch.stack([torch.arange(pred.shape[0]).to(pred), pred]) mask = get_mask_v2(tours).bool() st = time.time() mattr = batch.edge_attr[mask] mbatch = batch.edge_index_batch[mask] msrc, mdst = batch.edge_index[:, mask] tour_len = torch_scatter.scatter_sum(mattr, mbatch) tour_correctness = torch_scatter.scatter_sum((msrc == mdst.sort().values), mbatch) assert sum(tour_correctness)/len(tour_correctness) == 1 return dict(tour_len=tour_len.mean(), tour_len_gt=batch.optimal_value.mean().item(), tour_correctness=sum(tour_correctness)/len(tour_correctness), tour_relative_error=((tour_len-batch.optimal_value)/batch.optimal_value).mean()) def process_TSP_tour_greedy(self, batch, output_logits): mask_active_nodes = torch.tensor(batch.start_route).bool() mask_edges_to_nodes_in_tour = torch.zeros_like(batch.edge_index[0]).bool() max_nodes_per_graph = batch.batch.unique(return_counts=True)[1].max() num_nodes_per_graph = batch.num_nodes // batch.num_graphs for _ in range(max_nodes_per_graph - 1): mask_active_edges = mask_active_nodes[batch.edge_index[0]] & ~mask_edges_to_nodes_in_tour # Any edge outwards of active nodes and not pointing to previously used node mask_edges_to_nodes_in_tour |= mask_active_nodes[batch.edge_index[1]] # any edge towards the active nodes should not be used in future iterations sloops = (batch.edge_index[0] == batch.edge_index[1]) preds = output_logits['output']['predecessor_index'].clone() preds = preds.masked_fill(~mask_active_edges | sloops, -1e6) # nudge the max value to ensure there is a unique maximum max_idxs = preds.reshape(-1, num_nodes_per_graph).argmax(-1) max_idxs = F.one_hot(max_idxs, num_nodes_per_graph) preds[max_idxs.bool().flatten()] = (preds.reshape(-1, num_nodes_per_graph)[max_idxs.bool()] + 1e-4).flatten() output_logits['output']['predecessor_index'][mask_active_nodes[batch.edge_index[0]]] = preds[mask_active_nodes[batch.edge_index[0]]] new_active_nodes = preds.reshape(-1, num_nodes_per_graph).argmax(-1)[mask_active_nodes.bool()].unsqueeze(-1) # NOTE the reshape/flatten mechanic may not work if graphs in the same batch are of different sizes (consider using torch_scatter.scatter_max) mask_active_nodes = F.one_hot(new_active_nodes, num_nodes_per_graph).flatten().bool() final_pred_mask = mask_active_nodes[batch.edge_index[0]] & batch.start_route.bool()[batch.edge_index[1]] output_logits['output']['predecessor_index'] = output_logits['output']['predecessor_index'].masked_fill(final_pred_mask, 1e8) return output_logits def process_TSP_tour_BS(self, batch, output_logits): start_route = torch_geometric.utils.to_dense_batch(batch.start_route, batch=batch.batch)[0] dens_logits = torch_geometric.utils.to_dense_adj(batch.edge_index, batch=batch.batch, edge_attr=output_logits['output']['predecessor_index']) num_nodes = start_route.shape[1] # st = time.time() tours = torch.tensor(np.array(vmapped_beam_search_rollout( start_route.cpu().detach().numpy(), -dens_logits.cpu().detach().numpy(), num_nodes, BEAM_WIDTH)), device=start_route.device) # print('tours took', time.time()-st) # st = time.time() dens_logits_o = torch.full_like(dens_logits, -1e9) arranged = torch.arange(dens_logits_o.shape[0], device=dens_logits.device) fr = tours[arranged, 0] to = tours[arranged, 1] batch_id = arranged.unsqueeze(1).expand_as(fr) fr = fr.flatten() to = to.flatten() 
batch_id = batch_id.flatten() dens_logits_o[batch_id, fr, to] = 1e9 edge_index, sparse_logits = torch_geometric.utils.dense_to_sparse(dens_logits_o) sparse_logits = sparse_logits.to(batch.edge_index.device) assert (edge_index == batch.edge_index).all() output_logits['output']['predecessor_index'] = sparse_logits # print('rest took', time.time()-st) return output_logits def process_TSP_tour(self, batch, output_logits): if self.ensure_permutation == "greedy": return self.process_TSP_tour_greedy(batch, output_logits) return self.process_TSP_tour_BS(batch, output_logits) def get_metrics(self, batch, all_hint_logits, output_logits, all_masks_graph): output_logits = self.process_TSP_tour(batch, output_logits) accs_dict = super().get_metrics(batch, all_hint_logits, output_logits, all_masks_graph) accs_dict.update(**self.get_tour_metrics(output_logits, batch)) return accs_dict def load_dataset(self, split, suffix=''): split = split+suffix
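get_mask_v2 in the code above leans on a +1/-1 trick: dense_to_sparse drops zero entries, so adding 1 keeps every position of the dense adjacency in the sparse output (value 2 on tour edges, 1 elsewhere), and subtracting 1 recovers a 0/1 mask aligned with batch.edge_index. This only lines up because the TSP instances here are complete graphs. A standalone sketch of the trick on one toy graph (self-loop positions included for simplicity of the toy):

import torch
from torch_geometric.utils import dense_to_sparse

full = torch.ones(3, 3, dtype=torch.bool)      # complete toy graph
tour = torch.zeros(3, 3, dtype=torch.bool)     # predicted tour 0 -> 1 -> 2 -> 0
tour[0, 1] = tour[1, 2] = tour[2, 0] = True

edge_index, vals = dense_to_sparse((tour & full).float() + 1)
mask = (vals - 1).bool()                       # True exactly on tour edges
print(edge_index[:, mask])                     # tensor([[0, 1, 2], [1, 2, 0]])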
nns = get_number_of_nodes(self.algorithm, split)
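The gold next line above calls get_number_of_nodes, whose snippet appears earlier in the context list: it normalizes the per-split num_nodes config to a list so single-size and multi-size splits are handled uniformly. A toy rendering of that normalization (the CONFIGS-style dict is invented here, and the helper takes it as an argument instead of reading the module-level CONFIGS):

def get_number_of_nodes(configs, algorithm, split):
    nns = configs[algorithm][split]['num_nodes']
    if isinstance(nns, int):      # wrap a bare int into a singleton list
        nns = [nns]
    return nns

configs = {'tsp': {'train': {'num_nodes': 20},
                   'test': {'num_nodes': [20, 50, 100]}}}
print(get_number_of_nodes(configs, 'tsp', 'train'))  # [20]
print(get_number_of_nodes(configs, 'tsp', 'test'))   # [20, 50, 100]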
9
2023-11-20 15:32:43+00:00
24k
bearyi26/DCPT
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = 
os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\n \"\"\" GOT-10k dataset.\n\n Publication:\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\n arXiv:1810.11981, 2018\n https://arxiv.org/pdf/1810.11981.pdf\n\n Download dataset from http://got-10k.aitestunion.com/downloads\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\n \"\"\"\n root = env_settings().got10k_dir if root is None else root\n super().__init__('GOT10k', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n return sequence_meta_info\n\n def _read_meta(self, seq_path):\n try:\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n meta_info = f.readlines()\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\n 'major_class': meta_info[7].split(': ')[-1][:-1],\n 'root_class': meta_info[8].split(': ')[-1][:-1],\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n with open(os.path.join(self.root, 'list.txt')) as f:\n dir_list = list(csv.reader(f))\n dir_list = [dir_name[0] for dir_name in dir_list]\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full 
occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n with open(cover_file, 'r', newline='') as f:\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(self.root, self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__('TrackingNet', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class ImagenetVID(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. 
Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid\", root, image_loader)\n\n cache_file = os.path.join(root, 'cache.json')\n if os.path.isfile(cache_file):\n # If available, load the pre-processed cache file containing meta-info for each sequence\n with open(cache_file, 'r') as f:\n sequence_list_dict = json.load(f)\n\n self.sequence_list = sequence_list_dict\n else:\n # Else process the imagenet annotations and generate the cache file\n self.sequence_list = self._process_anno(root)\n\n with open(cache_file, 'w') as f:\n json.dump(self.sequence_list, f)\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n\n def _process_anno(self, root):\n # Builds individual tracklets\n base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n all_sequences = []\n for set in sorted(os.listdir(base_vid_anno_path)):\n set_id = int(set.split('_')[-1])\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n vid_id = int(vid.split('_')[-1])\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n image_size = [int(frame1_anno.find('size/width').text), 
int(frame1_anno.find('size/height').text)]\n\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n for f in anno_files]\n\n tracklets = {}\n\n # Find all tracklets along with start frame\n for f_id, all_targets in enumerate(objects):\n for target in all_targets:\n tracklet_id = target.find('trackid').text\n if tracklet_id not in tracklets:\n tracklets[tracklet_id] = f_id\n\n for tracklet_id, tracklet_start in tracklets.items():\n tracklet_anno = []\n target_visible = []\n class_name_id = None\n\n for f_id in range(tracklet_start, len(objects)):\n found = False\n for target in objects[f_id]:\n if target.find('trackid').text == tracklet_id:\n if not class_name_id:\n class_name_id = target.find('name').text\n x1 = int(target.find('bndbox/xmin').text)\n y1 = int(target.find('bndbox/ymin').text)\n x2 = int(target.find('bndbox/xmax').text)\n y2 = int(target.find('bndbox/ymax').text)\n\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n target_visible.append(target.find('occluded').text == '0')\n\n found = True\n break\n if not found:\n break\n\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\n 'target_visible': target_visible, 'image_size': image_size}\n all_sequences.append(new_sequence)\n\n return all_sequences" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. 
Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta" }, { "identifier": "BDD100K_Night", "path": "lib/train/dataset/bdd100k_night.py", "snippet": "class BDD100K_Night(BaseVideoDataset):\n    def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n        root = env_settings().bdd100k_dir if root is None else root\n        super().__init__('bdd100k_night', root, image_loader)\n\n        self.img_pth = os.path.join(root, 'images/')\n        self.anno_path = os.path.join(root, 'annotations/bdd100k_night.json')\n\n        # load dataset\n        self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()\n        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)\n        if self.anno_path is not None:\n            print('loading annotations into memory...')\n            tic = time.time()\n            with open(self.anno_path, 'r') as f:\n                dataset = json.load(f)\n            print('Done (t={:0.2f}s)'.format(time.time()- tic))\n            self.dataset = dataset\n        self.sequence_list = self._get_sequence_list()\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n\n    # build the list of sequences\n    def _get_sequence_list(self):\n        anns = {}\n        for picture in self.dataset:\n            for box in picture['labels']:\n                anns[box['id']] = box\n                anns[box['id']]['name'] = picture['name']\n        self.anns = anns\n\n        # each entry of anns corresponds to one bounding box\n        seq_list = list(anns.keys())\n\n        return seq_list\n\n    def _get_anno(self, seq_id):\n        anno = self.anns[self.sequence_list[seq_id]]\n        return anno\n\n\n    # fetch the image frame\n    def _get_frames(self, seq_id):\n        path = self.anns[self.sequence_list[seq_id]]['name']\n        img = self.image_loader(os.path.join(self.img_pth, path))\n        return img\n\n    # get the bounding box of each frame\n    def get_sequence_info(self, seq_id):\n        anno = self._get_anno(seq_id)\n\n        x = anno['box2d']['x1']\n        y = anno['box2d']['y1']\n        width = anno['box2d']['x2'] - anno['box2d']['x1']\n        height = anno['box2d']['y2'] - anno['box2d']['y1']\n\n        bbox = torch.Tensor([x,y,width,height]).view(1, 4)\n\n        '''v0.4 BDD100K_Night avoid too small bounding boxes'''\n        valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n        visible = valid.clone().byte()\n\n        return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n    def is_video_sequence(self):\n        return False\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # BDD100K is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] 
for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta\n\n def get_name(self):\n return 'bdd100k_night'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.anns[self.sequence_list[seq_id]]['category']\n object_meta = OrderedDict({'object_class_name': cat_dict_current,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta" }, { "identifier": "SHIFT_Night", "path": "lib/train/dataset/shift_night.py", "snippet": "class SHIFT_Night(BaseVideoDataset):\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n \"\"\"\n SHIFT_NIGHT Dataset\n \"\"\"\n root = env_settings().shift_dir if root is None else root\n super().__init__('shift_night', root, image_loader)\n\n sequence_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n sequence_path = os.path.join(sequence_path, 'data_specs', 'shift_info_1fps.json')\n with open(sequence_path, 'r') as f:\n info = json.load(f)\n self.info = info\n\n self.sequence_list = self._build_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n def _build_sequence_list(self):\n sequence_list = [sequence for sequence in self.info.keys()]\n return sequence_list\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n video_name = seq_name.split('/')[0]\n return os.path.join(self.root, video_name), seq_name\n\n def _get_frame_path(self, seq_path, seq_name, frame_id):\n frame = self.info[seq_name]['frame'][frame_id]\n return os.path.join(seq_path, frame) # frames extracted from info.json\n\n def _get_frame(self, seq_path, seq_name, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, seq_name, frame_id))\n\n def _read_bb_anno(self, seq_path, seq_name):\n bbox_all = []\n for bbox in self.info[seq_name]['box2d']:\n x = bbox['x1']\n y = bbox['y1']\n width = bbox['x2'] - bbox['x1']\n height = bbox['y2'] - bbox['y1']\n bbox_np = np.array([[x,y,width,height]])\n bbox_all.append(bbox_np)\n bbox_all_np = np.concatenate([bbox for bbox in bbox_all],axis=0)\n return torch.tensor(bbox_all_np)\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n seq_path, seq_name = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path, seq_name)\n\n '''v0.4 Shift avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def get_name(self):\n return 'shift_night'\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path, seq_name = self._get_sequence_path(seq_id)\n\n frame_list = [self._get_frame(seq_path, seq_name, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': self.info[seq_name]['category'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { 
"identifier": "ExDark", "path": "lib/train/dataset/exdark.py", "snippet": "class ExDark(BaseVideoDataset):\n \"\"\" The ExDark dataset. ExDark is an image dataset. Thus, we treat each image as a sequence of length 1.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. If None, all the\n images will be used\n split - 'train' or 'val'.\n \"\"\"\n root = env_settings().exdark_dir if root is None else root\n super().__init__('exdark', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/')\n self.anno_path = os.path.join(root, 'annotations/annotations.json')\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'exdark'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''v0.4 ExDark avoid too small bounding boxes'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 
'root_class': None,\n                                       'motion_adverb': None})\n        return object_meta\n\n\n    def get_class_name(self, seq_id):\n        cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n        return cat_dict_current['name']\n\n    def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n        # ExDark is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n        # list containing these replicated images.\n        frame = self._get_frames(seq_id)\n\n        frame_list = [frame.copy() for _ in frame_ids]\n\n        if anno is None:\n            anno = self.get_sequence_info(seq_id)\n\n        anno_frames = {}\n        for key, value in anno.items():\n            anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n        object_meta = self.get_meta_info(seq_id)\n\n        return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\n\n    def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n        \"\"\"\n        args:\n            root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n            image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n                                            is used by default.\n            split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n                    NOT the official got-10k validation split. To use the official validation split, provide that as\n                    the root folder instead.\n            seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n                        options can be used at the same time.\n            data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n            use_lmdb - whether the dataset is stored in lmdb format\n        \"\"\"\n        root = env_settings().got10k_lmdb_dir if root is None else root\n        super().__init__('GOT10k_lmdb', root, image_loader)\n\n        # all folders inside the root\n        self.sequence_list = self._get_sequence_list()\n\n        # seq_id is the index of the folder inside the got10k root path\n        if split is not None:\n            if seq_ids is not None:\n                raise ValueError('Cannot set both split_name and seq_ids.')\n            train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n            if split == 'train':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n            elif split == 'val':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n            elif split == 'train_full':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n            elif split == 'vottrain':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n            elif split == 'votval':\n                file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n            else:\n                raise ValueError('Unknown split name.')\n            seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n        elif seq_ids is None:\n            seq_ids = list(range(0, len(self.sequence_list)))\n\n        self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n        if data_fraction is not None:\n            self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n        self.sequence_meta_info = self._load_meta_info()\n        self.seq_per_class = self._build_seq_per_class()\n\n        self.class_list = list(self.seq_per_class.keys())\n        self.class_list.sort()\n\n    def get_name(self):\n        return 'got10k_lmdb'\n\n    def 
has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n def _read_meta(meta_info):\n\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n 'motion_class': meta_info[6].split(': ')[-1],\n 'major_class': meta_info[7].split(': ')[-1],\n 'root_class': meta_info[8].split(': ')[-1],\n 'motion_adverb': meta_info[9].split(': ')[-1]})\n\n return object_meta\n sequence_meta_info = {}\n for s in self.sequence_list:\n try:\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n except:\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return sequence_meta_info\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n dir_str = decode_str(self.root, 'train/list.txt')\n dir_list = dir_str.split('\\n')\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in got10k is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # full occlusion and out_of_view files\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n # Read these files\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\n occlusion = torch.ByteTensor(occ_list)\n cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\n cover = torch.ByteTensor(cover_list)\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(\"train\", self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = 
self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_lmdb_dir if root is None else root\n super().__init__('LaSOT_lmdb', root, image_loader)\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n self.class_list = []\n for ele in class_list:\n if ele not in self.class_list:\n self.class_list.append(ele)\n # Keep a list of all classes\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, 
\"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n occlusion = torch.ByteTensor(occ_list)\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n out_of_view = torch.ByteTensor(out_view_list)\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n sequence_list_dict = decode_json(root, \"cache.json\")\n self.sequence_list = sequence_list_dict\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid_lmdb'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return decode_img(self.root, frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO_lmdb', root, image_loader)\n self.root = root\n self.img_pth = 'images/{}{}/'.format(split, version)\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n # Load the COCO set.\n print('loading annotations into memory...')\n tic = time.time()\n coco_json = decode_json(root, self.anno_path)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n self.coco_set = COCO(coco_json)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n # img = self.image_loader(os.path.join(self.img_pth, path))\n img = decode_img(self.root, os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_lmdb_dir if root is None else root\n super().__init__('TrackingNet_lmdb', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n train_cls=False, pos_prob=0.5):\n def __len__(self):\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n allow_invisible=False, force_invisible=False):\n def __getitem__(self, index):\n def getitem(self):\n def getitem_cls(self):\n def get_center_box(self, H, W, 
ratio=1/8):\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\n def get_one_search(self):\n def get_frame_ids_trident(self, visible):\n def get_frame_ids_stark(self, visible, valid):\nclass TrackingSampler(torch.utils.data.Dataset):\n H, W, _ = template_frames[0].shape\n H, W, _ = template_frames[0].shape\n H, W, _ = search_frames[0].shape" }, { "identifier": "processing", "path": "lib/train/data/processing.py", "snippet": "def stack_tensors(x):\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n def __call__(self, data: TensorDict):\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n mode='pair', settings=None, *args, **kwargs):\n def _get_jittered_box(self, box, mode):\n def __call__(self, data: TensorDict):\nclass BaseProcessing:\nclass STARKProcessing(BaseProcessing):" }, { "identifier": "LTRLoader", "path": "lib/train/data/loader.py", "snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n select along which dimension the data should be stacked to form a batch.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraries\n may be duplicated upon initializing workers (e.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) 
You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n __initialized = False\n\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n if collate_fn is None:\n if stack_dim == 0:\n collate_fn = ltr_collate\n elif stack_dim == 1:\n collate_fn = ltr_collate_stack1\n else:\n raise ValueError('Stack dim not supported. Must be 0 or 1.')\n\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last,\n timeout, worker_init_fn)\n\n self.name = name\n self.training = training\n self.epoch_interval = epoch_interval\n self.stack_dim = stack_dim" }, { "identifier": "opencv_loader", "path": "lib/train/data/image_loader.py", "snippet": "def opencv_loader(path):\n \"\"\" Reads an image using opencv's imread function and returns it in rgb format\"\"\"\n try:\n im = cv.imread(path, cv.IMREAD_COLOR)\n\n # convert to rgb and return\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n except Exception as e:\n print('ERROR: Could not read image \"{}\"'.format(path))\n print(e)\n return None" }, { "identifier": "is_main_process", "path": "lib/utils/misc.py", "snippet": "def is_main_process():\n return get_rank() == 0" } ]
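Taken together, the context snippets above all expose the same BaseVideoDataset-style interface (get_sequence_info, get_frames, get_class_name, ...), which is what lets the names2datasets helper below treat them interchangeably. A minimal sketch of that contract, assuming env_settings() already points at a prepared GOT-10k lmdb and that the configured loader returns numpy arrays:

# Hedged usage sketch of the shared dataset interface; the lmdb path comes
# from env_settings() and the numpy return type of the loader is an assumption.
from lib.train.dataset import Got10k_lmdb
from lib.train.data import opencv_loader

dataset = Got10k_lmdb(split='vottrain', image_loader=opencv_loader)
info = dataset.get_sequence_info(0)  # dict with 'bbox', 'valid', 'visible', 'visible_ratio'
frame_ids = info['visible'].nonzero(as_tuple=True)[0][:2].tolist()
frames, boxes, meta = dataset.get_frames(0, frame_ids)
print(meta['object_class_name'], len(frames), boxes['bbox'][0])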
import torch
import lib.train.data.transforms as tfm
from torch.utils.data.distributed import DistributedSampler
from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet, BDD100K_Night, SHIFT_Night, ExDark
from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb
from lib.train.data import sampler, opencv_loader, processing, LTRLoader
from lib.utils.misc import is_main_process
21,081
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else:
# datasets related def update_settings(settings, cfg): settings.print_interval = cfg.TRAIN.PRINT_INTERVAL settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR, 'search': cfg.DATA.SEARCH.FACTOR} settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE, 'search': cfg.DATA.SEARCH.SIZE} settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER, 'search': cfg.DATA.SEARCH.CENTER_JITTER} settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER, 'search': cfg.DATA.SEARCH.SCALE_JITTER} settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM settings.print_stats = None settings.batchsize = cfg.TRAIN.BATCH_SIZE settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE def names2datasets(name_list: list, settings, image_loader): assert isinstance(name_list, list) datasets = [] for name in name_list: assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val", "COCO17", "VID", "TRACKINGNET", "BDD100K_NIGHT", "SHIFT_NIGHT", "ExDark"] if name == "LASOT": if settings.use_lmdb: print("Building lasot dataset from lmdb") datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader)) else: datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader)) if name == "GOT10K_vottrain": if settings.use_lmdb: print("Building got10k from lmdb") datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader)) else:
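Since update_settings above only copies fields off a nested config, the cfg shape it expects can be made concrete. The sketch below is hedged: types.SimpleNamespace stands in for the project's real config object (presumably a yacs/EasyDict CfgNode), the numeric values are placeholders, and only the fields update_settings touches are filled in.

from types import SimpleNamespace

# Hypothetical config; field names mirror the accesses in update_settings(),
# numeric values are illustrative placeholders only.
cfg = SimpleNamespace(
    TRAIN=SimpleNamespace(PRINT_INTERVAL=50, GRAD_CLIP_NORM=0.1, BATCH_SIZE=32,
                          SCHEDULER=SimpleNamespace(TYPE='step')),
    DATA=SimpleNamespace(
        TEMPLATE=SimpleNamespace(FACTOR=2.0, SIZE=128, CENTER_JITTER=0.0, SCALE_JITTER=0.0),
        SEARCH=SimpleNamespace(FACTOR=5.0, SIZE=320, CENTER_JITTER=4.5, SCALE_JITTER=0.5)))

settings = SimpleNamespace()  # the trainer's mutable settings object
update_settings(settings, cfg)
print(settings.output_sz)     # {'template': 128, 'search': 320}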
datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
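Spliced back into the cropped code, this gold next_line completes the else arm of the GOT10K_vottrain branch of names2datasets, so the finished case reads:

if name == "GOT10K_vottrain":
    if settings.use_lmdb:
        print("Building got10k from lmdb")
        datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
    else:
        datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))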
1
2023-11-20 06:41:15+00:00
24k
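To round off this record, a hedged end-to-end sketch of how its pieces fit together at train time: names2datasets builds the dataset list, STARKProcessing crops and jitters, TrackingSampler draws template/search frames, and LTRLoader batches them. Constructor arguments follow the snippet signatures above; the cfg field names (DATA.TRAIN.*, TRAIN.NUM_WORKER) and mode='sequence' are assumptions modeled on STARK-style configs, not taken from this record.

from lib.train.data import sampler, processing, LTRLoader, opencv_loader

def build_train_loader(cfg, settings):
    # Crop/jitter pipeline; signature matches STARKProcessing in the context above.
    data_processing_train = processing.STARKProcessing(
        search_area_factor=settings.search_area_factor,
        output_sz=settings.output_sz,
        center_jitter_factor=settings.center_jitter_factor,
        scale_jitter_factor=settings.scale_jitter_factor,
        mode='sequence',  # assumption: sequence mode as in STARK-style trainers
        settings=settings)

    # Pair sampler over the datasets returned by names2datasets() from this record.
    dataset_train = sampler.TrackingSampler(
        datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),
        p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,
        samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,
        max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL,
        num_search_frames=cfg.DATA.SEARCH.NUMBER,
        num_template_frames=cfg.DATA.TEMPLATE.NUMBER,
        processing=data_processing_train,
        frame_sample_mode='causal')

    # stack_dim=1 selects ltr_collate_stack1 inside LTRLoader.__init__ above.
    return LTRLoader('train', dataset_train, training=True,
                     batch_size=cfg.TRAIN.BATCH_SIZE, shuffle=True,
                     num_workers=cfg.TRAIN.NUM_WORKER, drop_last=True, stack_dim=1)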
shercoo/RGDiffSR
ldm/models/diffusion/ddpm.py
[ { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=10):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new(\"RGB\", wh, color=\"white\")\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(40 * (wh[0] / 256))\n lines = \"\\n\".join(xc[bi][start:start + nc] for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill=\"black\", font=font)\n except UnicodeEncodeError:\n print(\"Cant encode string for logging. Skipping.\")\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(f\"{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.\")\n return total_params" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not \"target\" in config:\n if config == '__is_first_stage__':\n return None\n elif config == \"__is_unconditional__\":\n return None\n raise KeyError(\"Expected key `target` to instantiate.\")\n return get_obj_from_str(config[\"target\"])(**config.get(\"params\", dict()))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates\n else torch.tensor(-1,dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.','')\n self.m_name2s_name.update({name:s_name})\n self.register_buffer(s_name,p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self,model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for 
key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(m_param[key])\n shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, \"at least one argument must be a Tensor\"\n\n # Force variances to be Tensors. 
Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (\n -1.0\n + logvar2\n - logvar1\n + torch.exp(logvar1 - logvar2)\n + ((mean1 - mean2) ** 2) * torch.exp(-logvar2)\n )" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(torch.pow(self.mean, 2)\n + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1,2,3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n # print('************************encoder shape',x.shape)\n\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key=\"image\",\n colorize_nlabels=None,\n monitor=None,\n ):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n assert ddconfig[\"double_z\"]\n self.quant_conv = torch.nn.Conv2d(2*ddconfig[\"z_channels\"], 2*embed_dim, 1)\n 
self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig[\"z_channels\"], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels)==int\n self.register_buffer(\"colorize\", torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n sd = torch.load(path, map_location=\"cpu\")[\"state_dict\"]\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print(\"Deleting key {} from state_dict.\".format(k))\n del sd[k]\n self.load_state_dict(sd, strict=False)\n print(f\"Restored from {path}\")\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def forward(self, input, sample_posterior=True):\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1, 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n self.log(\"aeloss\", aeloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_ae, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, optimizer_idx, self.global_step,\n last_layer=self.get_last_layer(), split=\"train\")\n\n self.log(\"discloss\", discloss, prog_bar=True, logger=True, on_step=True, on_epoch=True)\n self.log_dict(log_dict_disc, prog_bar=False, logger=True, on_step=True, on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(inputs, reconstructions, posterior, 0, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n discloss, log_dict_disc = self.loss(inputs, reconstructions, posterior, 1, self.global_step,\n last_layer=self.get_last_layer(), split=\"val\")\n\n self.log(\"val/rec_loss\", log_dict_ae[\"val/rec_loss\"])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(list(self.encoder.parameters())+\n list(self.decoder.parameters())+\n list(self.quant_conv.parameters())+\n list(self.post_quant_conv.parameters()),\n lr=lr, betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(self.loss.discriminator.parameters(),\n lr=lr, betas=(0.5, 0.9))\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = 
x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n log[\"samples\"] = self.decode(torch.randn_like(posterior.sample()))\n log[\"reconstructions\"] = xrec\n log[\"inputs\"] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == \"segmentation\"\n if not hasattr(self, \"colorize\"):\n self.register_buffer(\"colorize\", torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2.*(x-x.min())/(x.max()-x.min()) - 1.\n return x" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule, n_timestep, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3):\n if schedule == \"linear\":\n betas = (\n torch.linspace(linear_start ** 0.5, linear_end ** 0.5, n_timestep, dtype=torch.float64) ** 2\n )\n\n elif schedule == \"cosine\":\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep + cosine_s\n )\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == \"sqrt_linear\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == \"sqrt\":\n betas = torch.linspace(linear_start, linear_end, n_timestep, dtype=torch.float64) ** 0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1,) * (len(x_shape) - 1)))" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n def __init__(self, model, schedule=\"linear\", **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device(\"cuda\"):\n attr = attr.to(torch.device(\"cuda\"))\n setattr(self, name, attr)\n\n def make_schedule(self, ddim_num_steps, ddim_discretize=\"uniform\", ddim_eta=0., verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(ddim_discr_method=ddim_discretize, num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model.device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev', to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod', 
to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas', np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) * (\n 1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps', sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs\n ):\n\n\n if conditioning is not None:\n if isinstance(conditioning, dict):\n if isinstance(list(conditioning.values())[0],list):\n cbs = conditioning[list(conditioning.keys())[0]][0].shape[0]\n else:\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(f\"Warning: Got {cbs} conditionings but batch-size is {batch_size}\")\n else:\n if conditioning.shape[0] != batch_size:\n print(f\"Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}\")\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for DDIM sampling is {size}, eta {eta}')\n\n samples, intermediates = self.ddim_sampling(conditioning, size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask, x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self, cond, shape,\n x_T=None, ddim_use_original_steps=False,\n callback=None, timesteps=None, quantize_denoised=False,\n mask=None, x0=None, img_callback=None, log_every_t=100,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None,):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps 
is not None and not ddim_use_original_steps:\n subset_end = int(min(timesteps / self.ddim_timesteps.shape[0], 1) * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(0,timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[0]\n print(f\"Running DDIM Sampling with {total_steps} timesteps\")\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b,), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(img, cond, ts, index=index, use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised, temperature=temperature,\n noise_dropout=noise_dropout, score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self, x, c, t, index, repeat_noise=False, use_original_steps=False, quantize_denoised=False,\n temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None,\n unconditional_guidance_scale=1., unconditional_conditioning=None):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in, c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == \"eps\"\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c, **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1), sqrt_one_minus_alphas[index],device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device, repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0" }, { "identifier": "Attention_AR_counter", "path": "text_super_resolution/model/VisionLAN/utils.py", "snippet": "class Attention_AR_counter():\n def __init__(self, display_string, dict_file, case_sensitive):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n self.display_string = display_string\n self.case_sensitive = case_sensitive\n self.de = cha_encdec(dict_file, case_sensitive)\n\n def clear(self):\n self.correct = 0\n self.total_samples = 0.\n self.distance_C = 0\n self.total_C = 0.\n self.distance_W = 0\n self.total_W = 0.\n \n def add_iter(self, output, out_length, label_length, labels):\n self.total_samples += label_length.size()[0]\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n for i in range(0, len(prdt_texts)):\n if not self.case_sensitive:\n prdt_texts[i] = prdt_texts[i].lower()\n labels[i] = labels[i].lower()\n all_words = []\n for w in labels[i].split('|') + prdt_texts[i].split('|'):\n if w not in all_words:\n all_words.append(w)\n l_words = [all_words.index(_) for _ in labels[i].split('|')]\n p_words = [all_words.index(_) for _ in prdt_texts[i].split('|')]\n self.distance_C += ed.eval(labels[i], prdt_texts[i])\n self.distance_W += ed.eval(l_words, p_words)\n self.total_C += len(labels[i])\n self.total_W += len(l_words)\n self.correct = self.correct + 1 if labels[i] == prdt_texts[i] else self.correct\n return prdt_texts, labels\n\n def show(self):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W))\n self.clear()\n def show_test(self,best_acc, change= False):\n print(self.display_string)\n if self.total_samples == 0:\n pass\n if (self.correct / self.total_samples) > best_acc:\n best_acc = np.copy(self.correct / self.total_samples)\n change = True\n print('Accuracy: {:.6f}, AR: {:.6f}, CER: {:.6f}, WER: {:.6f}, best_acc: {:.6f}'.format(\n self.correct / self.total_samples,\n 1 - self.distance_C / self.total_C,\n self.distance_C / self.total_C,\n self.distance_W / self.total_W, best_acc))\n\n self.clear()\n return best_acc, change\n \n def convert(self, output, out_length):\n prdt_texts, prdt_prob = self.de.decode(output, out_length)\n prdt_prob = prdt_prob.cpu().unsqueeze(0)\n MAX_LEN = 25\n length = prdt_prob.size(1)\n if length >= MAX_LEN:\n return prdt_prob[:, :MAX_LEN, :], prdt_prob\n pad = torch.zeros([prdt_prob.shape[0], MAX_LEN - length, prdt_prob.shape[2]])\n prdt_prob = torch.cat([prdt_prob, pad], dim=1)\n return prdt_texts, prdt_prob" }, { "identifier": "TPSSpatialTransformer", "path": "text_super_resolution/model/tps_spatial_transformer.py", "snippet": "class TPSSpatialTransformer(nn.Module):\n\n def __init__(self, output_image_size=None, num_control_points=None, margins=None):\n super(TPSSpatialTransformer, self).__init__()\n self.output_image_size = output_image_size\n self.num_control_points = num_control_points\n self.margins = margins\n\n self.target_height, self.target_width = output_image_size\n target_control_points = build_output_control_points(num_control_points, 
margins)\n N = num_control_points\n # N = N - 4\n\n # create padded kernel matrix\n forward_kernel = torch.zeros(N + 3, N + 3)\n target_control_partial_repr = compute_partial_repr(target_control_points, target_control_points)\n forward_kernel[:N, :N].copy_(target_control_partial_repr)\n forward_kernel[:N, -3].fill_(1)\n forward_kernel[-3, :N].fill_(1)\n forward_kernel[:N, -2:].copy_(target_control_points)\n forward_kernel[-2:, :N].copy_(target_control_points.transpose(0, 1))\n # compute inverse matrix\n inverse_kernel = torch.inverse(forward_kernel)\n\n # create target cordinate matrix\n HW = self.target_height * self.target_width\n target_coordinate = list(itertools.product(range(self.target_height), range(self.target_width)))\n target_coordinate = torch.Tensor(target_coordinate) # HW x 2\n Y, X = target_coordinate.split(1, dim = 1)\n Y = Y / (self.target_height - 1)\n X = X / (self.target_width - 1)\n target_coordinate = torch.cat([X, Y], dim = 1) # convert from (y, x) to (x, y)\n target_coordinate_partial_repr = compute_partial_repr(target_coordinate, target_control_points)\n target_coordinate_repr = torch.cat([\n target_coordinate_partial_repr, torch.ones(HW, 1), target_coordinate\n ], dim = 1)\n\n # register precomputed matrices\n self.register_buffer('inverse_kernel', inverse_kernel)\n self.register_buffer('padding_matrix', torch.zeros(3, 2))\n self.register_buffer('target_coordinate_repr', target_coordinate_repr)\n self.register_buffer('target_control_points', target_control_points)\n\n def forward(self, input, source_control_points):\n assert source_control_points.ndimension() == 3\n assert source_control_points.size(1) == self.num_control_points\n assert source_control_points.size(2) == 2\n batch_size = source_control_points.size(0)\n\n Y = torch.cat([source_control_points, self.padding_matrix.expand(batch_size, 3, 2)], 1)\n mapping_matrix = torch.matmul(self.inverse_kernel, Y)\n source_coordinate = torch.matmul(self.target_coordinate_repr, mapping_matrix)\n\n grid = source_coordinate.view(-1, self.target_height, self.target_width, 2)\n grid = torch.clamp(grid, 0, 1) # the source_control_points may be out of [0, 1].\n # the input to grid_sample is normalized [-1, 1], but what we get is [0, 1]\n grid = 2.0 * grid - 1.0\n output_maps = grid_sample(input, grid, canvas=None)\n return output_maps, source_coordinate" }, { "identifier": "STNHead", "path": "text_super_resolution/model/stn_head.py", "snippet": "class STNHead(nn.Module):\n def __init__(self, in_planes, num_ctrlpoints, activation='none', input_size=(16, 64)):\n super(STNHead, self).__init__()\n\n self.in_planes = in_planes\n self.num_ctrlpoints = num_ctrlpoints\n self.activation = activation\n self.stn_convnet = nn.Sequential(\n # conv3x3_block(in_planes, 32), # 32*128\n # nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(in_planes, 32), # 16*64\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(32, 64), # 8*32\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(64, 128), # 4*16\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(128, 256), # 2*8\n nn.MaxPool2d(kernel_size=2, stride=2),\n conv3x3_block(256, 256), # 1*4,\n nn.MaxPool2d(kernel_size=(1,2), stride=(1,2)),\n conv3x3_block(256, 256)) # 1*2\n\n flatten_width = int(input_size[1] / 32)\n # print(\"flw:\", input_size[1] / 32)\n self.stn_fc1 = nn.Sequential(\n nn.Linear(512, 512), #flatten_width*256\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True))\n self.stn_fc2 = nn.Linear(512, num_ctrlpoints*2)\n\n self.init_weights(self.stn_convnet)\n 
self.init_weights(self.stn_fc1)\n self.init_stn(self.stn_fc2)\n\n def init_weights(self, module):\n for m in module.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n m.weight.data.normal_(0, 0.001)\n m.bias.data.zero_()\n\n def init_stn(self, stn_fc2):\n margin = 0.01\n sampling_num_per_side = int(self.num_ctrlpoints / 2)\n ctrl_pts_x = np.linspace(margin, 1.-margin, sampling_num_per_side)\n ctrl_pts_y_top = np.ones(sampling_num_per_side) * margin\n ctrl_pts_y_bottom = np.ones(sampling_num_per_side) * (1-margin)\n ctrl_pts_top = np.stack([ctrl_pts_x, ctrl_pts_y_top], axis=1)\n ctrl_pts_bottom = np.stack([ctrl_pts_x, ctrl_pts_y_bottom], axis=1)\n ctrl_points = np.concatenate([ctrl_pts_top, ctrl_pts_bottom], axis=0).astype(np.float32)\n # print(ctrl_points.shape)\n if self.activation is 'none':\n pass\n elif self.activation == 'sigmoid':\n ctrl_points = -np.log(1. / ctrl_points - 1.)\n elif self.activation == 'relu':\n ctrl_points = F.relu(torch.Tensor(ctrl_points))\n stn_fc2.weight.data.zero_()\n stn_fc2.bias.data = torch.Tensor(ctrl_points).view(-1)\n\n def forward(self, x):\n x = self.stn_convnet(x)\n batch_size, _, h, w = x.size()\n x = x.view(batch_size, -1)\n\n # print(\"x:\", x.shape)\n\n img_feat = self.stn_fc1(x)\n x = self.stn_fc2(0.1 * img_feat)\n if self.activation == 'sigmoid':\n x = torch.sigmoid(x)\n if self.activation == 'relu':\n x = F.relu(x)\n x = x.view(-1, self.num_ctrlpoints, 2)\n return img_feat, x" }, { "identifier": "VisionLAN", "path": "text_super_resolution/model/VisionLAN/VisionLAN.py", "snippet": "class VisionLAN(nn.Module):\n '''\n Architecture of VisionLAN\n input\n input: input image\n label_pos: character index\n output\n text_pre: word-level prediction from VRM\n test_rem: remaining string prediction from MLM\n text_mas: occluded character prediction from MLM\n '''\n def __init__(self, strides, input_shape):\n super(VisionLAN, self).__init__()\n self.backbone = resnet.resnet45(strides, compress_layer=False)\n self.input_shape = input_shape\n self.MLM_VRM = MLM_VRM()\n def forward(self, input, label_pos, training_stp, Train_in = True):\n # extract features\n features = self.backbone(input)\n # MLM + VRM\n if Train_in:\n text_pre, test_rem, text_mas, mask_map = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return text_pre, test_rem, text_mas, mask_map\n else:\n output, out_length = self.MLM_VRM(features[-1], label_pos, training_stp, is_Train=Train_in)\n return output, out_length" }, { "identifier": "SemanticLoss", "path": "text_super_resolution/loss/semantic_loss.py", "snippet": "class SemanticLoss(nn.Module):\n def __init__(self, margin=0.1):\n super(SemanticLoss, self).__init__()\n self.cos_sim = nn.CosineSimilarity(dim=-1, eps=1e-8)\n self.margin = margin\n\n self.lambda1 = 1.0\n self.lambda2 = 1.0\n\n self.kl_loss = torch.nn.KLDivLoss()\n\n def forward(self, pred_vec, gt_vec):\n # pred_vec: [N, C]\n # gt_vec: [N, C]\n # mean_sim = torch.mean(self.cos_sim(gt_vec, pred_vec))\n # sim_loss = 1 - mean_sim\n \n #noise = Variable(torch.rand(pred_vec.shape)) * 0.1 - 0.05\n\n #normed_pred_vec = pred_vec + noise.to(pred_vec.device)\n # print(\"pred_vec:\", pred_vec.shape)\n norm_vec = torch.abs(gt_vec - pred_vec)\n margin_loss = torch.mean(norm_vec) #\n\n # pr int(\"sem_loss:\", 
float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n ce_loss = self.kl_loss(torch.log(pred_vec + 1e-20), gt_vec + 1e-20)\n # print(\"sem_loss:\", float(margin_loss.data), \"sim_loss:\", float(sim_loss.data))\n\n return self.lambda1 * margin_loss + self.lambda2 * ce_loss# ce_loss #margin_loss # + ce_loss # + sim_loss #margin_loss +\n\n def cross_entropy(self, pred_vec, gt_vec, l=1e-5):\n cal = gt_vec * torch.log(pred_vec+l) + (1 - gt_vec) * torch.log(1 - pred_vec+l)\n #print(\"cal:\", cal)\n return -cal" }, { "identifier": "ssim_psnr", "path": "text_super_resolution/utils/ssim_psnr.py", "snippet": "def calculate_psnr(img1, img2):\ndef weighted_calculate_psnr(img1, img2, weighted_mask):\ndef gaussian(window_size, sigma):\ndef create_window(window_size, channel):\ndef create_rect_window(window_H, window_W, channel):\ndef _ssim_weighted(img1_, img2_, window, window_size, channel, weighted_mask, size_average=True):\ndef _ssim(img1, img2, window, window_size, channel, size_average=True):\ndef _tri_ssim(img1, img2, img3, window, window_size, channel, size_average=True):\ndef _ssim_rect(img1, img2, window, window_size, channel, size_average=True):\n def __init__(self, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, img3):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2, weighted_mask):\n def __init__(self, window_size=11, size_average=True):\n def forward(self, img1, img2):\ndef ssim(img1, img2, window_size=11, size_average=True):\ndef ssim_weighted(img1, img2, weighted_mask, window_size=11, size_average=True):\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\n H, W = window_size\n C1 = 0.01 ** 2\n C2 = 0.03 ** 2\nclass Distorted_SSIM(torch.nn.Module):\nclass SSIM(torch.nn.Module):\nclass TRI_SSIM(torch.nn.Module):\nclass SSIM_WEIGHTED(torch.nn.Module):\nclass SSIM_TSR(torch.nn.Module):" } ]
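A note on the TPSSpatialTransformer context snippet above: it calls compute_partial_repr, which is not included in this record. In the ASTER-style implementations this code derives from, that helper evaluates the thin-plate-spline radial basis U(r) = r^2 log r between point sets; a minimal sketch under that assumption (names and layout mine, not verbatim from the repo):

import torch

def compute_partial_repr(input_points: torch.Tensor, control_points: torch.Tensor) -> torch.Tensor:
    # Pairwise TPS kernel between (N, 2) query points and (M, 2) control points.
    N, M = input_points.size(0), control_points.size(0)
    pairwise_diff = input_points.view(N, 1, 2) - control_points.view(1, M, 2)
    pairwise_dist = (pairwise_diff ** 2).sum(dim=2)  # squared distance r^2
    # 0.5 * r^2 * log(r^2) == r^2 * log(r); log(0) yields NaN at coincident points
    repr_matrix = 0.5 * pairwise_dist * torch.log(pairwise_dist)
    repr_matrix.masked_fill_(repr_matrix != repr_matrix, 0)  # replace NaN with 0
    return repr_matrix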
import datetime import math import cv2 import torch import torch.nn as nn import numpy as np import pytorch_lightning as pl import pygame from collections import OrderedDict from matplotlib import pyplot as plt from torch.optim.lr_scheduler import LambdaLR from einops import rearrange, repeat from contextlib import contextmanager from functools import partial from torchvision import transforms from tqdm import tqdm from torchvision.utils import make_grid from pytorch_lightning.utilities.distributed import rank_zero_only from ldm.util import log_txt_as_img, exists, default, ismap, isimage, mean_flat, count_params, instantiate_from_config from ldm.modules.ema import LitEma from ldm.modules.distributions.distributions import normal_kl, DiagonalGaussianDistribution from ldm.models.autoencoder import VQModelInterface, IdentityFirstStage, AutoencoderKL from ldm.modules.diffusionmodules.util import make_beta_schedule, extract_into_tensor, noise_like from ldm.models.diffusion.ddim import DDIMSampler from text_super_resolution.model.VisionLAN.utils import Attention_AR_counter from text_super_resolution.model.tps_spatial_transformer import TPSSpatialTransformer from text_super_resolution.model.stn_head import STNHead from text_super_resolution.model.VisionLAN.VisionLAN import VisionLAN from utils.render_standard_text import * from text_super_resolution.loss.semantic_loss import SemanticLoss from text_super_resolution.utils import ssim_psnr from pygame import freetype from utils.metrics import *
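For reference, the two schedule helpers imported above from ldm.modules.diffusionmodules.util and used throughout the code below are, in the standard latent-diffusion codebase, implemented essentially as follows (torch is already imported above; verify against the pinned version of the repo):

def extract_into_tensor(a, t, x_shape):
    # Gather the per-timestep coefficients a[t] and reshape so they broadcast over x.
    b, *_ = t.shape
    out = a.gather(-1, t)
    return out.reshape(b, *((1,) * (len(x_shape) - 1)))

def noise_like(shape, device, repeat=False):
    # Fresh Gaussian noise; optionally one sample repeated across the batch dimension.
    repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(shape[0], *((1,) * (len(shape) - 1)))
    noise = lambda: torch.randn(shape, device=device)
    return repeat_noise() if repeat else noise()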
14521
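The cropped_code below picks up mid-function inside the sampling path. The core move in p_sample is one ancestral reverse-diffusion step; a minimal standalone sketch of that update (illustrative only, mirroring the model_mean + nonzero_mask * exp(0.5 * log_var) * noise pattern; the function name is mine):

import torch

def ancestral_step(model_mean, model_log_variance, t):
    # Draw x_{t-1} ~ N(mu_theta, sigma_t^2); no noise is added at t == 0.
    noise = torch.randn_like(model_mean)
    nonzero_mask = (t != 0).float().view(-1, *([1] * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise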
return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], 
n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]:
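In the full module below, register_schedule precomputes every diffusion coefficient as a buffer. A small numpy sketch of the same quantities with the default v_posterior = 0 (note: the real make_beta_schedule "linear" mode spaces sqrt(beta) linearly between the endpoints; a plain ramp is used here for brevity), which also shows why the posterior log-variance must be clipped at t = 0:

import numpy as np

betas = np.linspace(1e-4, 2e-2, 1000)                # simplified linear schedule
alphas = 1. - betas
alphas_cumprod = np.cumprod(alphas)
alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
# q(x_t | x_0) = N(sqrt(abar_t) * x_0, (1 - abar_t) * I)
sqrt_alphas_cumprod = np.sqrt(alphas_cumprod)
# q(x_{t-1} | x_t, x_0) variance: beta_t * (1 - abar_{t-1}) / (1 - abar_t)
posterior_variance = betas * (1. - alphas_cumprod_prev) / (1. - alphas_cumprod)
assert posterior_variance[0] == 0.                   # hence np.maximum(..., 1e-20) before the log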
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ __conditioning_keys__ = {'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y'} sem_loss = SemanticLoss() def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__(self, unet_config, timesteps=1000, beta_schedule="linear", loss_type="l2", ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor="val/loss", use_ema=True, first_stage_key="image", image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization="eps", # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., ): super().__init__() assert parameterization in ["eps", "x0"], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print(f"{self.__class__.__name__}: Running in {self.parameterization}-prediction mode") self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? self.channels = channels self.use_positional_encodings = use_positional_encodings self.model = DiffusionWrapper(unet_config, conditioning_key) count_params(self.model, verbose=True) self.use_ema = use_ema if self.use_ema: self.model_ema = LitEma(self.model) print(f"Keeping EMAs of {len(list(self.model_ema.buffers()))}.") self.use_scheduler = scheduler_config is not None if self.use_scheduler: self.scheduler_config = scheduler_config self.v_posterior = v_posterior self.original_elbo_weight = original_elbo_weight self.l_simple_weight = l_simple_weight if monitor is not None: self.monitor = monitor if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet) self.register_schedule(given_betas=given_betas, beta_schedule=beta_schedule, timesteps=timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) self.loss_type = loss_type self.learn_logvar = learn_logvar self.logvar = torch.full(fill_value=logvar_init, size=(self.num_timesteps,)) if self.learn_logvar: self.logvar = nn.Parameter(self.logvar, requires_grad=True) def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): if exists(given_betas): betas = given_betas else: betas = make_beta_schedule(beta_schedule, timesteps, linear_start=linear_start, linear_end=linear_end, cosine_s=cosine_s) alphas = 1. 
- betas alphas_cumprod = np.cumprod(alphas, axis=0) alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1]) timesteps, = betas.shape self.num_timesteps = int(timesteps) self.linear_start = linear_start self.linear_end = linear_end assert alphas_cumprod.shape[0] == self.num_timesteps, 'alphas have to be defined for each timestep' to_torch = partial(torch.tensor, dtype=torch.float32) self.register_buffer('betas', to_torch(betas)) self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod)) self.register_buffer('alphas_cumprod_prev', to_torch(alphas_cumprod_prev)) # calculations for diffusion q(x_t | x_{t-1}) and others self.register_buffer('sqrt_alphas_cumprod', to_torch(np.sqrt(alphas_cumprod))) self.register_buffer('sqrt_one_minus_alphas_cumprod', to_torch(np.sqrt(1. - alphas_cumprod))) self.register_buffer('log_one_minus_alphas_cumprod', to_torch(np.log(1. - alphas_cumprod))) self.register_buffer('sqrt_recip_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod))) self.register_buffer('sqrt_recipm1_alphas_cumprod', to_torch(np.sqrt(1. / alphas_cumprod - 1))) # calculations for posterior q(x_{t-1} | x_t, x_0) posterior_variance = (1 - self.v_posterior) * betas * (1. - alphas_cumprod_prev) / ( 1. - alphas_cumprod) + self.v_posterior * betas # above: equal to 1. / (1. / (1. - alpha_cumprod_tm1) + alpha_t / beta_t) self.register_buffer('posterior_variance', to_torch(posterior_variance)) # below: log calculation clipped because the posterior variance is 0 at the beginning of the diffusion chain self.register_buffer('posterior_log_variance_clipped', to_torch(np.log(np.maximum(posterior_variance, 1e-20)))) self.register_buffer('posterior_mean_coef1', to_torch( betas * np.sqrt(alphas_cumprod_prev) / (1. - alphas_cumprod))) self.register_buffer('posterior_mean_coef2', to_torch( (1. - alphas_cumprod_prev) * np.sqrt(alphas) / (1. - alphas_cumprod))) if self.parameterization == "eps": lvlb_weights = self.betas ** 2 / ( 2 * self.posterior_variance * to_torch(alphas) * (1 - self.alphas_cumprod)) elif self.parameterization == "x0": lvlb_weights = 0.5 * np.sqrt(torch.Tensor(alphas_cumprod)) / (2. 
* 1 - torch.Tensor(alphas_cumprod)) else: raise NotImplementedError("mu not supported") # TODO how to choose this term lvlb_weights[0] = lvlb_weights[1] self.register_buffer('lvlb_weights', lvlb_weights, persistent=False) assert not torch.isnan(self.lvlb_weights).all() @contextmanager def ema_scope(self, context=None): if self.use_ema: self.model_ema.store(self.model.parameters()) self.model_ema.copy_to(self.model) if context is not None: print(f"{context}: Switched to EMA weights") try: yield None finally: if self.use_ema: self.model_ema.restore(self.model.parameters()) if context is not None: print(f"{context}: Restored training weights") def init_from_ckpt(self, path, ignore_keys=list(), only_model=False): sd = torch.load(path, map_location="cpu") print(sd.keys()) print(sd['epoch']) print(sd['global_step']) print(sd['callbacks']) # print(sd['optimizer_states']) # print(sd['lr_schedulers']) # print(sd['state_dict'].keys()) # exit(0) if "state_dict" in list(sd.keys()): sd = sd["state_dict"] keys = list(sd.keys()) for k in keys: for ik in ignore_keys: if k.startswith(ik): print("Deleting key {} from state_dict.".format(k)) del sd[k] missing, unexpected = self.load_state_dict(sd, strict=False) if not only_model else self.model.load_state_dict( sd, strict=False) print(f"Restored from {path} with {len(missing)} missing and {len(unexpected)} unexpected keys") if len(missing) > 0: print(f"Missing Keys: {missing}") if len(unexpected) > 0: print(f"Unexpected Keys: {unexpected}") def q_mean_variance(self, x_start, t): """ Get the distribution q(x_t | x_0). :param x_start: the [N x C x ...] tensor of noiseless inputs. :param t: the number of diffusion steps (minus 1). Here, 0 means one step. :return: A tuple (mean, variance, log_variance), all of x_start's shape. """ mean = (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start) variance = extract_into_tensor(1.0 - self.alphas_cumprod, t, x_start.shape) log_variance = extract_into_tensor(self.log_one_minus_alphas_cumprod, t, x_start.shape) return mean, variance, log_variance def predict_start_from_noise(self, x_t, t, noise): return ( extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) * noise ) def q_posterior(self, x_start, x_t, t): posterior_mean = ( extract_into_tensor(self.posterior_mean_coef1, t, x_t.shape) * x_start + extract_into_tensor(self.posterior_mean_coef2, t, x_t.shape) * x_t ) posterior_variance = extract_into_tensor(self.posterior_variance, t, x_t.shape) posterior_log_variance_clipped = extract_into_tensor(self.posterior_log_variance_clipped, t, x_t.shape) return posterior_mean, posterior_variance, posterior_log_variance_clipped def p_mean_variance(self, x, t, clip_denoised: bool): model_out = self.model(x, t) if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out if clip_denoised: x_recon.clamp_(-1., 1.) 
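        # At this point x_recon is the model's estimate of x_0 (recovered from the
        # predicted eps, or predicted directly); the reverse-step mean/variance come
        # from the closed-form posterior q(x_{t-1} | x_t, x_0) computed next.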
model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, t, clip_denoised=True, repeat_noise=False): b, *_, device = *x.shape, x.device model_mean, _, model_log_variance = self.p_mean_variance(x=x, t=t, clip_denoised=clip_denoised) noise = noise_like(x.shape, device, repeat_noise) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def p_sample_loop(self, shape, return_intermediates=False): device = self.betas.device b = shape[0] img = torch.randn(shape, device=device) intermediates = [img] for i in tqdm(reversed(range(0, self.num_timesteps)), desc='Sampling t', total=self.num_timesteps): img = self.p_sample(img, torch.full((b,), i, device=device, dtype=torch.long), clip_denoised=self.clip_denoised) if i % self.log_every_t == 0 or i == self.num_timesteps - 1: intermediates.append(img) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, batch_size=16, return_intermediates=False): image_size = self.image_size channels = self.channels return self.p_sample_loop((batch_size, channels, image_size, image_size), return_intermediates=return_intermediates) def q_sample(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) return (extract_into_tensor(self.sqrt_alphas_cumprod, t, x_start.shape) * x_start + extract_into_tensor(self.sqrt_one_minus_alphas_cumprod, t, x_start.shape) * noise) def get_loss(self, pred, target, mean=True): if self.loss_type == 'l1': loss = (target - pred).abs() if mean: loss = loss.mean() elif self.loss_type == 'l2': if mean: loss = torch.nn.functional.mse_loss(target, pred) else: loss = torch.nn.functional.mse_loss(target, pred, reduction='none') else: raise NotImplementedError("unknown loss type '{loss_type}'") return loss def p_losses(self, x_start, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_out = self.model(x_noisy, t) loss_dict = {} if self.parameterization == "eps": target = noise elif self.parameterization == "x0": target = x_start else: raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported") loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3]) log_prefix = 'train' if self.training else 'val' loss_dict.update({f'{log_prefix}/loss_simple': loss.mean()}) loss_simple = loss.mean() * self.l_simple_weight loss_vlb = (self.lvlb_weights[t] * loss).mean() loss_dict.update({f'{log_prefix}/loss_vlb': loss_vlb}) loss = loss_simple + self.original_elbo_weight * loss_vlb loss_dict.update({f'{log_prefix}/loss': loss}) return loss, loss_dict def forward(self, x, *args, **kwargs): # b, c, h, w, device, img_size, = *x.shape, x.device, self.image_size # assert h == img_size and w == img_size, f'height and width of image must be {img_size}' t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() return self.p_losses(x, t, *args, **kwargs) def get_input(self, batch, k): # print('************************fuck',k) x = batch[k] if len(x.shape) == 3: x = x[..., None] x = rearrange(x, 'b h w c -> b c h w') x = x.to(memory_format=torch.contiguous_format).float() return x def shared_step(self, batch): x = self.get_input(batch, self.first_stage_key) loss, loss_dict 
= self(x) return loss, loss_dict def training_step(self, batch, batch_idx): loss, loss_dict = self.shared_step(batch) self.log_dict(loss_dict, prog_bar=True, logger=True, on_step=True, on_epoch=True) self.log("global_step", self.global_step, prog_bar=True, logger=True, on_step=True, on_epoch=False) if self.use_scheduler: lr = self.optimizers().param_groups[0]['lr'] self.log('lr_abs', lr, prog_bar=True, logger=True, on_step=True, on_epoch=False) return loss @torch.no_grad() def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) def on_train_batch_end(self, *args, **kwargs): if self.use_ema: self.model_ema(self.model) def _get_rows_from_list(self, samples): n_imgs_per_row = len(samples) denoise_grid = rearrange(samples, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid @torch.no_grad() def log_images(self, batch, N=8, n_row=2, sample=True, return_keys=None, **kwargs): log = dict() x = self.get_input(batch, self.first_stage_key) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) x = x.to(self.device)[:N] log["inputs"] = x # get diffusion row diffusion_row = list() x_start = x[:n_row] for t in range(self.num_timesteps): if t % self.log_every_t == 0 or t == self.num_timesteps - 1: t = repeat(torch.tensor([t]), '1 -> b', b=n_row) t = t.to(self.device).long() noise = torch.randn_like(x_start) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) diffusion_row.append(x_noisy) log["diffusion_row"] = self._get_rows_from_list(diffusion_row) if sample: # get denoise row with self.ema_scope("Plotting"): samples, denoise_row = self.sample(batch_size=N, return_intermediates=True) log["samples"] = samples log["denoise_row"] = self._get_rows_from_list(denoise_row) if return_keys: if np.intersect1d(list(log.keys()), return_keys).shape[0] == 0: return log else: return {key: log[key] for key in return_keys} return log def configure_optimizers(self): lr = self.learning_rate params = list(self.model.parameters()) if self.learn_logvar: params = params + [self.logvar] opt = torch.optim.AdamW(params, lr=lr) return opt class LatentDiffusion(DDPM): """main class""" def __init__(self, first_stage_config, cond_stage_config, num_timesteps_cond=None, cond_stage_key="image", cond_stage_trainable=False, concat_mode=True, cond_stage_forward=None, conditioning_key=None, scale_factor=1.0, scale_by_std=False, text_prior_enable=False, image_height=32, image_width=128, STN_enable=False, standard_text=False, VL_pretrained_path=None, fid_eval=False, visualize=False, down_sample_rate=2, recog_loss_enable=False, font_path=None, *args, **kwargs): self.fid_eval = fid_eval self.visualize = visualize self.text_prior_enable = text_prior_enable self.recog_loss_enable = recog_loss_enable self.num_timesteps_cond = default(num_timesteps_cond, 1) self.scale_by_std = scale_by_std assert self.num_timesteps_cond <= kwargs['timesteps'] # for backwards compatibility after implementation of DiffusionWrapper if conditioning_key is None: conditioning_key = 'concat' if concat_mode else 'crossattn' if cond_stage_config 
== '__is_unconditional__': conditioning_key = None ckpt_path = kwargs.pop("ckpt_path", None) ignore_keys = kwargs.pop("ignore_keys", []) super().__init__(conditioning_key=conditioning_key, *args, **kwargs) self.concat_mode = concat_mode self.cond_stage_trainable = cond_stage_trainable self.cond_stage_key = cond_stage_key try: self.num_downs = len(first_stage_config.params.ddconfig.ch_mult) - 1 except: self.num_downs = 0 if not scale_by_std: self.scale_factor = scale_factor else: self.register_buffer('scale_factor', torch.tensor(scale_factor)) self.instantiate_first_stage(first_stage_config) self.instantiate_cond_stage(cond_stage_config) self.cond_stage_forward = cond_stage_forward self.clip_denoised = False self.bbox_tokenizer = None self.restarted_from_ckpt = False if ckpt_path is not None: self.init_from_ckpt(ckpt_path, ignore_keys) self.restarted_from_ckpt = True self.image_height = image_height self.image_width = image_width self.stn = STN_enable if self.stn: self.tps_inputsize = [image_height // down_sample_rate, image_width // down_sample_rate] tps_outputsize = [image_height // down_sample_rate, image_width // down_sample_rate] num_control_points = 20 tps_margins = [0.05, 0.05] self.tps = TPSSpatialTransformer( output_image_size=tuple(tps_outputsize), num_control_points=num_control_points, margins=tuple(tps_margins)) self.stn_head = STNHead( in_planes=3, num_ctrlpoints=num_control_points, activation='none', input_size=self.tps_inputsize) self.standard_text = standard_text if self.standard_text: # self.VL_model = self.VisionLAN_init(VL_pretrained_path) # self.test_acc_counter = Attention_AR_counter('\ntest accuracy: ', # '/home/zhouyuxuan/latent-diffusion/dic_36.txt', False) self.font_path = font_path pygame.init() freetype.init() self.cal_psnr = ssim_psnr.calculate_psnr self.cal_ssim = ssim_psnr.SSIM() def VisionLAN_init(self, path=None): cfg = {'args': { 'strides': [(1, 1), (2, 2), (2, 2), (2, 2), (1, 1), (1, 1)], 'input_shape': [3, 64, 256], # C x H x W }, 'init_state_dict': '/home/zhouyuxuan/latent-diffusion/visionlan.pth', } model_VL = VisionLAN(**cfg['args']) model_path = cfg['init_state_dict'] if path is None else path print('load pre_trained VisionLAN model from %s' % model_path) model_VL = model_VL.to(self.device) model_VL = nn.DataParallel(model_VL) if cfg['init_state_dict'] != None: fe_state_dict_ori = torch.load(model_path) fe_state_dict = OrderedDict() for k, v in fe_state_dict_ori.items(): if 'module' not in k: k = 'module.' 
+ k else: k = k.replace('features.module.', 'module.features.') fe_state_dict[k] = v model_dict_fe = model_VL.state_dict() state_dict_fe = {k: v for k, v in fe_state_dict.items() if k in model_dict_fe.keys()} model_dict_fe.update(state_dict_fe) model_VL.load_state_dict(model_dict_fe) return model_VL def parse_visionlan_data(self, imgs_input): imgs_input = transforms.ToPILImage()(imgs_input).convert('RGB') imgs_input = cv2.resize(np.array(imgs_input), (256, 64)) imgs_input = transforms.ToTensor()(imgs_input).unsqueeze(0) imgs_input = imgs_input.to(self.device) return imgs_input def make_cond_schedule(self, ): self.cond_ids = torch.full(size=(self.num_timesteps,), fill_value=self.num_timesteps - 1, dtype=torch.long) ids = torch.round(torch.linspace(0, self.num_timesteps - 1, self.num_timesteps_cond)).long() self.cond_ids[:self.num_timesteps_cond] = ids def on_save_checkpoint(self, checkpoint): if not isinstance(self.cond_stage_model, torch.nn.Identity): self.cond_stage_model.save_state_dict( '/home/zhouyuxuan/latent-diffusion/crnn_ckpt/', self.current_epoch) @rank_zero_only @torch.no_grad() def on_train_batch_start(self, batch, batch_idx, dataloader_idx): # only for very first batch if self.scale_by_std and self.current_epoch == 0 and self.global_step == 0 and batch_idx == 0 and not self.restarted_from_ckpt: assert self.scale_factor == 1., 'rather not use custom rescaling and std-rescaling simultaneously' # set rescale weight to 1./std of encodings print("### USING STD-RESCALING ###") x = super().get_input(batch, self.first_stage_key) x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() del self.scale_factor self.register_buffer('scale_factor', 1. / z.flatten().std()) print(f"setting self.scale_factor to {self.scale_factor}") print("### USING STD-RESCALING ###") def register_schedule(self, given_betas=None, beta_schedule="linear", timesteps=1000, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3): super().register_schedule(given_betas, beta_schedule, timesteps, linear_start, linear_end, cosine_s) self.shorten_cond_schedule = self.num_timesteps_cond > 1 if self.shorten_cond_schedule: self.make_cond_schedule() def instantiate_first_stage(self, config): model = instantiate_from_config(config) self.first_stage_model = model.eval() self.first_stage_model.train = disabled_train for param in self.first_stage_model.parameters(): param.requires_grad = False def instantiate_cond_stage(self, config): if not self.cond_stage_trainable: if config == "__is_first_stage__": print("Using first stage also as cond stage.") self.cond_stage_model = self.first_stage_model elif config == "__is_unconditional__": print(f"Training {self.__class__.__name__} as an unconditional model.") self.cond_stage_model = None # self.be_unconditional = True else: model = instantiate_from_config(config) self.cond_stage_model = model.eval() self.cond_stage_model.train = disabled_train for param in self.cond_stage_model.parameters(): param.requires_grad = False else: assert config != '__is_first_stage__' assert config != '__is_unconditional__' model = instantiate_from_config(config) self.cond_stage_model = model def _get_denoise_row_from_list(self, samples, desc='', force_no_decoder_quantization=False): denoise_row = [] for zd in tqdm(samples, desc=desc): denoise_row.append(self.decode_first_stage(zd.to(self.device), force_not_quantize=force_no_decoder_quantization)) n_imgs_per_row = len(denoise_row) denoise_row = torch.stack(denoise_row) # n_log_step, 
n_row, C, H, W denoise_grid = rearrange(denoise_row, 'n b c h w -> b n c h w') denoise_grid = rearrange(denoise_grid, 'b n c h w -> (b n) c h w') denoise_grid = make_grid(denoise_grid, nrow=n_imgs_per_row) return denoise_grid def get_first_stage_encoding(self, encoder_posterior): if isinstance(encoder_posterior, DiagonalGaussianDistribution): z = encoder_posterior.sample() elif isinstance(encoder_posterior, torch.Tensor): z = encoder_posterior else: raise NotImplementedError(f"encoder_posterior of type '{type(encoder_posterior)}' not yet implemented") return self.scale_factor * z def get_learned_conditioning(self, c): if self.cond_stage_forward is None: if hasattr(self.cond_stage_model, 'encode') and callable(self.cond_stage_model.encode): c = self.cond_stage_model.encode(c) if isinstance(c, DiagonalGaussianDistribution): c = c.mode() else: c = self.cond_stage_model(c) else: assert hasattr(self.cond_stage_model, self.cond_stage_forward) c = getattr(self.cond_stage_model, self.cond_stage_forward)(c) return c def meshgrid(self, h, w): y = torch.arange(0, h).view(h, 1, 1).repeat(1, w, 1) x = torch.arange(0, w).view(1, w, 1).repeat(h, 1, 1) arr = torch.cat([y, x], dim=-1) return arr def delta_border(self, h, w): """ :param h: height :param w: width :return: normalized distance to image border, wtith min distance = 0 at border and max dist = 0.5 at image center """ lower_right_corner = torch.tensor([h - 1, w - 1]).view(1, 1, 2) arr = self.meshgrid(h, w) / lower_right_corner dist_left_up = torch.min(arr, dim=-1, keepdims=True)[0] dist_right_down = torch.min(1 - arr, dim=-1, keepdims=True)[0] edge_dist = torch.min(torch.cat([dist_left_up, dist_right_down], dim=-1), dim=-1)[0] return edge_dist def get_weighting(self, h, w, Ly, Lx, device): weighting = self.delta_border(h, w) weighting = torch.clip(weighting, self.split_input_params["clip_min_weight"], self.split_input_params["clip_max_weight"], ) weighting = weighting.view(1, h * w, 1).repeat(1, 1, Ly * Lx).to(device) if self.split_input_params["tie_braker"]: L_weighting = self.delta_border(Ly, Lx) L_weighting = torch.clip(L_weighting, self.split_input_params["clip_min_tie_weight"], self.split_input_params["clip_max_tie_weight"]) L_weighting = L_weighting.view(1, 1, Ly * Lx).to(device) weighting = weighting * L_weighting return weighting def get_fold_unfold(self, x, kernel_size, stride, uf=1, df=1): # todo load once not every time, shorten code """ :param x: img of size (bs, c, h, w) :return: n img crops of size (n, bs, c, kernel_size[0], kernel_size[1]) """ bs, nc, h, w = x.shape # print(x.shape) # number of crops in image Ly = (h - kernel_size[0]) // stride[0] + 1 Lx = (w - kernel_size[1]) // stride[1] + 1 if uf == 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold = torch.nn.Fold(output_size=x.shape[2:], **fold_params) weighting = self.get_weighting(kernel_size[0], kernel_size[1], Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h, w) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0], kernel_size[1], Ly * Lx)) elif uf > 1 and df == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] * uf, kernel_size[1] * uf), dilation=1, padding=0, stride=(stride[0] * uf, stride[1] * uf)) fold = torch.nn.Fold(output_size=(x.shape[2] * uf, x.shape[3] * uf), **fold_params2) weighting = 
self.get_weighting(kernel_size[0] * uf, kernel_size[1] * uf, Ly, Lx, x.device).to(x.dtype) # print('weighting',weighting.shape,Ly,Lx) normalization = fold(weighting).view(1, 1, h * uf, w * uf) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] * uf, kernel_size[1] * uf, Ly * Lx)) elif df > 1 and uf == 1: fold_params = dict(kernel_size=kernel_size, dilation=1, padding=0, stride=stride) unfold = torch.nn.Unfold(**fold_params) fold_params2 = dict(kernel_size=(kernel_size[0] // df, kernel_size[1] // df), dilation=1, padding=0, stride=(stride[0] // df, stride[1] // df)) fold = torch.nn.Fold(output_size=(x.shape[2] // df, x.shape[3] // df), **fold_params2) weighting = self.get_weighting(kernel_size[0] // df, kernel_size[1] // df, Ly, Lx, x.device).to(x.dtype) normalization = fold(weighting).view(1, 1, h // df, w // df) # normalizes the overlap weighting = weighting.view((1, 1, kernel_size[0] // df, kernel_size[1] // df, Ly * Lx)) else: raise NotImplementedError return fold, unfold, normalization, weighting @torch.no_grad() def get_input(self, batch, k, return_first_stage_outputs=False, force_c_encode=False, cond_key=None, return_original_cond=False, bs=None): x = super().get_input(batch, k) if bs is not None: x = x[:bs] x = x.to(self.device) encoder_posterior = self.encode_first_stage(x) z = self.get_first_stage_encoding(encoder_posterior).detach() if self.model.conditioning_key is not None: if cond_key is None: cond_key = self.cond_stage_key if cond_key != self.first_stage_key: if cond_key in ['caption', 'coordinates_bbox']: xc = batch[cond_key] elif cond_key == 'class_label': xc = batch else: xc = super().get_input(batch, cond_key).to(self.device) else: xc = x if not self.cond_stage_trainable or force_c_encode: # if not self.cond_stage_trainable or force_c_encode: if isinstance(xc, dict) or isinstance(xc, list): # import pudb; pudb.set_trace() c = self.get_learned_conditioning(xc) else: c = self.get_learned_conditioning(xc.to(self.device)) if self.text_prior_enable: c = self.get_additional_cond(xc, c) # c = {'c_concat': [xc], 'c_crossattn': [c]} else: c = xc if bs is not None: if isinstance(c, dict): for k, v in c.items(): c[k] = [v[0][:bs]] else: c = c[:bs] if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) ckey = __conditioning_keys__[self.model.conditioning_key] c = {ckey: c, 'pos_x': pos_x, 'pos_y': pos_y} else: c = None xc = None if self.use_positional_encodings: pos_x, pos_y = self.compute_latent_shifts(batch) c = {'pos_x': pos_x, 'pos_y': pos_y} out = [z, c] # print('fuck',c.shape) if return_first_stage_outputs: xrec = self.decode_first_stage(z) out.extend([x, xrec]) if return_original_cond: out.append(xc) return out @torch.no_grad() def decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. 
(64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape print('decode z shape', z.shape) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") print(ks, stride, uf) fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) # same as above but without decorator def differentiable_decode_first_stage(self, z, predict_cids=False, force_not_quantize=False): if predict_cids: if z.dim() == 4: z = torch.argmax(z.exp(), dim=1).long() z = self.first_stage_model.quantize.get_codebook_entry(z, shape=None) z = rearrange(z, 'b h w c -> b c h w').contiguous() z = 1. / self.scale_factor * z if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) uf = self.split_input_params["vqf"] bs, nc, h, w = z.shape if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(z, ks, stride, uf=uf) z = unfold(z) # (bn, nc * prod(**ks), L) # 1. Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) # 2. apply model loop over last dim if isinstance(self.first_stage_model, VQModelInterface): output_list = [self.first_stage_model.decode(z[:, :, :, :, i], force_not_quantize=predict_cids or force_not_quantize) for i in range(z.shape[-1])] else: output_list = [self.first_stage_model.decode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) # # (bn, nc, ks[0], ks[1], L) o = o * weighting # Reverse 1. 
reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization # norm is shape (1, 1, h, w) return decoded else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) else: if isinstance(self.first_stage_model, VQModelInterface): return self.first_stage_model.decode(z, force_not_quantize=predict_cids or force_not_quantize) else: return self.first_stage_model.decode(z) @torch.no_grad() def encode_first_stage(self, x): if hasattr(self, "split_input_params"): if self.split_input_params["patch_distributed_vq"]: ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) df = self.split_input_params["vqf"] self.split_input_params['original_image_size'] = x.shape[-2:] bs, nc, h, w = x.shape print('encode x shape', x.shape) print('ks', ks, 'stride', stride, 'df', df) if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) print("reducing stride") fold, unfold, normalization, weighting = self.get_fold_unfold(x, ks, stride, df=df) z = unfold(x) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) print('encode z shape', z.shape) output_list = [self.first_stage_model.encode(z[:, :, :, :, i]) for i in range(z.shape[-1])] o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together decoded = fold(o) decoded = decoded / normalization return decoded else: return self.first_stage_model.encode(x) else: return self.first_stage_model.encode(x) def on_validation_start(self) -> None: print(f'******************************in validation {self.current_epoch}') def validation_step(self, batch, batch_idx): # print('******************************in validation') _, loss_dict_no_ema = self.shared_step(batch) with self.ema_scope(): _, loss_dict_ema = self.shared_step(batch) loss_dict_ema = {key + '_ema': loss_dict_ema[key] for key in loss_dict_ema} self.log_dict(loss_dict_no_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) self.log_dict(loss_dict_ema, prog_bar=False, logger=True, on_step=False, on_epoch=True) if self.fid_eval and self.current_epoch % 10 == 0: results = self.recognize_sample(batch, N=114514, inpaint=False) rec_image = results['samples'] target = batch[self.first_stage_key] target = rearrange(target, 'b h w c -> b c h w') cond = batch[self.cond_stage_key] cond = rearrange(cond, 'b h w c -> b c h w') if self.visualize: batchlen = rec_image.shape[0] rc = int(math.sqrt(batchlen)) f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) print(len(axs), batchlen, int(math.sqrt(batchlen))) assert len(axs) ** 2 == batchlen for i in range(batchlen): axs[i // rc, i % rc].set_xticklabels([]) axs[i // rc, i % rc].set_yticklabels([]) axs[i // rc, i % rc].set_aspect('equal') axs[i // rc, i % rc].imshow(rec_image[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/sample_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) 
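        # Same rc x rc layout for the ground-truth HR targets (and the LR inputs below),
        # so the saved sample/target/input grids can be compared panel by panel.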
plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(target[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/target_{batch_idx}.jpg') plt.cla() f, axs = plt.subplots(rc, rc, figsize=(16, 4), sharex=True, sharey=True) plt.subplots_adjust(wspace=0, hspace=0) for i in range(batchlen): axs[i // rc, i % rc].imshow(cond[i, :3, :, :].cpu().numpy().transpose(1, 2, 0)) axs[i // rc, i % rc].axis('off') plt.savefig(f'/home/zhouyuxuan/res/input_{batch_idx}.jpg') PSNR = self.cal_psnr(rec_image[:, :3], target[:, :3]) SSIM = self.cal_ssim(rec_image[:, :3], target[:, :3]) self.log_dict({'PSNR': PSNR, 'SSIM': SSIM}, prog_bar=False, logger=True, on_step=False, on_epoch=True) def shared_step(self, batch, **kwargs): # print('*******************************************************batch',batch['image'].shape) # print('*******************************************************batch',batch['image'].shape) # if hasattr(self, "split_input_params"): # print(self.split_input_params) # else: # print('fuck') x, c = self.get_input(batch, self.first_stage_key) loss, loss_dict = self(x, c) if self.recog_loss_enable: HR = batch['image'] HR = rearrange(HR, 'b h w c -> b c h w') HR = HR.to(memory_format=torch.contiguous_format).float() LR = c label_vecs = self.get_learned_conditioning(c).permute(1, 0, 2) label_vecs_hr = self.get_learned_conditioning(HR).permute(1, 0, 2) loss_recog_distill = sem_loss(label_vecs, label_vecs_hr) * 100 # 100 loss = loss + loss_recog_distill loss_dict.update({f'loss_recog': loss_recog_distill}) # return loss + loss_recog_distill, loss_dict # # else: return loss, loss_dict def get_additional_cond(self, c, tp): if self.stn: _, ctrl_points_c = self.stn_head(c) c, _ = self.tps(c, ctrl_points_c) if self.standard_text: x_q = torch.empty(1, 2, c.shape[2], c.shape[3]) # prob_lr = torch.empty(1, 25, 37) rec_results = get_string_crnn(tp.permute(1, 0, 2), False) for i in range(c.shape[0]): # visionlan_dict_lr = self.parse_visionlan_data(c[i, :3, :, :]) # target = '' # label_lr, label_length = self.VL_model(visionlan_dict_lr, target, '', False) # pred_str_lr, pred_prob = self.test_acc_counter.convert(label_lr, label_length) # s = pred_str_lr[0] # prob_lr = torch.cat([prob_lr, pred_prob], dim=0) s = rec_results[i] if s == "" or type(s) == torch.Tensor: s = "\t" lower_case = s.lower() upper_case = s.upper() i_t_lower = make_standard_text(self.font_path, lower_case, (c.shape[2], c.shape[3])) i_t_lower_tensor = torch.from_numpy(i_t_lower).unsqueeze(0).unsqueeze(0) i_t_upper = make_standard_text(self.font_path, upper_case, (c.shape[2], c.shape[3])) i_t_upper_tensor = torch.from_numpy(i_t_upper).unsqueeze(0).unsqueeze(0) i_t_tensor = torch.cat([i_t_lower_tensor, i_t_upper_tensor], dim=1) x_q = torch.cat([x_q, i_t_tensor], dim=0) x_q = x_q[1:] # prob_lr = prob_lr[1:] x_q = x_q.to(self.device) # prob_lr = prob_lr.to(self.device) c = torch.cat([c, x_q], dim=1) return {'c_concat': [c], 'c_crossattn': [tp]} def forward(self, x, c, *args, **kwargs): t = torch.randint(0, self.num_timesteps, (x.shape[0],), device=self.device).long() if self.model.conditioning_key is not None: assert c is not None if self.text_prior_enable and self.model.conditioning_key == 'hybrid': tp = self.get_learned_conditioning(c) c = self.get_additional_cond(c, tp) else: if self.cond_stage_trainable: c = self.get_learned_conditioning(c) if self.shorten_cond_schedule: # TODO: drop this option tc = self.cond_ids[t].to(self.device) c = 
self.q_sample(x_start=c, t=tc, noise=torch.randn_like(c.float())) return self.p_losses(x, c, t, *args, **kwargs) def _rescale_annotations(self, bboxes, crop_coordinates): # TODO: move to dataset def rescale_bbox(bbox): x0 = clamp((bbox[0] - crop_coordinates[0]) / crop_coordinates[2]) y0 = clamp((bbox[1] - crop_coordinates[1]) / crop_coordinates[3]) w = min(bbox[2] / crop_coordinates[2], 1 - x0) h = min(bbox[3] / crop_coordinates[3], 1 - y0) return x0, y0, w, h return [rescale_bbox(b) for b in bboxes] def apply_model(self, x_noisy, t, cond, return_ids=False): if isinstance(cond, dict): # hybrid case, cond is exptected to be a dict pass else: if not isinstance(cond, list): cond = [cond] key = 'c_concat' if self.model.conditioning_key == 'concat' else 'c_crossattn' cond = {key: cond} if hasattr(self, "split_input_params"): assert len(cond) == 1 # todo can only deal with one conditioning atm assert not return_ids ks = self.split_input_params["ks"] # eg. (128, 128) stride = self.split_input_params["stride"] # eg. (64, 64) h, w = x_noisy.shape[-2:] if ks[0] > h or ks[1] > w: ks = (min(ks[0], h), min(ks[1], w)) # print("reducing Kernel") if stride[0] > h or stride[1] > w: stride = (min(stride[0], h), min(stride[1], w)) # print("reducing stride") # print('ddpm','x_noisy shape',x_noisy.shape,'ks',ks,'stride',stride) fold, unfold, normalization, weighting = self.get_fold_unfold(x_noisy, ks, stride) z = unfold(x_noisy) # (bn, nc * prod(**ks), L) # Reshape to img shape z = z.view((z.shape[0], -1, ks[0], ks[1], z.shape[-1])) # (bn, nc, ks[0], ks[1], L ) z_list = [z[:, :, :, :, i] for i in range(z.shape[-1])] if self.cond_stage_key in ["image", "LR_image", "segmentation", 'bbox_img'] and self.model.conditioning_key: # todo check for completeness c_key = next(iter(cond.keys())) # get key c = next(iter(cond.values())) # get value assert (len(c) == 1) # todo extend to list with more than one elem c = c[0] # get element c = unfold(c) c = c.view((c.shape[0], -1, ks[0], ks[1], c.shape[-1])) # (bn, nc, ks[0], ks[1], L ) cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])] elif self.cond_stage_key == 'coordinates_bbox': assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size' # assuming padding of unfold is always 0 and its dilation is always 1 n_patches_per_row = int((w - ks[0]) / stride[0] + 1) full_img_h, full_img_w = self.split_input_params['original_image_size'] # as we are operating on latents, we need the factor from the original image size to the # spatial latent size to properly rescale the crops for regenerating the bbox annotations num_downs = self.first_stage_model.encoder.num_resolutions - 1 rescale_latent = 2 ** (num_downs) # get top left postions of patches as conforming for the bbbox tokenizer, therefore we # need to rescale the tl patch coordinates to be in between (0,1) tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w, rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h) for patch_nr in range(z.shape[-1])] # patch_limits are tl_coord, width and height coordinates as (x_tl, y_tl, h, w) patch_limits = [(x_tl, y_tl, rescale_latent * ks[0] / full_img_w, rescale_latent * ks[1] / full_img_h) for x_tl, y_tl in tl_patch_coordinates] # patch_values = [(np.arange(x_tl,min(x_tl+ks, 1.)),np.arange(y_tl,min(y_tl+ks, 1.))) for x_tl, y_tl in tl_patch_coordinates] # tokenize crop coordinates for the bounding boxes of the respective patches patch_limits_tknzd = 
[torch.LongTensor(self.bbox_tokenizer._crop_encoder(bbox))[None].to(self.device) for bbox in patch_limits] # list of length l with tensors of shape (1, 2) print(patch_limits_tknzd[0].shape) # cut tknzd crop position from conditioning assert isinstance(cond, dict), 'cond must be dict to be fed into model' cut_cond = cond['c_crossattn'][0][..., :-2].to(self.device) print(cut_cond.shape) adapted_cond = torch.stack([torch.cat([cut_cond, p], dim=1) for p in patch_limits_tknzd]) adapted_cond = rearrange(adapted_cond, 'l b n -> (l b) n') print(adapted_cond.shape) adapted_cond = self.get_learned_conditioning(adapted_cond) print(adapted_cond.shape) adapted_cond = rearrange(adapted_cond, '(l b) n d -> l b n d', l=z.shape[-1]) print(adapted_cond.shape) cond_list = [{'c_crossattn': [e]} for e in adapted_cond] else: cond_list = [cond for i in range(z.shape[-1])] # Todo make this more efficient # apply model by loop over crops output_list = [self.model(z_list[i], t, **cond_list[i]) for i in range(z.shape[-1])] assert not isinstance(output_list[0], tuple) # todo cant deal with multiple model outputs check this never happens o = torch.stack(output_list, axis=-1) o = o * weighting # Reverse reshape to img shape o = o.view((o.shape[0], -1, o.shape[-1])) # (bn, nc * ks[0] * ks[1], L) # stitch crops together x_recon = fold(o) / normalization else: x_recon = self.model(x_noisy, t, **cond) if isinstance(x_recon, tuple) and not return_ids: return x_recon[0] else: return x_recon def _predict_eps_from_xstart(self, x_t, t, pred_xstart): return (extract_into_tensor(self.sqrt_recip_alphas_cumprod, t, x_t.shape) * x_t - pred_xstart) / \ extract_into_tensor(self.sqrt_recipm1_alphas_cumprod, t, x_t.shape) def _prior_bpd(self, x_start): """ Get the prior KL term for the variational lower-bound, measured in bits-per-dim. This term can't be optimized, as it only depends on the encoder. :param x_start: the [N x C x ...] tensor of inputs. :return: a batch of [N] KL values (in bits), one per batch element. 
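        Concretely: KL(q(x_T | x_0) || N(0, I)) at the final timestep, converted from nats to bits by dividing by log(2).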
""" batch_size = x_start.shape[0] t = torch.tensor([self.num_timesteps - 1] * batch_size, device=x_start.device) qt_mean, _, qt_log_variance = self.q_mean_variance(x_start, t) kl_prior = normal_kl(mean1=qt_mean, logvar1=qt_log_variance, mean2=0.0, logvar2=0.0) return mean_flat(kl_prior) / np.log(2.0) def p_losses(self, x_start, cond, t, noise=None): noise = default(noise, lambda: torch.randn_like(x_start)) x_noisy = self.q_sample(x_start=x_start, t=t, noise=noise) model_output = self.apply_model(x_noisy, t, cond) loss_dict = {} prefix = 'train' if self.training else 'val' if self.parameterization == "x0": target = x_start elif self.parameterization == "eps": target = noise else: raise NotImplementedError() loss_simple = self.get_loss(model_output, target, mean=False).mean([1, 2, 3]) loss_dict.update({f'{prefix}/loss_simple': loss_simple.mean()}) self.logvar = self.logvar.to(self.device) logvar_t = self.logvar[t].to(self.device) loss = loss_simple / torch.exp(logvar_t) + logvar_t # loss = loss_simple / torch.exp(self.logvar) + self.logvar if self.learn_logvar: loss_dict.update({f'{prefix}/loss_gamma': loss.mean()}) loss_dict.update({'logvar': self.logvar.data.mean()}) loss = self.l_simple_weight * loss.mean() loss_vlb = self.get_loss(model_output, target, mean=False).mean(dim=(1, 2, 3)) loss_vlb = (self.lvlb_weights[t] * loss_vlb).mean() loss_dict.update({f'{prefix}/loss_vlb': loss_vlb}) loss += (self.original_elbo_weight * loss_vlb) loss_dict.update({f'{prefix}/loss': loss}) return loss, loss_dict def p_mean_variance(self, x, c, t, clip_denoised: bool, return_codebook_ids=False, quantize_denoised=False, return_x0=False, score_corrector=None, corrector_kwargs=None): t_in = t model_out = self.apply_model(x, t_in, c, return_ids=return_codebook_ids) if score_corrector is not None: assert self.parameterization == "eps" model_out = score_corrector.modify_score(self, model_out, x, t, c, **corrector_kwargs) if return_codebook_ids: model_out, logits = model_out if self.parameterization == "eps": x_recon = self.predict_start_from_noise(x, t=t, noise=model_out) elif self.parameterization == "x0": x_recon = model_out else: raise NotImplementedError() if clip_denoised: x_recon.clamp_(-1., 1.) 
if quantize_denoised: x_recon, _, [_, _, indices] = self.first_stage_model.quantize(x_recon) model_mean, posterior_variance, posterior_log_variance = self.q_posterior(x_start=x_recon, x_t=x, t=t) if return_codebook_ids: return model_mean, posterior_variance, posterior_log_variance, logits elif return_x0: return model_mean, posterior_variance, posterior_log_variance, x_recon else: return model_mean, posterior_variance, posterior_log_variance @torch.no_grad() def p_sample(self, x, c, t, clip_denoised=False, repeat_noise=False, return_codebook_ids=False, quantize_denoised=False, return_x0=False, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None): b, *_, device = *x.shape, x.device outputs = self.p_mean_variance(x=x, c=c, t=t, clip_denoised=clip_denoised, return_codebook_ids=return_codebook_ids, quantize_denoised=quantize_denoised, return_x0=return_x0, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if return_codebook_ids: raise DeprecationWarning("Support dropped.") model_mean, _, model_log_variance, logits = outputs elif return_x0: model_mean, _, model_log_variance, x0 = outputs else: model_mean, _, model_log_variance = outputs noise = noise_like(x.shape, device, repeat_noise) * temperature if noise_dropout > 0.: noise = torch.nn.functional.dropout(noise, p=noise_dropout) # no noise when t == 0 nonzero_mask = (1 - (t == 0).float()).reshape(b, *((1,) * (len(x.shape) - 1))) if return_codebook_ids: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, logits.argmax(dim=1) if return_x0: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise, x0 else: return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise @torch.no_grad() def progressive_denoising(self, cond, shape, verbose=True, callback=None, quantize_denoised=False, img_callback=None, mask=None, x0=None, temperature=1., noise_dropout=0., score_corrector=None, corrector_kwargs=None, batch_size=None, x_T=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t timesteps = self.num_timesteps if batch_size is not None: b = batch_size if batch_size is not None else shape[0] shape = [batch_size] + list(shape) else: b = batch_size = shape[0] if x_T is None: img = torch.randn(shape, device=self.device) else: img = x_T intermediates = [] if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Progressive Generation', total=timesteps) if verbose else reversed( range(0, timesteps)) if type(temperature) == float: temperature = [temperature] * timesteps for i in iterator: ts = torch.full((b,), i, device=self.device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img, x0_partial = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised, return_x0=True, temperature=temperature[i], noise_dropout=noise_dropout, score_corrector=score_corrector, corrector_kwargs=corrector_kwargs) if mask is not None: assert x0 is not None img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. 
- mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(x0_partial) if callback: callback(i) if img_callback: img_callback(img, i) return img, intermediates @torch.no_grad() def p_sample_loop(self, cond, shape, return_intermediates=False, x_T=None, verbose=True, callback=None, timesteps=None, quantize_denoised=False, mask=None, x0=None, img_callback=None, start_T=None, log_every_t=None): if not log_every_t: log_every_t = self.log_every_t device = self.betas.device b = shape[0] if x_T is None: img = torch.randn(shape, device=device) else: img = x_T intermediates = [img] if timesteps is None: timesteps = self.num_timesteps if start_T is not None: timesteps = min(timesteps, start_T) iterator = tqdm(reversed(range(0, timesteps)), desc='Sampling t', total=timesteps) if verbose else reversed( range(0, timesteps)) if mask is not None: assert x0 is not None assert x0.shape[2:3] == mask.shape[2:3] # spatial size has to match for i in iterator: ts = torch.full((b,), i, device=device, dtype=torch.long) if self.shorten_cond_schedule: assert self.model.conditioning_key != 'hybrid' tc = self.cond_ids[ts].to(cond.device) cond = self.q_sample(x_start=cond, t=tc, noise=torch.randn_like(cond)) img = self.p_sample(img, cond, ts, clip_denoised=self.clip_denoised, quantize_denoised=quantize_denoised) if mask is not None: img_orig = self.q_sample(x0, ts) img = img_orig * mask + (1. - mask) * img if i % log_every_t == 0 or i == timesteps - 1: intermediates.append(img) if callback: callback(i) if img_callback: img_callback(img, i) if return_intermediates: return img, intermediates return img @torch.no_grad() def sample(self, cond, batch_size=16, return_intermediates=False, x_T=None, verbose=True, timesteps=None, quantize_denoised=False, mask=None, x0=None, shape=None, **kwargs): if shape is None: shape = (batch_size, self.channels, self.image_size, self.image_size) if cond is not None: if isinstance(cond, dict): cond = {key: cond[key][:batch_size] if not isinstance(cond[key], list) else list(map(lambda x: x[:batch_size], cond[key])) for key in cond} else: cond = [c[:batch_size] for c in cond] if isinstance(cond, list) else cond[:batch_size] return self.p_sample_loop(cond, shape, return_intermediates=return_intermediates, x_T=x_T, verbose=verbose, timesteps=timesteps, quantize_denoised=quantize_denoised, mask=mask, x0=x0) @torch.no_grad() def sample_log(self, cond, batch_size, ddim, ddim_steps, **kwargs): if ddim: ddim_sampler = DDIMSampler(self) # print(cond.shape) if self.text_prior_enable: if isinstance(cond, dict): shape = (self.channels, cond['c_concat'][0].shape[2], cond['c_concat'][0].shape[3]) elif isinstance(cond, list): shape = (self.channels, cond[0].shape[2], cond[0].shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) else: shape = (self.channels, cond.shape[2], cond.shape[3]) # shape = (self.channels, self.image_size, self.image_size) samples, intermediates = ddim_sampler.sample(ddim_steps, batch_size, shape, cond, verbose=False, **kwargs) else: samples, intermediates = self.sample(cond=cond, batch_size=batch_size, return_intermediates=True, **kwargs) return samples, intermediates @torch.no_grad() def log_images(self, batch, N=8, n_row=4, sample=True, ddim_steps=200, ddim_eta=1., return_keys=None, quantize_denoised=True, inpaint=True, plot_denoise_rows=False, plot_progressive_rows=True, plot_diffusion_rows=True, **kwargs): use_ddim = ddim_steps is not None log = dict() z, c, x, xrec, xc = self.get_input(batch, self.first_stage_key, 
return_first_stage_outputs=True, force_c_encode=True, return_original_cond=True, bs=N) # print('**********************c shape',c.shape) N = min(x.shape[0], N) n_row = min(x.shape[0], n_row) log["inputs"] = x log["reconstruction"] = xrec if self.model.conditioning_key is not None: if hasattr(self.cond_stage_model, "decode"): xc = self.cond_stage_model.decode(c) log["conditioning"] = xc elif self.cond_stage_key in ["caption"]:
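A minimal, self-contained sketch of the ancestral update that p_sample above implements, x_{t-1} = mu_theta(x_t, t) + sigma_t * z with the noise term masked out at t == 0. The standalone p_sample_step helper and the tensor shapes are illustrative assumptions, not part of the record:

import torch

def p_sample_step(model_mean, model_log_variance, t):
    # z ~ N(0, I); sigma_t = exp(0.5 * log variance), as in the code above.
    noise = torch.randn_like(model_mean)
    # No noise on the final step (t == 0).
    nonzero_mask = (1 - (t == 0).float()).reshape(-1, *([1] * (model_mean.dim() - 1)))
    return model_mean + nonzero_mask * (0.5 * model_log_variance).exp() * noise

x = p_sample_step(torch.zeros(4, 3, 8, 8), torch.full((4, 3, 8, 8), -2.0), torch.tensor([3, 2, 1, 0]))
print(x.shape)  # torch.Size([4, 3, 8, 8]); the last sample in the batch (t == 0) gets no noise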
next_line: xc = log_txt_as_img((x.shape[2], x.shape[3]), batch["caption"])
gold_snippet_index: 0
created_at: 2023-11-20 06:34:21+00:00
level: 24k
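A minimal sketch of the epsilon-prediction objective that the p_losses method in the record above computes: sample t, form x_t from x_0 via the closed-form forward process, and regress the network output onto the added noise. TinyDenoiser, the linear beta schedule, and T = 1000 are illustrative assumptions, not names from the record:

import torch
import torch.nn as nn

T = 1000
betas = torch.linspace(1e-4, 2e-2, T)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)

class TinyDenoiser(nn.Module):
    def __init__(self, dim=16):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(dim + 1, 64), nn.ReLU(), nn.Linear(64, dim))

    def forward(self, x, t):
        # Condition on the (normalized) timestep by concatenation.
        return self.net(torch.cat([x, t[:, None].float() / T], dim=1))

def p_losses(model, x_start):
    b = x_start.shape[0]
    t = torch.randint(0, T, (b,))
    noise = torch.randn_like(x_start)
    a = alphas_cumprod[t][:, None]
    # Forward process q(x_t | x_0) via reparameterization.
    x_noisy = a.sqrt() * x_start + (1 - a).sqrt() * noise
    # eps-parameterization: the network predicts the added noise.
    return ((model(x_noisy, t) - noise) ** 2).mean()

model = TinyDenoiser()
loss = p_losses(model, torch.randn(8, 16))
loss.backward()
print(float(loss))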
repo_name: microsoft/Project-BayesDAG
file_path: src/causica/models/visl.py
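A minimal sketch of the acyclicity penalty applied by the compute_dag_loss snippet in the context below: h(A) = tr(exp(A ∘ A)) - d, which is zero exactly when the weighted adjacency matrix A describes a DAG (the NOTEARS characterization). The example matrices are illustrative:

import torch

def dag_penalty(adj: torch.Tensor) -> torch.Tensor:
    # Elementwise square keeps entries nonnegative; the trace of the matrix
    # exponential counts weighted closed walks, so it exceeds d iff cycles exist.
    d = adj.shape[0]
    return torch.trace(torch.matrix_exp(adj * adj)) - d

acyclic = torch.tensor([[0.0, 1.0], [0.0, 0.0]])  # edge 0 -> 1, no cycle
cyclic = torch.tensor([[0.0, 1.0], [1.0, 0.0]])   # 0 <-> 1, a 2-cycle
print(dag_penalty(acyclic))  # ~0
print(dag_penalty(cyclic))   # > 0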
[ { "identifier": "Dataset", "path": "src/causica/datasets/dataset.py", "snippet": "class Dataset(BaseDataset):\n \"\"\"\n Class to store dense train/val/test data and masks and variables metadata.\n Note that the data and masks provided by this class are read only.\n \"\"\"\n\n def __init__(\n self,\n train_data: np.ndarray,\n train_mask: np.ndarray,\n val_data: Optional[np.ndarray] = None,\n val_mask: Optional[np.ndarray] = None,\n test_data: Optional[np.ndarray] = None,\n test_mask: Optional[np.ndarray] = None,\n variables: Optional[Variables] = None,\n data_split: Optional[Dict[str, Any]] = None,\n held_out_interventions: Optional[Dict[str, Any]]=None,\n true_posterior: Optional[Any]=None,\n graph_args: Optional[Dict[str, Any]]=None\n ) -> None:\n super().__init__(train_data, train_mask, val_data, val_mask, test_data, test_mask, variables, data_split, held_out_interventions, true_posterior, graph_args)\n\n # Ensure that data and masks are immutable\n if not issparse(self._train_data):\n self._train_data.setflags(write=False)\n self._train_mask.setflags(write=False)\n if test_data is not None and not issparse(test_data):\n self._test_data = cast(np.ndarray, test_data)\n self._test_data.setflags(write=False)\n self._test_mask = cast(np.ndarray, test_mask)\n self._test_mask.setflags(write=False)\n\n if val_data is not None and not issparse(val_data):\n self._val_data = cast(np.ndarray, val_data)\n self._val_mask = cast(np.ndarray, val_mask)\n self._val_data.setflags(write=False)\n self._val_mask.setflags(write=False)\n\n def to_causal(\n self,\n adjacency_data: Optional[np.ndarray],\n subgraph_data: Optional[np.ndarray],\n intervention_data: Optional[List[InterventionData]],\n counterfactual_data: Optional[List[InterventionData]] = None,\n ):\n \"\"\"\n Return the dag version of this dataset.\n \"\"\"\n return CausalDataset(\n train_data=self._train_data,\n train_mask=self._train_mask,\n adjacency_data=adjacency_data,\n subgraph_data=subgraph_data,\n intervention_data=intervention_data,\n counterfactual_data=counterfactual_data,\n val_data=self._val_data,\n val_mask=self._val_mask,\n test_data=self._test_data,\n test_mask=self._test_mask,\n variables=self._variables,\n data_split=self._data_split,\n held_out_interventions=self._held_out_interventions,\n true_posterior=self._true_posterior,\n graph_args=self._graph_args\n )\n\n @property\n def train_data_and_mask(self) -> Tuple[np.ndarray, np.ndarray]:\n # Add to avoid inconsistent type mypy error\n return self._train_data, self._train_mask" }, { "identifier": "Variables", "path": "src/causica/datasets/variables.py", "snippet": "class Variables:\n \"\"\"\n This class represents any variables present in a model.\n \"\"\"\n\n def __init__(\n self,\n variables: List[Variable],\n auxiliary_variables: Optional[List[Variable]] = None,\n used_cols: Optional[List[int]] = None,\n ) -> None:\n \"\"\"\n Args:\n variables: A list Variable objects.\n auxiliary_variables: A list of Variable objects only used for input into VAE,\n not produced in output.\n These are assumed to be appended onto the end of the variables in the data.\n Defaults to None - no aux variables present.\n used_cols: A list of column ids that were used when processing the original data.\n \"\"\"\n if not auxiliary_variables:\n auxiliary_variables = []\n self.auxiliary_variables = auxiliary_variables\n self._variables = variables\n\n self._deduplicate_names()\n\n # Dictionary mapping from variable name to variable index.\n self.name_to_idx = {var.name: idx for idx, var in 
enumerate(self._variables)}\n\n # Lists containing query and target variable indices\n self.target_var_idxs = []\n self.not_target_var_idxs = []\n self.query_var_idxs = []\n self.not_query_var_idxs = []\n for idx, var in enumerate(self._variables):\n if var.query:\n self.query_var_idxs.append(idx)\n else:\n self.not_query_var_idxs.append(idx)\n if var.target:\n self.target_var_idxs.append(idx)\n else:\n self.not_target_var_idxs.append(idx)\n\n if len(self.target_var_idxs) > 0 and all(idx in self.query_var_idxs for idx in self.target_var_idxs):\n warnings.warn(\n \"All target variables are marked as queriable, it is likely that active learning will always \"\n \"select these variables first.\"\n )\n\n # Lists containing continuous (including text) and binary/categorical variable indices\n self.var_idxs_by_type: DefaultDict[str, List[int]] = defaultdict(list)\n for idx, var in enumerate(self._variables + self.auxiliary_variables):\n self.var_idxs_by_type[var.type_].append(idx)\n\n # List of lists, where self.unprocessed_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data.\n self.unprocessed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.unprocessed_non_aux_cols[i] gives the columns occupied by the ith variable in the unprocessed\n # data (non-auxiliary).\n self.unprocessed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.unprocessed_dim\n self.unprocessed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_cols[i] gives the columns occupied by the ith variable in the processed\n # data.\n self.processed_cols = []\n start_col = 0\n for var in self._all_variables:\n end_col = start_col + var.processed_dim\n self.processed_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # List of lists, where self.processed_non_aux_cols[i] gives the columns occupied by the ith variable in the processed\n # data (non-auxiliary).\n self.processed_non_aux_cols = []\n start_col = 0\n for var in self._variables:\n end_col = start_col + var.processed_dim\n self.processed_non_aux_cols.append(list(range(start_col, end_col)))\n start_col = end_col\n\n # Set of all query group names, maintaining order in which they are first encountered when iterating through\n # the variables list. 
This is the simplest way to do this since dictionaries are guaranteed to be\n # insertion-ordered since Python 3.7\n self.group_names = list(dict.fromkeys([var.group_name for var in self._variables]))\n\n # List containing indices for each query group, where the query group names are assumed to be in the same order\n # as self.group_names\n self.group_idxs = [\n [idx for idx, var in enumerate(self._variables) if var.group_name == group_name]\n for group_name in self.group_names\n ]\n\n # Remove groups containing no queriable variables from self.group_names and self.group_idxs, as\n # we can guarantee that we will never query these groups.\n is_group_queriable = [any(self._variables[idx].query for idx in idxs) for idxs in self.group_idxs]\n\n self.group_names = [name for group_idx, name in enumerate(self.group_names) if is_group_queriable[group_idx]]\n self.group_idxs = [idxs for group_idx, idxs in enumerate(self.group_idxs) if is_group_queriable[group_idx]]\n\n # Save the list of observed column ids\n default_used_cols = list(range(len(self._variables) + len(auxiliary_variables))) # All columns observed\n self.used_cols = used_cols if used_cols is not None else default_used_cols\n assert len(self.used_cols) == len(self._variables) + len(self.auxiliary_variables)\n\n self.col_id_to_var_index = {old: new for new, old in enumerate(self.used_cols)}\n\n def __repr__(self):\n return str(self._variables)\n\n def __iter__(self) -> Iterator[Variable]:\n \"\"\"\n Iterate through the variables within the container.\n Note - Now it iterate through all the variables within the container\n (including auxiliary variables, if they are present)\n \"\"\"\n for var in self._all_variables:\n yield var\n\n def __getitem__(self, idx):\n return (self._all_variables)[idx]\n\n def __len__(self) -> int:\n return len(self._variables) + len(self.auxiliary_variables)\n\n @classmethod\n def create_from_json(cls, path: str) -> Variables:\n return cls.create_from_dict(read_json_as(path, dict))\n\n @classmethod\n def create_from_dict(cls, variables_dict: Dict[str, List[Any]]) -> Variables:\n \"\"\"\n Create variables object from a dictionary\n \"\"\"\n variables = variables_dict[\"variables\"]\n for var in variables:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n var_obj_list = [Variable(**var) for var in variables]\n\n auxiliary_vars = variables_dict.get(\"auxiliary_variables\", [])\n if len(auxiliary_vars) == 0:\n auxiliary_vars_obj = None\n else:\n for var in auxiliary_vars:\n # remove deprecated \"id\" key if present\n var.pop(\"id\", None)\n\n auxiliary_vars_obj = [Variable(**var) for var in auxiliary_vars]\n\n used_cols = variables_dict.get(\"used_cols\", None)\n\n return cls(var_obj_list, auxiliary_vars_obj, used_cols)\n\n @classmethod\n def create_from_data_and_dict(\n cls, data: np.ndarray, mask: np.ndarray, variables_dict: Optional[Dict[str, Any]] = None\n ) -> Variables:\n \"\"\"\n Create variables object from an input dictionary, inferring missing fields using `data` and `mask`.\n \"\"\"\n # Infer missing fields in variables_dict\n variables_dict = cls.infer_from_data(data, mask, variables_dict, True)\n variables = cls.create_from_dict(variables_dict)\n return variables\n\n @staticmethod\n def _metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n ) -> Tuple[List[Any], Union[List[Any], None]]:\n \"\"\"\n Infer variables_metadata from input data\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for 
unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n variables_type: is it aux variables, or normal variables\n Returns:\n varaibles_metadata: inferred metadata from input data\n A list of column ids that were used when processing the original data.\n \"\"\"\n\n variables_metadata = []\n # Use None rather than {} as default since mutable default args are dangerous in Python.\n used_cols = variables_dict.get(\"used_cols\", None)\n if used_cols:\n used_cols = cast(List[int], used_cols)\n assert len(used_cols) == data.shape[1]\n\n for idx, variable_metadata in enumerate(variables_dict[variables_type]):\n if not all(\n k in variable_metadata for k in [\"name\", \"type\", \"lower\", \"upper\", \"query\", \"target\", \"always_observed\"]\n ):\n # If variable metadata fully specified, do not try to infer, as doing column indexing can be expensive\n # for CSR sparse matrices.\n var_data = data[:, idx]\n var_mask = mask[:, idx]\n if issparse(var_data):\n var_data = var_data.toarray()\n var_mask = var_mask.toarray()\n\n if \"name\" not in variable_metadata:\n if used_cols:\n variable_metadata[\"name\"] = str(used_cols[idx])\n else:\n variable_metadata[\"name\"] = f\"Column {idx}\"\n\n # If data type/min max/num categories specified explicitly, overwrite variables file\n if \"type\" not in variable_metadata:\n # Test if all unmasked elements are integers\n\n if np.all((var_data * var_mask) // 1 == var_data * var_mask):\n if (var_data * var_mask).max() <= 1:\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as binary. This can be '\n \"changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"binary\"\n else:\n # Note that we always infer integer values with a max value > 1 as categorical. This may want to be\n # reconsidered if support for ordinal variables is introduced at a later date.\n print(\n f'Type of variable {variable_metadata[\"name\"]} inferred as categorical. This can be'\n \" changed manually in the dataset's variables.json file\"\n )\n variable_metadata[\"type\"] = \"categorical\"\n else:\n variable_metadata[\"type\"] = \"continuous\"\n\n if \"lower\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_lower = 0\n else:\n inferred_lower = min(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"lower\"] = inferred_lower\n print(\n f'Minimum value of variable {variable_metadata[\"name\"]} inferred as {inferred_lower}. This'\n \" can be changed manually in the dataset's variables.json file\"\n )\n\n if \"upper\" not in variable_metadata:\n if variable_metadata[\"type\"] == \"binary\":\n inferred_upper = 1\n else:\n inferred_upper = max(var_data[np.where(var_mask == 1)]).item()\n variable_metadata[\"upper\"] = inferred_upper\n print(\n f'Max value of variable {variable_metadata[\"name\"]} inferred as {inferred_upper}. This can '\n \"be changed manually in the dataset's variables.json file\"\n )\n\n if \"query\" not in variable_metadata:\n # By default, assume all variables can be queried unless specified otherwise.\n if variables_type == \"auxiliary_variables\":\n variable_metadata[\"query\"] = False\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a non-queriable variable. 
'\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n else:\n variable_metadata[\"query\"] = True\n print(\n f'Variable {variable_metadata[\"name\"]} inferred to be a queriable variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"query\" field.'\n )\n\n if \"target\" not in variable_metadata:\n # By default, assume variable is a target if and only if it is not queriable.\n variable_metadata[\"target\"] = not variable_metadata[\"query\"]\n fill_string = \"not \" if not variable_metadata[\"target\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an active learning target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"target\" field.'\n )\n\n if \"always_observed\" not in variable_metadata:\n # By default, assume variable is always observed if there is no missing in the mask.\n if np.sum((var_mask - 1) ** 2) == 0:\n variable_metadata[\"always_observed\"] = True\n else:\n variable_metadata[\"always_observed\"] = False\n fill_string = \"not \" if not variable_metadata[\"always_observed\"] else \"\"\n print(\n f'Variable {variable_metadata[\"name\"]} inferred as {fill_string}an always observed target variable. '\n 'This can be changed manually in the dataset\\'s variables.json file by updating the \"always_observed\" field.'\n )\n\n variables_metadata.append(variable_metadata)\n\n return variables_metadata, used_cols\n\n @staticmethod\n def infer_from_data(data, mask, variables_dict=None, infer_aux_variables=False) -> Dict[str, List[Any]]:\n \"\"\"\n Infer missing values in an input variables dictionary, using the input data.\n\n Args:\n data: NumPy array containing data\n mask: NumPy array containing 1 for observed data values, 0 for unobserved data values.\n variables_dict: Dictionary containing metadata for each variable (column) in the input data. 
Missing variables,\n or missing fields for a particular variable, will attempt to be inferred from the input data.\n infer_aux_variables: infer auxiliary variables for GINA or not.\n Returns:\n variables_dict: Updated version of the input variables_dict, with missing variables and fields inferred from the\n data.\n \"\"\"\n\n if variables_dict is None:\n variables_dict = {}\n\n # NOTE this assumes all variables have only one column in unprocessed data, which should always be the case when\n # inferring from a dataset.\n if \"auxiliary_variables\" not in variables_dict:\n variables_dict[\"auxiliary_variables\"] = []\n\n if \"variables\" not in variables_dict or variables_dict[\"variables\"] == []:\n num_var_cols = data.shape[1] - len(variables_dict[\"auxiliary_variables\"])\n variables_dict[\"variables\"] = [{} for _ in range(num_var_cols)]\n\n variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": variables_dict[\"auxiliary_variables\"],\n \"used_cols\": used_cols,\n }\n if infer_aux_variables:\n aux_variables_metadata, used_cols = Variables._metadata_from_dict(\n data, mask, variables_dict, variables_type=\"auxiliary_variables\"\n )\n variables_dict = {\n \"variables\": variables_metadata,\n \"auxiliary_variables\": aux_variables_metadata,\n \"used_cols\": used_cols,\n }\n\n return variables_dict\n\n @property\n def _all_variables(self):\n return self._variables + self.auxiliary_variables\n\n @property\n def has_auxiliary(self) -> bool:\n \"\"\"\n True if there are aux variables present.\n \"\"\"\n return len(self.auxiliary_variables) > 0\n\n @property\n def binary_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all binary variables.\n \"\"\"\n return self.var_idxs_by_type[\"binary\"]\n\n @property\n def categorical_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all categorical variables.\n \"\"\"\n return self.var_idxs_by_type[\"categorical\"]\n\n @property\n def discrete_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all discrete (i.e. binary or categorical) variables. We sort to ensure that the\n combined list is in ascending order.\n \"\"\"\n return sorted(self.var_idxs_by_type[\"categorical\"] + self.var_idxs_by_type[\"binary\"])\n\n @property\n def continuous_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all continuous variables.\n \"\"\"\n return self.var_idxs_by_type[\"continuous\"]\n\n @property\n def text_idxs(self) -> List[int]:\n \"\"\"\n Return a list of the indices of all text variables.\n \"\"\"\n return self.var_idxs_by_type[\"text\"]\n\n @property\n def non_text_idxs(self) -> List[bool]:\n \"\"\"Helper method. Returns list of booleans, where an element\n at index i indicates whether a variable at index i is non-text or not\n e.g. 
For Variables object of [...\"continous\"..., ...\"text\"..., \"continuous\"],\n the result would be [True, False, True]\n \"\"\"\n unproc_cols_by_type = self.unprocessed_cols_by_type\n if \"text\" not in unproc_cols_by_type:\n return [True for _ in range(len(self))]\n return (~np.in1d(range(len(self)), unproc_cols_by_type[\"text\"])).tolist()\n\n @property\n def num_unprocessed_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_cols)\n\n @property\n def num_unprocessed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the unprocessed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.unprocessed_non_aux_cols)\n\n @property\n def num_processed_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by all variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_cols)\n\n @property\n def num_processed_non_aux_cols(self) -> int:\n \"\"\"\n Return number of columns in the processed data represented by non auxiliary variables\n \"\"\"\n return sum(len(idxs) for idxs in self.processed_non_aux_cols)\n\n @property\n def num_groups(self) -> int:\n \"\"\"\n Return the number of unique query groups in the variables object.\n \"\"\"\n return len(self.group_names)\n\n @property\n def group_mask(self) -> np.ndarray:\n \"\"\"\n Return a mask of shape (num_groups, num_processed_cols) indicating which column\n corresponds to which group.\n \"\"\"\n mask = np.zeros((self.num_groups, self.num_processed_cols), dtype=bool)\n for group_idx, group in enumerate(self.group_idxs):\n for var in group:\n for proc_col in self.processed_cols[var]:\n mask[group_idx, proc_col] = 1\n return mask\n\n @property\n def proc_always_observed_list(self) -> List[Optional[bool]]:\n \"\"\"\n The mask that indicates if the variable is always observed (for processed data)\n \"\"\"\n return sum(([var.always_observed] * var.processed_dim for var in self._all_variables), [])\n\n @property\n def processed_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data associated with each variable of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._all_variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def processed_non_aux_cols_by_type(self) -> Dict[str, List[List[int]]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list of lists, where each\n sublist represents indices in the processed (i.e. one-hot) data (w/o aux variables) associated with each\n variable of that type.\n E.g. 
for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [[0,1,2], [3,4,5]], 'binary': [[6]]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[List[int]]] = defaultdict(list)\n for var, cols in zip(self._variables, self.processed_cols):\n grouped_vars[var.type_].append(cols)\n return grouped_vars\n\n @property\n def unprocessed_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._all_variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n @property\n def unprocessed_non_aux_cols_by_type(self) -> DefaultDict[str, List[int]]:\n \"\"\"\n Return a dictionary mapping each type of data (e.g. continuous, binary, ...) to a list containing the column\n indices in the unprocessed data for all variables of that type.\n\n E.g. for a two categorical variables each taking 3 values, followed by a binary variable, return\n {'categorical': [0, 1], 'binary': [2]}.\n \"\"\"\n grouped_vars: DefaultDict[str, List[int]] = defaultdict(list)\n i = 0\n for var, cols in zip(self._variables, self.unprocessed_cols):\n grouped_vars[var.type_] += cols\n i += var.unprocessed_dim\n return grouped_vars\n\n def subset(self, idxs: List[int], auxiliary_idxs: Optional[List[int]] = None) -> Variables:\n \"\"\"\n Returns a new Variables object containing only the Variable objects whose indices are given in `idxs`.\n Note that this currently ignores metadata variables.\n \"\"\"\n if auxiliary_idxs is None:\n auxiliary_idxs = []\n\n variables_list = [self._variables[idx] for idx in idxs]\n auxiliary_variables_list = [self.auxiliary_variables[idx] for idx in auxiliary_idxs]\n return Variables(variables_list, auxiliary_variables_list)\n\n def to_dict(self) -> Dict[str, Any]:\n variables_list = [var.to_json() for var in self._variables]\n if self.auxiliary_variables is None:\n auxiliary_vars_list = []\n else:\n auxiliary_vars_list = [var.to_json() for var in self.auxiliary_variables]\n\n variables_json = {\n \"variables\": variables_list,\n \"auxiliary_variables\": auxiliary_vars_list,\n \"used_cols\": [int(col) for col in self.used_cols],\n }\n return variables_json\n\n def save(self, path: str) -> None:\n variables_json = self.to_dict()\n save_json(variables_json, path)\n\n def as_list(self) -> List[Variable]:\n return self._variables\n\n def get_idxs_from_name_list(self, variable_names: List[Union[str, int]]) -> np.ndarray:\n \"\"\"\n Get a binary array of shape (variable_count,), where for each index the array value is 1 if the corresponding\n variable is named in `variable_names`, and 0 otherwise.\n \"\"\"\n variables_to_query = np.zeros((len(self._variables),))\n # Look up indices of specified variables and mark as queriable.\n for variable_name in variable_names:\n # Cast name to string in case numeric names (e.g. 
question ids) have been input as integers.\n variable_name = str(variable_name)\n variable_idx = self.name_to_idx[variable_name]\n variables_to_query[variable_idx] = 1\n\n return variables_to_query\n\n def get_observable_groups(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of indices for groups that are still observable in the current row\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n list of indices of groups that can be observed, where the indices correspond to the corresponding group\n names in `self.group_names`.\n \"\"\"\n observable_variables_idxs = self.get_observable_variable_idxs(data_mask_row, obs_mask_row)\n observable_groups_idxs: List[int] = []\n for group_idx, idxs in enumerate(self.group_idxs):\n if any(i in observable_variables_idxs for i in idxs):\n observable_groups_idxs.append(group_idx)\n return observable_groups_idxs\n\n def get_observable_variable_idxs(self, data_mask_row: np.ndarray, obs_mask_row: np.ndarray) -> List[int]:\n \"\"\"\n Get list of variable idxs for variables that are still observable in the current row.\n Args:\n data_mask_row: 1D numpy array containing 1 for observed variables and 0 for unobserved in the underlying data\n obs_mask_row: 1D numpy array containing 1 for variables observed during active learning and 0 for ones unobserved\n\n Returns:\n observable_vars: List of indices of variables that can be observed.\n \"\"\"\n if data_mask_row.ndim != 1:\n raise ValueError(f\"Test mask should be 1D, had {data_mask_row.ndim} dims and shape {data_mask_row.shape}.\")\n if obs_mask_row.ndim != 1:\n raise ValueError(\n f\"Observation mask should be 1D, had {obs_mask_row.ndim} dims and shape {obs_mask_row.shape}.\"\n )\n if len(obs_mask_row) != len(data_mask_row) or len(data_mask_row) != len(self._variables):\n # One likely cause is accidentally passing 'processed' masks, which may be longer\n # if some variables are categorical.\n raise ValueError(\n f\"Lengths of obs_mask_row {len(obs_mask_row)}, data_mask_row {len(data_mask_row)}, \"\n f\"and variables list {len(self._variables)} should all be the same.\"\n )\n # Get ids where there is an underlying data value (test_mask == 1) and that we haven't yet queried (obs_mask == 0)\n unobserved_idxs = np.where((data_mask_row == 1) & (obs_mask_row == 0))[0]\n\n # Intersection of these and query_var_idxs.\n observable_idx_set = set(unobserved_idxs).intersection(set(self.query_var_idxs))\n return list(observable_idx_set)\n\n def get_var_cols_from_data(self, var_idx, data):\n \"\"\"\n Get data from an array for a single variable only.\n\n Args:\n var_idx: Index of variable we want data for.\n data (shape (batch_size, variable_count)): Array to get variable info from.\n\n Returns:\n var_data (shape (observed_count, processed_dim)): Values only for\n the corresponding variable.\n \"\"\"\n return data[:, self.processed_cols[var_idx]]\n\n def get_variables_to_observe(self, data_mask: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Return a boolean tensor of length num_variables, where each element indicates whether the corresponding variable\n can be queried during active learning (i.e. 
the variable is queriable and has at least one observed value in\n the data).\n Args:\n data_mask (shape (batch_size, num_processed_cols)): Processed mask\n\n Returns:\n torch.Tensor (shape (variable_count,)): True where it's a query-able variable and we have at least one\n observed value\n \"\"\"\n cols_with_data = data_mask.sum(dim=0).to(torch.bool)\n\n # data_mask may have multiple columns for a single variable, if it's a categorical variable. Pick first entry per variable\n ii = torch.tensor([cols[0] for cols in self.processed_cols], dtype=torch.long, device=cols_with_data.device)\n cols_with_data = torch.index_select(cols_with_data, 0, ii)\n is_query_id = torch.zeros(len(self), dtype=torch.bool, device=cols_with_data.device)\n is_query_id[\n tuple(self.query_var_idxs),\n ] = True\n return is_query_id * cols_with_data\n\n def _deduplicate_names(self):\n # Produce warning if var name is reused and add an increasing integer to the end until it is unique.\n var_names = set()\n for var in self._all_variables:\n i = 2\n original_name = var.name\n while var.name in var_names:\n new_name = f\"{original_name}_{i}\"\n var.name = new_name\n i += 1\n if var.name != original_name:\n # Do the warning in a separate block to the while loop so that we only raise one warning if we have to\n # try appending several different integers to the name.\n warnings.warn(\n f\"Name {original_name} has already been used, renaming to {var.name}\",\n UserWarning,\n )\n var_names.add(var.name)\n\n # TODO: Maybe create Variables.Utils for methods like the below one\n @staticmethod\n def create_empty_data(variables: Variables) -> np.ndarray:\n var_count = len(variables)\n empty_data = np.zeros((1, var_count), dtype=object)\n for i in range(var_count):\n if variables[i].type_ == \"text\":\n empty_data[:, i] = \"empty str\"\n return empty_data" }, { "identifier": "IModelForCausalInference", "path": "src/causica/models/imodel.py", "snippet": "class IModelForCausalInference(IModel):\n @abstractmethod\n def cate(\n self,\n intervention_idxs: Union[torch.Tensor, np.ndarray],\n intervention_values: Union[torch.Tensor, np.ndarray],\n reference_values: Optional[np.ndarray] = None,\n effect_idxs: Optional[np.ndarray] = None,\n conditioning_idxs: Optional[Union[torch.Tensor, np.ndarray]] = None,\n conditioning_values: Optional[Union[torch.Tensor, np.ndarray]] = None,\n Nsamples_per_graph: int = 1,\n Ngraphs: Optional[int] = 1000,\n most_likely_graph: bool = False,\n fixed_seed: Optional[int] = None,\n ):\n \"\"\"\n Evaluate (optionally conditional) average treatment effect given the learnt causal model.\n \"\"\"\n raise NotImplementedError" }, { "identifier": "to_tensors", "path": "src/causica/utils/helper_functions.py", "snippet": "def to_tensors(\n *arrays: Union[torch.Tensor, np.ndarray], device: torch.device, dtype: torch.dtype = torch.float\n) -> Tuple[torch.Tensor, ...]:\n return tuple(torch.tensor(array, dtype=dtype, device=device) for array in arrays)" }, { "identifier": "save_json", "path": "src/causica/utils/io_utils.py", "snippet": "def save_json(data: Any, path: str) -> None:\n save(data, path, \".json\", partial(json.dump, indent=4, sort_keys=True))" }, { "identifier": "compute_dag_loss", "path": "src/causica/utils/nri_utils.py", "snippet": "def compute_dag_loss(vec, num_nodes):\n \"\"\"\n vec is a n*(n-1) array with the flattened adjacency matrix (without the diag).\n \"\"\"\n dev = vec.device\n adj_matrix = torch.zeros(num_nodes, num_nodes, device=dev)\n mask = (torch.ones(num_nodes, num_nodes, device=dev) - 
torch.eye(num_nodes, device=dev)).to(bool)\n adj_matrix[mask] = vec\n return torch.abs(torch.trace(torch.matrix_exp(adj_matrix * adj_matrix)) - num_nodes)" }, { "identifier": "get_feature_indices_per_node", "path": "src/causica/utils/nri_utils.py", "snippet": "def get_feature_indices_per_node(variables):\n \"\"\"\n Returns a list in which the i-th element is a list with the features indices that correspond to the i-th node.\n For each Variable in 'variables' argument, the node is specified through the group_name field.\n \"\"\"\n nodes = [v.group_name for v in variables]\n nodes_unique = sorted(set(nodes))\n if len(nodes_unique) == len(nodes):\n nodes_unique = nodes\n output = []\n for node in nodes_unique:\n output.append([i for (i, e) in enumerate(nodes) if e == node])\n return output, nodes_unique" }, { "identifier": "kl_categorical", "path": "src/causica/utils/nri_utils.py", "snippet": "def kl_categorical(preds, log_prior, num_atoms, eps=1e-16):\n \"\"\"\n preds: [num_sims, num_edges, num_edge_types]\n log_prior: [1, 1, num_edge_types]\n \"\"\"\n kl_div = preds * (torch.log(preds + eps) - log_prior)\n return kl_div.sum() / (num_atoms * preds.size(0))" }, { "identifier": "piecewise_linear", "path": "src/causica/utils/nri_utils.py", "snippet": "def piecewise_linear(x, start, width, max_val=1):\n \"\"\"\n Piecewise linear function whose value is:\n 0 if x<=start\n max_val if x>=start+width\n grows linearly from 0 to max_val if start<=x<=(start+width)\n It is used to define the coefficient of the DAG-loss in NRI-MV.\n \"\"\"\n return max_val * max(min((x - start) / width, 1), 0)" }, { "identifier": "get_input_and_scoring_masks", "path": "src/causica/utils/training_objectives.py", "snippet": "def get_input_and_scoring_masks(\n mask: torch.Tensor, *, max_p_train_dropout: float, score_imputation: bool, score_reconstruction: bool\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Apply random dropout to an unprocessed mask, and calculate output positions to be included in the training loss.\n\n Args:\n mask: unprocessed mask indicating which variables are available for training. 1 indicates observed, 0 indicates\n unobserved.\n max_p_train_dropout: max proportion of columns to mask in each row.\n score_imputation: If true, the scoring mask has 1.0 for entries that are present in the data but masked in the\n input to the model.\n score_reconstruction: if true, the scoring mask has 1.0 for entries that are unmasked in the model input.\n\n Returns:\n A tuple (input_mask, scoring_mask), where input_mask is is the unprocessed mask to be applied before passing\n data to the model for reconstruction/imputation. 
scoring_mask (also, unprocessed mask) indicates which entries\n in the output should be included when calculating negative log-likelihood loss.\n \"\"\"\n\n if max_p_train_dropout > 0:\n p_missing = torch.rand(mask.shape[0], 1) * max_p_train_dropout\n input_mask = mask * torch.bernoulli(1.0 - p_missing.expand_as(mask)).to(mask.dtype).to(mask.device)\n else:\n input_mask = mask\n if score_reconstruction:\n if score_imputation:\n # Score both reconstruction and imputation\n scoring_mask = mask\n else:\n # Only score reconstruction\n scoring_mask = input_mask\n else:\n # Only score imputation\n scoring_mask = mask - input_mask\n return input_mask, scoring_mask" }, { "identifier": "kl_divergence", "path": "src/causica/utils/training_objectives.py", "snippet": "def kl_divergence(\n z1: Tuple[torch.Tensor, torch.Tensor],\n z2: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n):\n mean1, logvar1 = z1\n\n if z2 is not None:\n mean2, logvar2 = z2\n else:\n mean2 = torch.zeros_like(mean1)\n logvar2 = torch.zeros_like(logvar1)\n\n sigma1 = logvar1.exp().sqrt()\n sigma2 = logvar2.exp().sqrt()\n\n normal1 = tdist.Normal(mean1, sigma1)\n normal2 = tdist.Normal(mean2, sigma2)\n\n kld = tdist.kl_divergence(normal1, normal2)\n kld = kld.sum(axis=1)\n return kld" }, { "identifier": "negative_log_likelihood", "path": "src/causica/utils/training_objectives.py", "snippet": "def negative_log_likelihood(\n data: torch.Tensor,\n decoder_mean: torch.Tensor,\n decoder_logvar: torch.Tensor,\n variables: Variables,\n alpha: float,\n mask: Optional[torch.Tensor] = None,\n sum_type: Optional[str] = \"all\",\n) -> torch.Tensor:\n \"\"\"\n This function computes the negative log likelihood for all features and sums them.\n\n Args:\n data: Input data, shape (batch_size, input_count).\n decoder_mean: Output from the decoder, shape (batch_size, output_count)\n decoder_logvar: Output from the decoder, shape (batch_size, output_count)\n variables: List of all variables\n alpha: Categorical likelihood coefficient in NLL calculation.\n mask: Mask for input data, shape (batch_size, input_count). 1 is present, 0 is missing. Set to all 1's if None.\n sum_type: How to sum result. None will return the entire array, 'cols' will sum per variable,'all' will sum all\n elements.\n Returns:\n nll: Negative log likelihood summed as per `sum_type`. torch.Tensor of shape (batch_size, num_vars)\n if `sum_type=='all'`, shape (1, num_vars) if `sum_type=='cols'` or a scalar if `sum_type is None`. 
Note that if\n the data contains categorical variables, then num_vars <= num_features, where num_features is the number of\n features in the input data, since these are encoded using a one-hot encoding which spans multiple columns.\n \"\"\"\n variables = variables.subset(list(range(0, variables.num_unprocessed_non_aux_cols)))\n assert sum_type in [None, \"cols\", \"all\"]\n if data.ndim != 2: # type: ignore\n raise ValueError(\"Data should have dims (batch_size, input_count)\")\n if decoder_logvar.ndim != 2 or decoder_mean.ndim != 2: # type: ignore\n raise ValueError(\"decoder_logvar and decoder_mean should each have dims (batch_size, output_count)\")\n\n batch_size = data.shape[0]\n num_vars = variables.num_unprocessed_cols\n if mask is None:\n mask = torch.ones_like(data)\n\n # Call unprocessed columns vars, processed columns idxs\n vars_by_type, idxs_by_type = (\n variables.unprocessed_cols_by_type,\n variables.processed_cols_by_type,\n )\n\n if sum_type is None:\n nlls = torch.zeros(batch_size, num_vars, device=data.device, dtype=data.dtype)\n else:\n nlls = torch.zeros(1, num_vars, device=data.device, dtype=data.dtype)\n\n def flatten(lists):\n \"\"\"\n Flatten idxs for continuous and binary vars, since they will be of form [[1], [2], ...]\n \"\"\"\n return [i for sublist in lists for i in sublist]\n\n # If returning columnwise/total sum, we sum the NLL for each var. Note if returning the total sum, we don't sum over\n # all elements of each type here, to make it easier to collect everything in a single nlls tensor.\n feature_sum_type = \"cols\" if sum_type is not None else None\n if \"continuous\" in vars_by_type:\n continuous_vars, continuous_idxs = (\n vars_by_type[\"continuous\"],\n flatten(idxs_by_type[\"continuous\"]),\n )\n continuous_idxs_nlls = gaussian_negative_log_likelihood(\n data[:, continuous_idxs],\n decoder_mean[:, continuous_idxs],\n decoder_logvar[:, continuous_idxs],\n mask[:, continuous_idxs],\n sum_type=feature_sum_type,\n )\n # Need to account for VAEM's overwrite_processed_dim hack\n # (i.e. 
continuous variables possible being of dimension>1)\n if all(len(idxs) == 1 for idxs in idxs_by_type[\"continuous\"]):\n # Optimized operation when all continuous variables are of dimension 1\n nlls[:, continuous_vars] = continuous_idxs_nlls\n else:\n # Slower, correct operation if there is continuous variable of dimension > 1\n if len(continuous_idxs_nlls.shape) == 1:\n continuous_idxs_nlls = continuous_idxs_nlls.unsqueeze(dim=0)\n current_idx = 0\n for var, idxs in zip(continuous_vars, idxs_by_type[\"continuous\"]):\n var_idxs = range(current_idx, current_idx + len(idxs))\n nlls[:, var] = continuous_idxs_nlls[:, var_idxs].sum(dim=1)\n current_idx += len(idxs_by_type[\"continuous\"][-1])\n if \"binary\" in vars_by_type:\n binary_vars, binary_idxs = (\n vars_by_type[\"binary\"],\n flatten(idxs_by_type[\"binary\"]),\n )\n nlls[:, binary_vars] = bernoulli_negative_log_likelihood(\n data[:, binary_idxs],\n decoder_mean[:, binary_idxs],\n mask[:, binary_idxs],\n sum_type=feature_sum_type,\n )\n if \"categorical\" in vars_by_type:\n categorical_vars, categorical_idxs = (\n vars_by_type[\"categorical\"],\n idxs_by_type[\"categorical\"],\n )\n for var, idxs in zip(categorical_vars, categorical_idxs):\n # Have to compute NLL for each categorical variable separately due to different numbers of categories\n nlls[:, var] = alpha * categorical_negative_log_likelihood(\n data[:, idxs],\n decoder_mean[:, idxs],\n mask[:, idxs],\n sum_type=feature_sum_type,\n )\n # Now sum everything if returning total sum.\n if sum_type == \"all\":\n nlls = nlls.sum()\n\n return nlls" }, { "identifier": "PVAEBaseModel", "path": "src/causica/models/pvae_base_model.py", "snippet": "class PVAEBaseModel(TorchModel, IModelForObjective):\n \"\"\"\n Abstract model class.\n\n To instantiate this class, these functions need to be implemented:\n _train: Run the training loop for the model.\n _impute: Fill in any missing values for test data.\n _reconstruct: Reconstruct data by passing them through the VAE\n name: Name of model implementation.\n \"\"\"\n\n def __init__(self, model_id: str, variables: Variables, save_dir: str, device: torch.device) -> None:\n \"\"\"\n Args:\n model_id: Unique model ID for referencing this model instance.\n variables: Information about variables/features used by this model.\n save_dir: Location to save any information about this model, including training data.\n It will be created if it doesn't exist.\n device: Name of Torch device to create the model on. Valid options are 'cpu', 'gpu', or a device ID\n (e.g. 
0 or 1 on a two-GPU machine).\n \"\"\"\n super().__init__(model_id, variables, save_dir, device)\n self._alpha = 1.0 # The default value for the categorical likelihood coefficient.\n\n @staticmethod\n def _split_vamp_prior_config(training_config: Dict[str, Any]) -> Tuple[dict, dict]:\n # Split training config into (training_config, vamp_prior_config)\n training_config = training_config.copy()\n vamp_prior_config = {\"save_vamp_prior\": training_config.pop(\"save_vamp_prior\")}\n for k in [\"vamp_prior_reward_samples\", \"vamp_prior_inducing_points\"]:\n vamp_prior_config.update({k: training_config.pop(k, None)})\n return training_config, vamp_prior_config\n\n def _save_vamp_prior(\n self,\n processed_dataset: Union[Dataset, SparseDataset],\n save_vamp_prior: bool,\n vamp_prior_inducing_points: Optional[int] = None,\n vamp_prior_reward_samples: Optional[int] = None,\n ) -> None:\n if not save_vamp_prior:\n return\n assert vamp_prior_inducing_points is not None\n assert vamp_prior_reward_samples is not None\n train_data, train_mask = processed_dataset.train_data_and_mask\n vamp_prior_data = sample_inducing_points(train_data, train_mask, row_count=vamp_prior_inducing_points)\n vamp_prior_data = cast(Union[Tuple[np.ndarray, np.ndarray], Tuple[csr_matrix, csr_matrix]], vamp_prior_data)\n EDDIObjective.calc_and_save_vamp_prior_info_gain(self, vamp_prior_data, sample_count=vamp_prior_reward_samples)\n\n def run_train(\n self,\n dataset: Union[Dataset, SparseDataset],\n train_config_dict: Optional[Dict[str, Any]] = None,\n report_progress_callback: Optional[Callable[[str, int, int], None]] = None,\n ) -> None:\n\n \"\"\"\n Train the model.\n Training results will be saved.\n\n Args:\n dataset: Dataset object with data and masks in unprocessed form.\n train_config_dict (dictionary): Any other parameters needed by a specific concrete class. Of\n the form {arg_name: arg_value}. e.g. {\"learning_rate\": 1e-3, \"epochs\": 100}\n report_progress_callback: Function to report model progress for API.\n \"\"\"\n if train_config_dict is None:\n train_config_dict = {}\n train_config_dict, vamp_prior_config = self._split_vamp_prior_config(train_config_dict)\n processed_dataset = self.data_processor.process_dataset(dataset)\n self._train(\n dataset=processed_dataset,\n report_progress_callback=report_progress_callback,\n **train_config_dict,\n )\n self._save_vamp_prior(processed_dataset, **vamp_prior_config)\n\n @abstractmethod\n def _train(self, *args, **kwargs):\n pass\n\n def impute(self, data, mask, impute_config_dict=None, *, vamp_prior_data=None, average=True):\n if vamp_prior_data is None:\n return impute(self, data, mask, impute_config_dict=impute_config_dict, average=average)\n else:\n processed_vamp_data_array = self.data_processor.process_data_and_masks(*vamp_prior_data)\n # Keep processed VampPrior data on CPU until we sample inducing points, as this data can be large and is\n # not required for any CUDA computations.\n return impute(\n self,\n data,\n mask,\n impute_config_dict=impute_config_dict,\n average=average,\n vamp_prior_data=to_tensors(*processed_vamp_data_array, device=torch.device(\"cpu\")),\n )\n\n def impute_processed_batch(\n self: PVAEBaseModel,\n data: torch.Tensor,\n mask: torch.Tensor,\n *,\n sample_count: int,\n preserve_data: bool = True,\n vamp_prior_data: Optional[Tuple[torch.Tensor, torch.Tensor]] = None,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Fill in unobserved variables in a minibatch of data using a trained model. 
Optionally, use a vamp prior to\n impute empty rows, and optionally replace imputed values with input values for observed features.\n\n Assumes data is a torch.Tensor and in processed form (i.e. variables will be in their squashed ranges,\n and categorical variables will be in one-hot form).\n\n Args:\n data (shape (batch_size, input_dim)): Data to be used to train the model, in processed form.\n mask (shape (batch_size, input_dim)): Data observation mask, where observed values are 1 and unobserved\n values are 0.\n sample_count: Number of imputation samples to generate.\n vamp_prior_data (Tuple of (torch tensor, torch tensor)): Data to be used to fill variables if using the VAMP\n prior method. Format: (data, mask). This defaults to None, in which case the VAMP Prior method will not\n be used.\n preserve_data (bool): Whether or not to impute data already present. Defaults to True, which keeps data\n present in input.\n\n Returns:\n imputations (torch.Tensor of shape (sample_count, batch_size, output_dim)): Input data with missing values\n filled in.\n \"\"\"\n if not isinstance(data, torch.Tensor) or not isinstance(mask, torch.Tensor):\n raise ValueError(\"data and mask should be tensors. To work on ndarrays, use impute\")\n assert data.shape == mask.shape\n assert data.shape[1] == self.input_dim\n batch_size, num_features = data.shape\n if self.variables.has_auxiliary:\n num_features = self.variables.num_processed_non_aux_cols\n\n imputations = torch.full((sample_count, batch_size, num_features), np.nan, device=self.device)\n\n # vamp_rows are rows where input is completely unobserved\n vamp_rows = torch.where(mask.sum(dim=1) == 0)[0]\n if vamp_prior_data is not None and vamp_rows.numel() > 0:\n imputed_from_vamp = self._impute_from_vamp_prior(sample_count * vamp_rows.numel(), vamp_prior_data)\n imputed_from_vamp = imputed_from_vamp.reshape(sample_count, vamp_rows.numel(), -1)\n imputations[:, vamp_rows, :] = imputed_from_vamp\n\n not_vamp_rows = torch.where(mask.sum(dim=1) != 0)[0]\n\n else:\n not_vamp_rows = torch.arange(batch_size)\n\n if len(not_vamp_rows) > 0:\n not_vamp_data = data[not_vamp_rows]\n not_vamp_mask = mask[not_vamp_rows]\n imputed_not_vamp_data = self._reconstruct_and_reshape(\n not_vamp_data, not_vamp_mask, sample_count=sample_count, **kwargs\n )\n imputations[:, not_vamp_rows, :] = imputed_not_vamp_data\n\n if preserve_data:\n imputations = restore_preserved_values(self.variables, data, imputations, mask)\n return imputations\n\n def get_model_pll(\n self: PVAEBaseModel,\n data: np.ndarray,\n feature_mask: np.ndarray,\n target_idx,\n sample_count: int = 50,\n ):\n \"\"\"\n Computes the predictive log-likelihood of the target-data given the feature_mask-masked data as input.\n\n Args:\n data (Numpy array of shape (batch_size, feature_count)): Data in unprocessed form to be used to\n compute the pll.\n feature_mask (Numpy array of shape (batch_size, feature_count)): Mask indicating conditioning\n variables for computing the predictive log-likelihood.\n target_idx (int): Column index of target variable for compute the likelihood of.\n sample_count (int): Number of Monte Carlo samples to use from the latent space. 
Defaults to 50.\n\n Returns:\n predictive_ll (float): Mean predictive log-likelihood (mean taken over batch dim in data).\n\n \"\"\"\n # Process input data\n (\n proc_feature_data_array,\n proc_feature_mask_array,\n ) = self.data_processor.process_data_and_masks(data, feature_mask)\n proc_feature_data, proc_feature_mask = to_tensors(\n proc_feature_data_array, proc_feature_mask_array, device=self.device\n )\n\n # Create target_mask from target_index\n target_mask = np.zeros_like(data, dtype=bool)\n target_mask[:, target_idx] = 1\n\n # Process target data\n (\n proc_target_data_array,\n proc_target_mask_array,\n ) = self.data_processor.process_data_and_masks(data, target_mask)\n proc_target_data, proc_target_mask = to_tensors(\n proc_target_data_array, proc_target_mask_array, device=self.device\n )\n\n # Expand target data and mask to be shape (sample_count, batch_size, feature_count)\n proc_target_data = proc_target_data.expand(sample_count, *proc_target_data.shape)\n proc_target_mask = proc_target_mask.expand(sample_count, *proc_target_mask.shape)\n\n # Compute PVAE outputs given input features (parameters of the Gaussian mixture)\n (dec_mean, dec_logvar), _, _ = self.reconstruct(proc_feature_data, proc_feature_mask, count=sample_count)\n\n # Compute Gaussian negative log-likelihood per sample in sample_count\n gnll = gaussian_negative_log_likelihood(\n proc_target_data, dec_mean, dec_logvar, mask=proc_target_mask, sum_type=None\n )\n gnll = gnll[:, :, target_idx]\n predictive_ll = -gnll\n predictive_ll = torch.logsumexp(predictive_ll, dim=0) - np.log(sample_count)\n predictive_ll = predictive_ll.mean()\n\n return predictive_ll\n\n def get_marginal_log_likelihood(\n self,\n impute_config: Dict[str, int],\n data: Union[np.ndarray, csr_matrix],\n observed_mask: Optional[Union[np.ndarray, csr_matrix]] = None,\n target_mask: Optional[Union[np.ndarray, csr_matrix]] = None,\n evaluate_imputation: Optional[bool] = False,\n num_importance_samples: int = 5000,\n **kwargs,\n ) -> float:\n \"\"\"\n Estimate marginal log-likelihood of the data using importance sampling:\n - Imputation MLL -> imputed data given the observed data log p(x_u|x_o) if evaluate_imputation is True\n - Reconstruction MLL -> all data log p(x) otherwise\n\n Args:\n impute_config: Dictionary containing options for inference.\n data: Data in unprocessed form to be used with shape (num_rows, input_dim).\n mask: If not None, mask indicating observed variables with shape (num_rows, input_dim). 1 is observed,\n 0 is un-observed. 
If None everything is marked as observed.\n target_mask: Values masked during imputation to use as prediction targets, where 1 is a target, 0 is not.\n If None, nothing is marked as an imputation target.\n evaluate_imputation: Whether to estimate Imputation MLL log p(x_u|x_o) or Reconstruction MLL log p(x).\n num_importance_samples: The number of importance samples to be taken.\n **kwargs: Extra keyword arguments required by reconstruct.\n Returns:\n marginal_log_likelihood: The estimated marginal log likelihood averaged across data points.\n \"\"\"\n # TODO(17895): Add Generation MLL option to the marginal log-likelihood metric.\n\n batch_size = impute_config[\"batch_size\"]\n\n # Assumed to only work on dense arrays for now\n if issparse(data):\n data = cast(csr_matrix, data)\n data = data.toarray()\n if issparse(observed_mask):\n observed_mask = cast(csr_matrix, observed_mask)\n observed_mask = observed_mask.toarray()\n if issparse(target_mask):\n target_mask = cast(csr_matrix, target_mask)\n target_mask = target_mask.toarray()\n if observed_mask is None:\n observed_mask = np.ones_like(data, dtype=bool)\n if target_mask is None:\n assert not evaluate_imputation\n target_mask = np.zeros_like(data, dtype=bool)\n assert data.shape == observed_mask.shape\n assert data.shape == target_mask.shape\n\n num_rows, _ = data.shape\n\n # TODO(17896): Add processing and batching of extra data objects\n processed_data, processed_obs_mask, processed_target_mask = self.data_processor.process_data_and_masks(\n data, observed_mask, target_mask\n )\n marginal_log_likelihood = np.empty((num_rows,), dtype=processed_data.dtype)\n\n with torch.no_grad():\n dataloader = create_dataloader(\n processed_data,\n processed_obs_mask,\n processed_target_mask,\n batch_size=batch_size,\n sample_randomly=False,\n )\n\n for idx, (processed_data_batch, processed_obs_mask_batch, processed_target_mask_batch) in enumerate(\n tqdm(dataloader)\n ):\n processed_data_batch = processed_data_batch.to(self.device)\n processed_obs_mask_batch = processed_obs_mask_batch.to(self.device)\n processed_target_mask_batch = processed_target_mask_batch.to(self.device)\n\n log_importance_weights = self._get_log_importance_weights(\n processed_data_batch,\n processed_obs_mask_batch,\n processed_target_mask_batch,\n evaluate_imputation=cast(bool, evaluate_imputation),\n num_importance_samples=num_importance_samples,\n **kwargs,\n ) # Shape (num_importance_samples, batch_size)\n average_factor = torch.log(torch.tensor(num_importance_samples, dtype=torch.float))\n marginal_log_likelihood_batch = (\n torch.logsumexp(log_importance_weights, dim=0) - average_factor\n ) # Shape (batch_size,)\n\n idx_start = idx * batch_size\n idx_end = min((idx + 1) * batch_size, num_rows)\n marginal_log_likelihood[idx_start:idx_end] = marginal_log_likelihood_batch.cpu().numpy()\n\n return marginal_log_likelihood.sum().item() / num_rows\n\n @abstractmethod\n def reconstruct(\n self, data: torch.Tensor, mask: Optional[torch.Tensor], sample: bool = True, count: int = 1, **kwargs: Any\n ) -> Tuple[Tuple[torch.Tensor, torch.Tensor], torch.Tensor, Tuple[torch.Tensor, torch.Tensor],]:\n \"\"\"\n Reconstruct data by passing them through the VAE.\n Note that mask=None should always be used in subclasses that don't support missing values.\n\n Args:\n data: Input data with shape (batch_size, input_dim).\n mask: If not None, mask indicating observed variables with shape (batch_size, input_dim). 
1 is observed,\n 0 is un-observed.\n sample: If True, samples the latent variables, otherwise uses the mean.\n count: Number of samples to reconstruct.\n **kwargs: Extra keyword arguments required.\n\n Returns:\n (decoder_mean, decoder_logvar): Reconstructed variables, output from the decoder. Both are shape (count, batch_size, output_dim). Count dim is removed if 1.\n samples: Latent variable used to create reconstruction (input to the decoder). Shape (count, batch_size, latent_dim). Count dim is removed if 1.\n (encoder_mean, encoder_logvar): Output of the encoder. Both are shape (batch_size, latent_dim)\n \"\"\"\n raise NotImplementedError()\n\n def validate_loss_config(self, loss_config: LossConfig) -> None:\n assert loss_config.score_imputation is not None and loss_config.score_reconstruction is not None\n assert loss_config.score_reconstruction or loss_config.score_imputation\n assert loss_config.max_p_train_dropout is not None\n\n def _impute_from_vamp_prior(\n self, num_samples: int, vamp_prior_data: Tuple[torch.Tensor, torch.Tensor]\n ) -> torch.Tensor:\n vp_data, vp_mask = vamp_prior_data\n assert vp_data.shape == vp_mask.shape\n assert vp_data.shape[1] == self.variables.num_processed_cols\n # Sample inducing points for all rows, shape (sample_count * num_vamp_rows, input_dim)\n inducing_data, inducing_mask = sample_inducing_points(vp_data, vp_mask, num_samples)\n # Only move to GPU once we have sampled the inducing points as these tensors are much smaller.\n inducing_data, inducing_mask = (\n inducing_data.to(self.device),\n inducing_mask.to(self.device),\n )\n # Shape (1, num_samples, output_dim)\n return self._reconstruct_and_reshape(inducing_data, inducing_mask, sample_count=1)\n\n def _reconstruct_and_reshape(\n self, data: torch.Tensor, mask: Optional[torch.Tensor], sample_count: int, **kwargs\n ) -> torch.Tensor:\n \"\"\"\n Make sample_count imputations of missing data for given data and mask.\n\n Args:\n data: partially observed data with shape (batch_size, input_dim).\n mask: mask indicating observed variables with shape (batch_size, input_dim). 1 is observed, 0 is\n un-observed.\n If None, will be set to all 1's.\n sample_count: Number of samples to take.\n\n Returns:\n imputations: PyTorch Tensor with shape: (sample_count, batch_size, input_dim)\n \"\"\"\n if mask is None:\n mask = torch.ones_like(data)\n assert data.dim() == 2\n assert mask.shape == data.shape\n assert data.shape[1] == self.variables.num_processed_cols\n (imputations, _), _, _ = self.reconstruct(data=data, mask=mask, sample=True, count=sample_count, **kwargs)\n if self.variables.has_auxiliary:\n data = data[:, 0 : self.variables.num_processed_non_aux_cols]\n return imputations.reshape(sample_count, *data.shape)\n\n def _get_log_importance_weights(\n self,\n data: torch.Tensor,\n observed_mask: torch.Tensor,\n target_mask: torch.Tensor,\n evaluate_imputation: bool,\n num_importance_samples: int,\n **kwargs,\n ) -> torch.Tensor:\n \"\"\"\n Generate a set of log importance weights/samples to estimate marginal log-likelihood.\n Collect samples from z ~ q(z|x) to estimate:\n - Imputation MLL -> return log [p(x_u|z) q(z|x_o) / q(z|x)] if evaluate_imputation is True\n - Reconstruction MLL -> return log [p(x|z) p(z) / q(z|x)] otherwise\n\n This function assumes that latent prior distribution is standard Normal p(z) ~ N(0, 1).\n\n Args:\n data: Data to be used with shape (batch_size, input_dim).\n observed_mask: Mask indicating observed values in data with shape (batch_size, input_dim). 
1 is observed,\n 0 is un-observed.\n target_mask: Values marked as prediction targets during imputation, where 1 is a target and 0 is not.\n evaluate_imputation: Whether to collect samples for Imputation MLL log p(x_u|x_o) or Reconstruction MLL log p(x).\n num_importance_samples: The number of importance samples to be taken.\n **kwargs: Extra keyword arguments required by reconstruct.\n\n Returns:\n log_importance_weights: Log of importance weights with shape (num_importance_samples, batch_size).\n \"\"\"\n assert observed_mask is not None\n assert target_mask is not None\n assert data.shape == observed_mask.shape\n assert data.shape == target_mask.shape\n\n data_non_aux = data[:, 0 : self.variables.num_processed_non_aux_cols]\n num_non_aux_vars = self.variables.num_unprocessed_non_aux_cols\n batch_size, _ = data.shape\n\n # Collect samples\n (dec_mean, dec_logvar), latent_samples, (enc_mean, enc_logvar) = self.reconstruct(\n data=data, mask=observed_mask, sample=True, count=num_importance_samples, **kwargs\n )\n latent_samples = latent_samples.reshape(num_importance_samples, batch_size, -1)\n\n # Calculate nll i.e. -log[p(x_u|z)] or -log[p(x|z)]\n if evaluate_imputation:\n mask_nll = target_mask\n else:\n mask_nll = observed_mask\n\n nll = negative_log_likelihood(\n data=data_non_aux.repeat(num_importance_samples, 1),\n decoder_mean=dec_mean.reshape(\n num_importance_samples * batch_size, self.variables.num_processed_non_aux_cols\n ),\n decoder_logvar=dec_logvar.reshape(\n num_importance_samples * batch_size, self.variables.num_processed_non_aux_cols\n ),\n variables=self.variables,\n alpha=self._alpha,\n mask=mask_nll.repeat(num_importance_samples, 1),\n sum_type=None,\n ) # Shape (num_importance_samples * batch_size, num_non_aux_vars)\n nll = nll.reshape(\n num_importance_samples, batch_size, num_non_aux_vars\n ) # Shape (num_importance_samples, batch_size, num_non_aux_vars\n nll = nll.sum(dim=2) # Shape (num_importance_samples, batch_size)\n\n # Calculate log latent variational log[q(z|x)]\n log_latent_variational = (-1) * gaussian_negative_log_likelihood(\n targets=latent_samples, mean=enc_mean, logvar=enc_logvar, mask=None, sum_type=None\n ) # Shape (num_importance_samples, batch_size, latent_dim)\n log_latent_variational = log_latent_variational.sum(axis=2) # Shape (num_importance_samples, batch_size)\n\n # Calculate log latent prior log[q(z|x_o)] or log[p(z)]\n if evaluate_imputation:\n (_, _), _, (latent_prior_mean, latent_prior_logvar) = self.reconstruct(\n data=data, mask=observed_mask, sample=False, count=1, **kwargs\n )\n else:\n latent_prior_mean = torch.tensor(0.0)\n latent_prior_logvar = torch.log(torch.tensor(1.0))\n\n log_latent_prior = (-1) * gaussian_negative_log_likelihood(\n targets=latent_samples,\n mean=latent_prior_mean,\n logvar=latent_prior_logvar,\n mask=None,\n sum_type=None,\n ) # Shape (num_importance_samples, batch_size, latent_dim)\n log_latent_prior = log_latent_prior.sum(axis=2) # Shape (num_importance_samples, batch_size)\n\n # Calculate log importance weights\n log_importance_weights = (\n (-1) * nll + log_latent_prior - log_latent_variational\n ) # Shape (num_importance_samples, batch_size)\n return log_importance_weights" } ]
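The PVAEBaseModel snippet above estimates the marginal log-likelihood by reducing a (num_importance_samples, batch_size) tensor of log importance weights with a logsumexp. A minimal sketch of that reduction, assuming only PyTorch (the function name is illustrative, not from the snippet):

```python
import math
import torch

def mll_from_log_weights(log_w: torch.Tensor) -> torch.Tensor:
    """Per-row marginal log-likelihood from log importance weights.

    log_w has shape (num_importance_samples, batch_size); each entry is
    log [p(x|z) p(z) / q(z|x)] for one latent sample z ~ q(z|x).
    logsumexp keeps the Monte Carlo average numerically stable.
    """
    num_samples = log_w.shape[0]
    # log( (1/N) * sum_i exp(log_w_i) ) = logsumexp(log_w, dim=0) - log(N)
    return torch.logsumexp(log_w, dim=0) - math.log(num_samples)

# Usage: 5000 importance samples over a batch of 4 rows
print(mll_from_log_weights(torch.randn(5000, 4)).shape)  # torch.Size([4])
```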
import json import math import os import warnings import numpy as np # type: ignore import torch import torch.distributions as tdist import torch.nn.functional as F from typing import Callable, Dict, List, Optional, Tuple from torch import nn from torch.utils.data import DataLoader, TensorDataset from ..datasets.dataset import Dataset from ..datasets.variables import Variables from ..models.imodel import IModelForCausalInference from ..utils.helper_functions import to_tensors from ..utils.io_utils import save_json from ..utils.nri_utils import compute_dag_loss, get_feature_indices_per_node, kl_categorical, piecewise_linear from ..utils.training_objectives import get_input_and_scoring_masks, kl_divergence, negative_log_likelihood from .pvae_base_model import PVAEBaseModel
18,661
# 1. filling the missing values before applying the GNN-based VAE, # 2. processing the output of the GNN-based VAE (i.e. use torch.sigmoid in the binary case) types = np.array([v.type_ for v in self.variables._variables]) if (types == "binary").all(): self.var_types = "binary" elif (types == "continuous").all(): self.var_types = "continuous" elif (types == "categorical").all(): self.var_types = "categorical" else: raise ValueError("Right now all the variables need to have the same type") def _train( # type: ignore self, dataset: Dataset, report_progress_callback: Optional[Callable[[str, int, int], None]], learning_rate: float, batch_size: int, epochs: int, max_p_train_dropout: float, use_dag_loss: bool, output_variance: float, hard: bool, two_steps: bool, lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. 
This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE)
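The cropped training loop stops just before extra observed values are dropped from each row, a step bounded by max_p_train_dropout (0 drops nothing, 1 may drop everything). The imported get_input_and_scoring_masks helper performs this masking, but its body is not shown here, so the sketch below is only an assumed approximation; the name drop_extra_values is hypothetical:

```python
import torch

def drop_extra_values(mask: torch.Tensor, max_p_train_dropout: float) -> torch.Tensor:
    """Hypothetical stand-in for the unshown masking helper.

    mask: (batch_size, input_dim) observation mask, 1 = observed, 0 = missing.
    Each row draws its own dropout probability in [0, max_p_train_dropout],
    so the model trains on varying amounts of artificial missingness.
    """
    batch_size, _ = mask.shape
    p = torch.rand(batch_size, 1, device=mask.device) * max_p_train_dropout
    keep = (torch.rand(mask.shape, device=mask.device) >= p).float()
    return mask.float() * keep  # already-missing entries stay missing
```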
# This is required in python 3 to allow return types of the same class. from __future__ import annotations class VISL(PVAEBaseModel, IModelForCausalInference): """ Subclass of `models.pvae_base_model.PVAEBaseModel` representing the algorithm VISL (missing value imputation with causal discovery). Requires file <data_dir>/<dataset_name>/adj_matrix.csv to evaluate causal discovery against ground truth. """ def __init__( self, model_id: str, variables: Variables, save_dir: str, device: torch.device, gnn_iters: int, shared_init_and_final_mappings: bool, embedding_dim: int, init_prob: float, simpler: str = None, **_, ): """ Args: model_id: Unique model ID for referencing this model instance. variables: Information about variables/features used by this model. save_dir: Location to save any information about this model, including training data. device: Device to load model to. gnn_iters: Number of message passing iterations for the GNN. shared_init_and_final_mappings: Whether all the nodes should use the same MLPs for the initial and final mappings. embedding_dim: Dimensionality of the nodes embedding. init_prob: Initial probability of having edge. simpler: Choose what MLP should be simpler (options are 'forward', 'backward', or None). Specifically, 'simpler' means to divide by 10 the dimensionality of the hidden layer of the corresponding MLP (with a minimum of 10 units). """ super().__init__(model_id, variables, save_dir, device) # Define some useful attributes feature_indices_per_node, ordered_nodes = get_feature_indices_per_node(variables) with open(os.path.join(self.save_dir, "ordered_nodes.json"), "w", encoding="utf-8") as f: json.dump(ordered_nodes, f, indent=4) self.num_nodes = len(feature_indices_per_node) self.num_edges = self.num_nodes * (self.num_nodes - 1) self.input_dim = variables.num_processed_cols # Define and initialize Z_edges # The learnable parameter is Z_edges_logits. Z_edges is F.softmax(Z_edges_logits, dim=1). self.Z_edges_logits = torch.nn.Parameter( torch.stack( [ torch.full([self.num_edges], math.log(1 - init_prob)), torch.full([self.num_edges], math.log(init_prob)), ], dim=1, ).to(device) ) # Define the GNN-based VAE self.gnn_vae = GNN_based_VAE( embedding_dim=embedding_dim, skip_first=True, device=device, n_iters=gnn_iters, num_nodes=self.num_nodes, shared_init_and_final_mappings=shared_init_and_final_mappings, simpler=simpler, feature_indices_per_node=feature_indices_per_node, ) # Create rel_rec and rel_send, which codify the receiving and sending node for each edge # Shape of rel_rec and rel_send: [num_edges, num_nodes] # The second dimension is a one-hot encoding of the receiver or sender node off_diag = np.ones([self.num_nodes, self.num_nodes]) - np.eye(self.num_nodes) rel_rec = F.one_hot(torch.tensor(np.where(off_diag)[0], dtype=torch.long)) rel_send = F.one_hot(torch.tensor(np.where(off_diag)[1], dtype=torch.long)) self.rel_rec = rel_rec.type(torch.float).to(device) self.rel_send = rel_send.type(torch.float).to(device) # Define the prior over edge types (favors sparse graphs) self.log_prior = torch.log( torch.tensor([0.95, 0.05], device=device) ) # The no-edge type is the first one (recall the skip_first argument of GNN_based_VAE __init__) # Save type of variables. Used in reconstruct method for # 1. filling the missing values before applying the GNN-based VAE, # 2. processing the output of the GNN-based VAE (i.e. 
use torch.sigmoid in the binary case) types = np.array([v.type_ for v in self.variables._variables]) if (types == "binary").all(): self.var_types = "binary" elif (types == "continuous").all(): self.var_types = "continuous" elif (types == "categorical").all(): self.var_types = "categorical" else: raise ValueError("Right now all the variables need to have the same type") def _train( # type: ignore self, dataset: Dataset, report_progress_callback: Optional[Callable[[str, int, int], None]], learning_rate: float, batch_size: int, epochs: int, max_p_train_dropout: float, use_dag_loss: bool, output_variance: float, hard: bool, two_steps: bool, lambda_nll: float, lambda_kl_z: float, lambda_kl_A: float, lambda_dagloss: float, sample_count: int, ) -> Dict[str, List[float]]: """ Train the model using the given data. Args: dataset: Dataset with data and masks in processed form. train_output_dir: Path to save any training information to. report_progress_callback: Function to report model progress for API. learning_rate: Learning rate for Adam optimiser. batch_size: Size of minibatches to use. epochs: Number of epochs to train for. max_p_train_dropout: Maximum fraction of extra training features to drop for each row. 0 is none, 1 is all. use_dag_loss: Whether to use the DAG loss regularisation. output_variance: The variance for the output of the GNN based VAE. hard: Whether to use hard or soft samples for the distribution over edges (if hard=True, the edge weights are just 0/1). two_steps: Whether to use the two-step variant of VISL. That is, the first half of training uses only the forward MLP and the second half fixes the distribution over edges and only optimizes the forward and backward MLPs. lambda_nll: Lambda coefficient for the ELBO term negative-log-likelihood lambda_kl_z: Lambda coefficient for the ELBO term lambda*KL(q(z|x) || p(z)) lambda_kl_A: Lambda coefficient for the ELBO term lambda*KL(q(A) || p(A)) lambda_dagloss: Lambda coefficient for the dagloss term of the ELBO. sample_count: Number of samples to reconstruct. Returns: train_results (dictionary): training_loss, KL divergence, NLL, dag_loss, training_loss_complete """ # Put PyTorch into train mode. self.train() # Setting the hard attribute which will be used for training and testing self.hard = hard # Loading data and mask, creating results_dict data, mask = dataset.train_data_and_mask results_dict: Dict[str, List] = { metric: [] for metric in [ "training_loss", "kl_z_term", "kl_A_term", "nll_term", "dag_loss_term", "training_loss_complete", ] } # Creating the optimizer # If two_steps is True, we create a different optimizer for the second half. This optimizer does not optimize over the adjacency matrix. optimizer = torch.optim.Adam(self.parameters(), lr=learning_rate) if two_steps: named_parameters = list(self.named_parameters()) all_but_adj_matrix = [] for a in named_parameters: if a[0] != "Z_edges_logits": all_but_adj_matrix.append(a[1]) optimizer_second_half = torch.optim.Adam(all_but_adj_matrix, lr=learning_rate) # Creating the dataloader tensor_dataset = TensorDataset(*to_tensors(data, mask, device=self.device)) dataloader = DataLoader(tensor_dataset, batch_size=batch_size, shuffle=True) # If DAG loss is used, it appears after 'epochs_without_dagloss' epochs and its coefficient (lambda) grows linearly # during 10% of the total number of epochs until 1. This scheme is used for lambda because of empirical # reasons (DAG loss might take over the training if it is used with coefficient 1.0 from the beginning). 
if use_dag_loss: epochs_without_dagloss = 5 if (epochs_without_dagloss + 0.1 * epochs) > epochs: warnings.warn("max lambda will not be achieved") best_train_loss = np.nan for epoch in range(epochs): training_loss_full = 0.0 nll_term_full = 0.0 kl_z_term_full = 0.0 kl_A_term_full = 0.0 dag_loss_term_full = 0.0 training_loss_complete_full = 0.0 # Set the optimizer_to_use depending on whether we are using the two-steps variant or not. if not two_steps: optimizer_to_use = optimizer only_forward = False elif epoch < epochs // 2: optimizer_to_use = optimizer only_forward = True else: optimizer_to_use = optimizer_second_half only_forward = False for x, mask_train_batch in dataloader: # Drop additional values (same procedure as PVAE)
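The comment in _train above fixes the DAG-loss coefficient schedule: zero for the first epochs_without_dagloss epochs, then a linear ramp to 1.0 over 10% of the total epoch count (the module imports a piecewise_linear helper, which presumably encodes this). A hedged sketch of such a schedule:

```python
def dag_loss_lambda(epoch: int, epochs: int, epochs_without_dagloss: int = 5) -> float:
    """Illustrative DAG-loss coefficient per epoch (a sketch, not the repo's helper).

    Returns 0.0 before `epochs_without_dagloss`, then ramps linearly to 1.0
    over 10% of the total number of epochs and stays there.
    """
    ramp = 0.1 * epochs
    if epoch < epochs_without_dagloss:
        return 0.0
    if ramp <= 0:
        return 1.0
    return min(1.0, (epoch - epochs_without_dagloss) / ramp)

# e.g. with epochs=100: lambda is 0 until epoch 5 and reaches 1.0 at epoch 15
```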
input_mask, scoring_mask = get_input_and_scoring_masks(
9
2023-11-21 12:55:08+00:00
24k
KU-Leuven-Geomatics/geomapi
geomapi/nodes/panonode.py
[ { "identifier": "Node", "path": "geomapi/nodes/node.py", "snippet": "class Node:\n def __init__(self, subject: URIRef = None,\n graph: Graph = None,\n graphPath: str = None, \n name:str=None,\n path:str=None,\n timestamp:str=None,\n resource=None,\n cartesianTransform:np.ndarray=None,\n **kwargs):\n \"\"\"Creates a Node from one or more of the following inputs. \\n\n\n Args:\n 1.graph (Graph, optional): An RDF Graph to parse.\\n\n 2.graphPath (str, optional): The path of an RDF Graph to parse. If no subject is provided, the first subject of the graph is retained. \\n\n 3.path(str,optional): A filepath to a resource. \\n\n 4.subject (URIRef, optional): A subject to use as identifier for the Node. If a graph is also present, the subject should be part of the graph.\\n\n 5.name (str, optional): A name of the Node. This is not a unique identifier but serves as non-functional description.\\n\n\n Returns:\n Node\n \"\"\"\n #private attributes \n self._subject=None\n self._graph=None\n self._graphPath=None \n self._path=None\n self._name=None\n self._timestamp=None \n self._resource=None \n self._cartesianTransform=None\n\n #instance variables\n \n self.subject=subject\n self.graphPath=graphPath\n self.graph=graph\n self.path=path \n self.name=name\n self.timestamp=timestamp\n self.resource=resource \n self.cartesianTransform=cartesianTransform\n\n #initialisation functionality\n if self._path:\n self.name=ut.get_filename(path)\n if os.path.exists(self._path):\n self.timestamp=ut.get_timestamp(path) \n\n if(graphPath and os.path.exists(self._graphPath) and not self._graph):\n self._graph = Graph().parse(graphPath) \n\n self.get_subject() \n if(self._graph):\n if ut.check_if_subject_is_in_graph(self._graph,self._subject):\n self._graph=ut.get_subject_graph(self._graph,self._subject)\n self.get_metadata_from_graph(self._graph,self._subject) \n elif 'session' in str(type(self)):\n pass\n else:\n raise ValueError( 'Subject not in graph')\n self.__dict__.update(kwargs)\n\n#---------------------PROPERTIES----------------------------\n\n #---------------------PATH----------------------------\n @property\n def path(self): \n \"\"\"Get the resource path (str) of the node. If no path is present, you can use get_path() to reconstruct the path from either \n the graphPath or working directory\n \n Features:\n 1. folder\\n\n 2. self.name\\n\n 3. self.graphPath\\n\n \"\"\"\n\n return ut.parse_path(self._path)\n \n @path.setter\n def path(self,value):\n if value is None:\n return None\n nodeExtensions=ut.get_node_resource_extensions(str(type(self)))\n if (ut.get_extension(str(value)) in nodeExtensions):\n self._path=str(value)\n else:\n raise ValueError('self.path has invalid type, path or extension')\n\n #---------------------NAME----------------------------\n @property\n def name(self):\n \"\"\"Get the name (str) of the node. This can include characters that the operating\n system does not allow. If no name is present, you can use get_name() to construct a name from the subject or path.\n\n Features:\n 1. self.path\\n\n 2. self.subject\\n\n \"\"\" \n return self._name\n\n @name.setter\n def name(self,name):\n if name is None:\n return None\n try: \n self._name=str(name)\n except:\n raise TypeError('self.name should be string compatible')\n\n #---------------------TIMESTAMP----------------------------\n @property\n def timestamp(self):\n \"\"\"Get the timestamp (str(yyyy-MM-ddTHH:mm:ss)) of the node. 
If no timestamp is present, use get_timestamp() to gather the timestamp from the path or graphPath.\n\n Features:\n 1. self.path\\n\n 2. self.graphPath\\n\n \"\"\"\n return self._timestamp\n\n @timestamp.setter\n def timestamp(self,timestamp):\n if timestamp is None:\n return None\n elif timestamp:\n self._timestamp=ut.validate_timestamp(timestamp)\n else:\n raise ValueError('timestamp should be str(yyyy-MM-ddTHH:mm:ss)')\n\n #---------------------GRAPHPATH---------------------------- \n @property\n def graphPath(self):\n \"\"\"Get the path (str) of graph of the node, or the graphPath in which the subject is contained.\"\"\"\n \n return ut.parse_path(self._graphPath)\n\n @graphPath.setter\n def graphPath(self,value):\n if value is None:\n return None\n elif (next(str(value).endswith(extension) for extension in ut.RDF_EXTENSIONS) ):\n self._graphPath=str(value)\n else:\n raise ValueError('self.graphPath has invalid type, path or extension') \n\n #---------------------GRAPH---------------------------- \n @property\n def graph(self):\n \"\"\"Get the graph (RDFLib.Graph) of the node. If no graph is present, you can use get_graph() to parse the graph from a graphPath. Alternatively,\n you can use to_graph() to serialize the Nodes attributes to RDF.\n \n Features:\n 1. self.graphPath\n \"\"\" \n return self._graph\n\n @graph.setter\n def graph(self,graph):\n if graph is None:\n return None\n elif (type(graph) is rdflib.Graph):\n self._graph=graph\n else:\n raise TypeError('type(graph) should be rdflib.Graph') \n\n #---------------------SUBJECT---------------------------- \n @property\n def subject(self):\n \"\"\"Get the subject (RDFLib.URIRef) of the node. If no subject is present, you can use get_subject() to construct it from a graph, name or path.\n Otherwise, a random guid is generated.\n \n Features:\n 1. self.name\\n\n 2. self.graph\\n\n 3. self.path\\n\n \"\"\"\n return self._subject\n\n @subject.setter\n def subject(self,subject):\n if subject is None:\n return None\n elif type(subject) is rdflib.URIRef:\n self._subject=subject\n else:\n string=str(subject)\n prefix='file:///'\n if 'file:///' in string:\n string=string.replace('file:///','')\n prefix='file:///'\n elif 'http://' in string:\n string=string.replace('http://','')\n prefix='http://' \n self._subject=URIRef(prefix+ut.validate_string(string)) \n \n #---------------------RESOURCE---------------------------- \n @property\n def resource(self):\n \"\"\"Get the resource (mesh, pcd, etc.) of the node. If no resource is present, you can use get_resource() to load the resource from a path or search it through the name and graphpath. \n\n Features:\n 1. self.path\\n\n 2. self.name\\n\n 3. self.graphPath\\n\n \"\"\" \n return self.get_resource()\n\n @resource.setter\n def resource(self,value):\n if value is None:\n return None\n else:\n self.set_resource(value)\n\n @resource.deleter\n def resource(self):\n self._resource=None\n #This will be depricated. 
use resource instead\n if getattr(self,'mesh',None) is not None:\n self._mesh=None\n if getattr(self,'image',None) is not None:\n self._image=None\n if getattr(self,'pcd',None) is not None:\n self._pcd=None\n if getattr(self,'ortho',None) is not None:\n self._ortho=None\n \n #---------------------CARTESIANTRANSFORM---------------------------- \n @property\n def cartesianTransform(self):\n \"\"\"Get the Cartesian Transform (translation & rotation matrix) (np.ndarray(4x4)) of the node.\n Note that the initialisation from different inputs may result in different cartesianTransform values.\\n\n E.g. the pose of a mesh is retrieved from the mean of the vertices,\n while the same pose initiated from the cartesianBounds equals the mean of that cartesianBounds array.\n \\n\n If no cartesianTransform is present, you can use get_cartesianTransform() to construct the cartesiantransform from the resource, cartesianBounds, orientedBounds or orientedBoundingBox. \n \n Features:\n 1. self.cartesianBounds\\n\n 2. self.resource\\n\n 3. self.orientedBounds\\n\n 4. self.orientedBoundingBox\\n\n \"\"\" \n return self._cartesianTransform\n\n @cartesianTransform.setter\n def cartesianTransform(self,value):\n if value is None:\n return None\n else:\n self.set_cartesianTransform(value)\n\n#---------------------METHODS---------------------------- \n def get_metadata_from_graph(self, graph:Graph,subject:URIRef):\n \"\"\"Convert the data contained in a graph to a set of node attributes.\n If the graph contains multiple subjects, it is reduced to the subject's triples. \\n\n \n **NOTE**: The use of a SessionNode is advised when dealing with multi-subject graphs.\\n\n\n Args:\n 1. self.graph (RDFlib.Graph): Graph to parse\\n\n 2. self.subject (RDFlib.URIRef): The subject to parse the graph for\n \n \"\"\"\n if len([x for x in self._graph.subjects(RDF.type)])>1:\n self._graph=ut.get_subject_graph(graph,subject)\n\n for predicate, object in graph.predicate_objects(subject=subject):\n attr= ut.get_attribute_from_predicate(graph, predicate) \n value=object.toPython()\n \n #GEOMETRY\n if attr == 'cartesianBounds':\n self.cartesianBounds=ut.literal_to_array(object) \n elif attr == 'orientedBounds':\n self.orientedBounds=ut.literal_to_orientedBounds(object) \n elif attr == 'cartesianTransform':\n self.cartesianTransform=ut.literal_to_cartesianTransform(object) \n elif attr == 'geospatialTransform':\n self.geospatialTransform=ut.literal_to_array(object) \n #PATHS\n elif re.search('path', attr, re.IGNORECASE):\n path=ut.literal_to_string(object)\n if path and self._graphPath:\n path = path.replace(\"\\\\\", os.sep)\n if '..' in path:\n path=path.strip(str('..' + os.sep))\n folder=ut.get_folder_path(ut.get_folder_path(self._graphPath))\n else:\n folder=ut.get_folder_path(self._graphPath)\n path=os.path.join(folder,path)\n setattr(self,attr,path)\n \n #INT \n elif attr in ut.INT_ATTRIBUTES:\n setattr(self,attr,ut.literal_to_int(object)) \n #FLOAT\n elif attr in ut.FLOAT_ATTRIBUTES:\n setattr(self,attr,ut.literal_to_float(object)) \n #LISTS\n elif attr in ut.LIST_ATTRIBUTES:\n setattr(self,attr,ut.literal_to_list(object)) \n #LINKEDSUBEJCTS\n elif attr == 'linkedSubjects':\n # test=ut.literal_to_linked_subjects(object)\n # self.linkedSubjects=test\n setattr(self,attr,ut.literal_to_linked_subjects(object)) \n \n #STRINGS\n else:\n setattr(self,attr,object.toPython()) \n\n def get_subject(self) -> str:\n \"\"\"Returns and validates the current subject. 
If empty, a new subject is created based on an unique GUID.\\n\n\n Returns:\n subject (URIREF)\n \"\"\"\n #subject\n if self._subject:\n pass\n # self.graph\n elif self._graph:\n self._subject=next(self._graph.subjects(RDF.type))\n #self.path\n elif self._path:\n self._name=ut.get_filename(self._path)\n self._subject=URIRef('file:///'+ut.validate_string(self._name))\n elif self._name:\n self._subject=URIRef('file:///'+ut.validate_string(self._name))\n #guid\n else:\n self._name=str(uuid.uuid1())\n self._subject=URIRef('file:///'+self._name) \n return self._subject\n\n def get_timestamp(self):\n \"\"\"Get the timestamp (str) of the Node. \n This can be retrieved from user input, the path, the graphPath or the current time.\\n\n\n Returns:\n timestamp (str): '%Y-%m-%dT%H:%M:%S'\n \"\"\"\n if self._timestamp is None:\n if self._path and os.path.exists(self._path):\n self._timestamp=ut.get_timestamp(self._path) \n elif self._graphPath and os.path.exists(self._graphPath):\n self._timestamp=ut.get_timestamp(self._graphPath) \n else:\n self._timestamp=datetime.datetime.now().strftime('%Y-%m-%dT%H:%M:%S')\n return self._timestamp\n\n def get_name(self) -> str:\n \"\"\"Returns the name (str) of the Node.\\n\n\n Returns:\n name (str)\n \"\"\"\n if self._name is None:\n if self._path:\n self._name=ut.get_filename(self._path)\n else: \n self._name=ut.get_subject_name(self.subject)\n return self.name\n \n def get_subjectname(self) -> str:\n \"\"\"Returns the subjectname (str) of the Node.\\n\n\n Returns:\n name (str)\n \"\"\"\n \n self._name=ut.get_subject_name(self.subject)\n return self.name\n\n def set_cartesianTransform(self,value):\n \"\"\"sets the cartesianTransform for the Node type. Overwrite this function for each node type.\n \"\"\"\n # print(\"This is the base Node functionality, overwite for each childNode to retrieve the relevant cartesianTransform\")\n\n def get_cartesianTransform(self):\n \"\"\"Returns the cartesianTransform from the Node type. Overwrite this function for each node type.\"\"\"\n # print(\"This is the base Node functionality, overwite for each childNode to retrieve the relevant cartesianTransform\")\n\n def get_resource(self):\n \"\"\"Returns the resource from the Node type. Overwrite this function for each node type.\n \"\"\"\n # print(\"This is the base Node functionality, overwite for each childNode to import the relevant resource type\")\n\n def set_resource(self,value):\n \"\"\"sets the resource for the Node type. Overwrite this function for each node type.\n \"\"\"\n # print(\"This is the base Node functionality, overwite for each childNode to import the relevant resource type\")\n\n\n def clear_resource(self):\n \"\"\"Clear all resources (images, pcd, meshes, ortho's, etc.) in the Node.\n \"\"\"\n if getattr(self,'resource',None) is not None:\n self.resource=None\n \n def get_path(self) -> str:\n \"\"\"Returns the full path of the resource from this Node.\\n\n\n Features:\n 1. self.graphPath\\n\n 2. 
self._name or self._subject\\n\n\n Returns:\n path (str)\n \"\"\" \n if self._path and os.path.exists(self._path):\n return self._path\n \n elif self._graphPath and (self._name or self._subject):\n folder=ut.get_folder_path(self._graphPath)\n nodeExtensions=ut.get_node_resource_extensions(str(type(self)))\n allSessionFilePaths=ut.get_list_of_files(folder) \n for path in allSessionFilePaths:\n if ut.get_extension(path) in nodeExtensions:\n if self.get_name() in path or self.get_subject() in path :\n self._path = path \n return self._path\n if self._name:\n self._path=os.path.join(folder,self._name+nodeExtensions[0])\n else:\n self._path=os.path.join(folder,self._subject+nodeExtensions[0])\n return self._path\n else:\n # print(\"No file containing this object's name and extension is found in the graphPath folder\")\n return None\n\n def get_graph(self):\n \"\"\"Returns the graph of the Node.\n\n Features:\n 1. self.graphPath\\n\n 2. self._subject\\n\n\n Returns:\n graph (RDFlib.GRAPH)\n \"\"\"\n if self._graph is None:\n if self._graphPath and os.path.exists(self._graphPath):\n self._graph=Graph().parse(self._graphPath)\n if self._subject and ut.check_if_subject_is_in_graph(self._graph,self._subject):\n self._graph=ut.get_subject_graph(self._graph,self._subject)\n else:\n print( 'Subject not in Graph')\n return self._graph\n\n def to_graph(self, graphPath : str = None, overwrite:bool=True,save:bool=False) -> Graph:\n \"\"\"Converts the current Node variables to a graph and optionally save.\n\n Args:\n 1. graphPath (str, optional): The full path to write the graph to. Defaults to None.\\n\n 2. overwrite (bool, optional=True): Overwrite current graph values or not\\n\n 3. save (bool, optional=False): Save the graph to the self.graphPath or graphPath.\\n\n \"\"\"\n if graphPath and next(graphPath.endswith(extension) for extension in ut.RDF_EXTENSIONS) :\n self._graphPath=graphPath\n\n self._graph=Graph() \n ut.bind_ontologies(self._graph) \n nodeType=ut.get_node_type(str(type(self))) \n self._graph.add((self.subject, RDF.type, nodeType )) \n\n # enumerate attributes in node and write them to triples\n attributes = ut.get_variables_in_class(self)\n attributes = ut.clean_attributes_list(attributes) \n pathlist = ut.get_paths_in_class(self)\n \n for attribute in attributes: \n predicate = ut.match_uri(attribute)\n value=getattr(self,attribute)\n \n if value is not None:\n dataType=ut.get_data_type(value)\n temp=dataType.toPython()\n predtemp=predicate.toPython()\n\n if self._graph.value(self._subject, predicate, None)== str(value):\n continue\n\n #check if exists\n elif overwrite:\n self._graph.remove((self._subject, predicate, None))\n\n if 'linkedSubjects' in attribute:\n if len(value) !=0:\n value=[subject.toPython() for subject in self.linkedSubjects]\n else:\n continue\n \n elif attribute in pathlist:\n if (self._graphPath):\n folderPath=ut.get_folder_path(self.graphPath)\n try:\n value=os.path.relpath(value,folderPath)\n except:\n pass\n if 'string' not in dataType.toPython(): \n self._graph.add((self._subject, predicate, Literal(value,datatype=dataType)))\n else:\n self._graph.add((self._subject, predicate, Literal(value)))\n\n #Save graph\n if(save):\n self.save_graph(graphPath) \n return self._graph\n\n def save_graph(self,graphPath : str = None) -> bool:\n \"\"\"Serialize the graph in an RDF file on drive.\n The RDF graph will be stored in self.graphPath or provided graphPath (str).\n\n Args:\n graphPath (str, optional)\\n\n\n Raises:\n ValueError: No valid graphPath if file/folder 
location is not found\\n\n ValueError: No valid extension if not in ut.RDF_EXTENSIONS\\n\n ValueError: Save failed despite valid graphPath and extension (serialization error).\\n\n\n Returns:\n bool: True if file is succesfully saved.\n \"\"\"\n #check path validity\n if(graphPath and ut.check_if_path_is_valid(graphPath)): \n self._graphPath=graphPath\n elif ut.check_if_path_is_valid(self._graphPath):\n pass\n else: \n raise ValueError(graphPath + ' is no valid graphPath.')\n #check extension\n if (ut.get_extension(graphPath) not in ut.RDF_EXTENSIONS):\n raise ValueError(''.join(ut.RDF_EXTENSIONS) + ' currently are only supported extensions.')\n\n try: \n # f= open(self._graphPath, 'w') \n # base=ut.get_folder(self.graphPath)\n self._graph.serialize(self._graphPath)#,base=base\n # f.close()\n if os.path.exists(self._graphPath): \n return True\n\n return False\n except:\n raise ValueError('Save failed despite valid graphPath.') " }, { "identifier": "ImageNode", "path": "geomapi/nodes/imagenode.py", "snippet": "class ImageNode(Node):\n # class attributes\n \n def __init__(self, graph : Graph = None, \n graphPath:str=None,\n subject : URIRef = None,\n path : str=None, \n xmpPath: str = None,\n xmlPath: str = None,\n getResource : bool = False,\n getMetaData : bool = True,\n **kwargs): \n \"\"\"Creates a Node from one or more of the following inputs. \n By default, no data is imported in the Node to speed up processing.\n If you also want the data, call node.get_resource() or set getResource() to True.\\n\n\n Args: \\n\n 1. graph (RDFlib Graph) : Graph with a single subject (if multiple subjects are present, only the first will be used to initialise the MeshNode)\\n\n 2. graphPath (str): Graph file path with a single subject (if multiple subjects are present, only the first will be used to initialise the MeshNode)\\n\n 3. path (str) : path to image file (Note that this node will also contain the data) \\n\n 4. resource (ndarray, PIL Image,Open3D) : OpenCV, PIL (Note that this node will also contain the data)\\n\n 5. xmlPath (str) : Xml file path from Agisoft Metashape\\n\n 6. 
xmpPath (str) : xmp file path from RealityCapture\n \\n\n - getResource (bool, optional= False): If True, the node will search for its physical resource on drive \\n\n - getMetaData (bool, optional= True): If True, the node will attempt to extract metadata from the resource if present \\n\n\n Returns:\n An ImageNode with metadata \n \"\"\" \n #private attributes \n self._xmlPath=None\n self._xmpPath=None\n self._orientedBoundingBox=None\n self.imageWidth = None # (int) number of pixels\n self.imageHeight = None # (int) number of pixels\n self.focalLength35mm = None # (Float) focal length in mm \n self.keypoints = None # (array) the image keypoints\n self.descriptors = None# (array) the image features\n\n super().__init__( graph= graph,\n graphPath= graphPath,\n subject= subject,\n path=path,\n **kwargs) \n\n #instance variables\n self.xmlPath=xmlPath\n\n #initialisation functionality\n if getMetaData:\n if self.get_metadata_from_xmp_path():\n pass\n elif self.get_metadata_from_xml_path():\n pass\n\n if getResource:\n self.get_resource()\n\n if getMetaData:\n self.get_metadata_from_exif_data()\n if getResource or self._resource is not None:\n self.get_metadata_from_resource()\n\n#---------------------PROPERTIES----------------------------\n\n #---------------------xmlPath----------------------------\n @property\n def xmlPath(self): \n \"\"\"Get the xmlPath (str) of the node.\"\"\"\n return ut.parse_path(self._xmlPath)\n\n @xmlPath.setter\n def xmlPath(self,value):\n if value is None:\n return None\n elif (str(value).endswith('xml') ):\n self._xmlPath=str(value)\n else:\n raise ValueError('self.xmlPath has invalid type, path or extension.') \n\n #---------------------xmpPath----------------------------\n @property\n def xmpPath(self): \n \"\"\"Get the xmpPath (str) of the node.\"\"\"\n return ut.parse_path(self._xmpPath)\n\n @xmpPath.setter\n def xmpPath(self,value):\n if value is None:\n return None\n elif (str(value).endswith('xmp') ):\n self._xmpPath=str(value)\n else:\n raise ValueError('self.xmpPath has invalid type, path or extension.') \n\n#---------------------orientedBoundingBox----------------------------\n @property\n def orientedBoundingBox(self): \n \"\"\"Get the orientedBoundingBox of the Node from various inputs. \\n\n\n Args:\n 1. Open3D.geometry.OrientedBoundingBox \\n\n 2. Open3D geometry\\n\n\n Returns:\n orientedBoundingBox (o3d.geometry.OrientedBoundingBox) \n \"\"\"\n return self._orientedBoundingBox\n\n @orientedBoundingBox.setter\n def orientedBoundingBox(self,value):\n if value is None:\n return None\n if 'orientedBoundingBox' in str(type(value)):\n self._orientedBoundingBox=value\n else: \n try: #geometry\n self._orientedBoundingBox=value.get_oriented_bounding_box()\n except:\n raise ValueError('Input must be orientedBoundingBox (o3d.geometry.OrientedBoundingBox) or an Open3D Geometry.')\n\n#---------------------METHODS----------------------------\n \n def set_resource(self,value):\n \"\"\"Set the resource of the Node from various inputs.\\n\n\n Args:\n 1. np.ndarray (OpenCV) \\n\n 2. PIL Image\\n\n 3. Open3D Image\\n\n\n Raises:\n ValueError: Resource must be np.ndarray (OpenCV), PIL Image or Open3D Image.\n \"\"\"\n\n if type(value) is np.ndarray : #OpenCV\n self._resource = value\n elif 'Image' in str(type(value)): # PIL\n self._resource= cv2.cvtColor(np.array(value), cv2.COLOR_RGB2BGR)\n else:\n raise ValueError('Resource must be np.ndarray (OpenCV) or PIL Image')\n\n def get_resource(self)->np.ndarray: \n \"\"\"Returns the resource (image) in the node. 
\n If none is present, it will search for the data on drive from the following inputs. \\n\n\n Args:\n 1. self.path\\n\n 2. self.graphPath\\n\n 3. self.name or self.subject\n\n Returns:\n np.ndarray or None\n \"\"\"\n if self._resource is not None :\n return self._resource\n elif self.get_path():\n self._resource = cv2.imread(self.path)\n return self._resource \n\n def get_path(self) -> str:\n \"\"\"Returns the full path of the resource.\n If none is present, it will search for the data on drive from the following inputs.\\n\n\n Args:\n 1. self.graphPath \\n\n 2. self.name \\n\n 3. self.subject\\n\n\n Returns:\n resource path (str)\n \"\"\" \n if self._path and os.path.exists(self._path):\n return self._path\n nodeExtensions=ut.get_node_resource_extensions(str(type(self)))\n if self._graphPath and (self._name or self._subject):\n folder=ut.get_folder_path(self._graphPath)\n allSessionFilePaths=ut.get_list_of_files(folder) \n for path in allSessionFilePaths:\n if ut.get_extension(path) in nodeExtensions:\n if self.get_name() in path or self.get_subject() in path :\n self._path = path \n return self._path\n if self._name:\n self._path=os.path.join(folder,self._name+nodeExtensions[0])\n else:\n self._path=os.path.join(folder,self._subject+nodeExtensions[0])\n return self._path\n elif self._xmpPath and os.path.exists(self._xmpPath):\n folder=ut.get_folder_path(self._xmpPath)\n allSessionFilePaths=ut.get_list_of_files(folder) \n for path in allSessionFilePaths:\n if ut.get_extension(path) in nodeExtensions:\n if ut.get_filename(self._xmpPath) in path :\n self.name=ut.get_filename(self._xmpPath)\n self.subject=self._name\n self.path = path \n return self._path\n elif self._xmlPath and os.path.exists(self._xmlPath):\n folder=ut.get_folder_path(self._xmlPath)\n allSessionFilePaths=ut.get_list_of_files(folder) \n for path in allSessionFilePaths:\n if ut.get_extension(path) in nodeExtensions:\n if self.get_name() in path or self.get_subject() in path :\n self._path = path \n return self._path\n else:\n # print(\"No file containing this object's name and extension is found in the graphPath folder\")\n return None\n\n def get_xmp_path(self)->str: \n \"\"\"Returns the xmpPath in the node. \n If none is present, it will search for the data on drive from the following inputs.\\n\n\n Args:\n 1. self.graphPath \\n\n 2. self.name \\n\n 3. self.subject\\n\n\n Returns:\n str or None\n \"\"\"\n if self._xmpPath and os.path.exists(self._xmpPath):\n return self._xmpPath \n elif self._graphPath and (self._name or self._subject):\n folder=ut.get_folder_path(self._graphPath)\n allSessionFilePaths=ut.get_list_of_files(folder) \n for path in allSessionFilePaths:\n if ut.get_extension(path).endswith('xmp'):\n if self.get_name() in path or self.get_subject() in path :\n self._xmpPath = path \n return self._xmpPath\n else:\n return None\n\n def save_resource(self, directory:str=None,extension :str = '.png') ->bool:\n \"\"\"Export the resource of the Node.\\n\n\n Args:\n 1. directory (str, optional): directory folder to store the data.\\n\n 2. extension (str, optional): file extension. Defaults to '.png'.\\n\n\n Raises:\n ValueError: Unsuitable extension. 
Please check permitted extension types in utils._init_.\\n\n\n Returns:\n bool: return True if export was succesful\n \"\"\" \n #check path\n if self.resource is None:\n return False\n \n #validate extension\n if extension not in ut.IMG_EXTENSION:\n raise ValueError('Invalid extension')\n\n # check if already exists\n if directory and os.path.exists(os.path.join(directory,self.get_name() + extension)):\n self.path=os.path.join(directory,self.get_name() + extension)\n return True\n elif not directory and self.get_path() and os.path.exists(self.path) and extension in ut.IMG_EXTENSION:\n return True \n \n #get directory\n if (directory):\n pass \n elif self.path is not None and os.path.exists(self.path): \n directory=ut.get_folder(self.path) \n elif(self.graphPath): \n dir=ut.get_folder(self.graphPath)\n directory=os.path.join(dir,'IMG') \n else:\n directory=os.path.join(os.getcwd(),'IMG')\n # create directory if not present\n if not os.path.exists(directory): \n os.mkdir(directory) \n\n self.path=os.path.join(directory,ut.get_filename(self.subject.toPython()) + extension)\n\n #write files\n if cv2.imwrite(self.path, self.resource):\n return True\n return False\n \n def get_oriented_bounding_box(self)->o3d.geometry.OrientedBoundingBox:\n \"\"\"Gets the Open3D OrientedBoundingBox of the node from the conical mesh representation based on the \n cartesianTransform, the focal length at 35mm and a viewing range. \\n\n\n Returns:\n o3d.geometry.orientedBoundingBox\n \"\"\" \n if self._orientedBoundingBox is not None:\n pass\n elif self._cartesianTransform is not None:\n mesh=self.get_mesh_geometry()\n self._orientedBoundingBox=mesh.get_oriented_bounding_box() \n else:\n return None\n return self._orientedBoundingBox\n\n def get_image_features(self, featureType = \"Orb\", max = 1000) -> Tuple[np.array, np.array]:\n \"\"\"Get the keypoints and the descriptors of this Nodes Image resource\n\n Args:\n featureType (str, optional): The featuretype to detect, use: orb, sift. Defaults to \"Orb\".\n max (int, optional): The max features to detect. Defaults to 1000.\n\n Returns:\n Tuple[np.array, np.array]: The keypoints and the descriptors\n \"\"\"\n\n if(self.keypoints is None or self.descriptors is None):\n self.keypoints, self.descriptors = it.get_features(self.resource, featureType, max = max)\n return self.keypoints, self.descriptors\n\n def get_metadata_from_resource(self) ->bool:\n \"\"\"Returns the metadata from a resource. \\n\n\n Features:\n 1. imageHeight\\n\n 2. 
imageWidth\\n\n\n Returns:\n bool: True if exif data is successfully parsed\n \"\"\"\n if self._resource is None:\n return False \n \n try:\n if getattr(self,'imageHeight',None) is None:\n self.imageHeight=self.resource.shape[0]\n if getattr(self,'imageWidth',None) is None:\n self.imageWidth=self.resource.shape[1]\n return True\n except:\n raise ValueError('Metadata extraction from resource failed')\n \n # def get_oriented_bounding_box(self)->o3d.geometry.OrientedBoundingBox:\n # \"\"\"Gets the Open3D geometry from cartesianTransform\n\n # Returns:\n # o3d.geometry.orientedBoundingBox\n # \"\"\"\n # if getattr(self,'orientedBoundingBox',None) is None: \n # if getattr(self,'cartesianTransform',None) is not None:\n # box=o3d.geometry.create_mesh_box(width=1.0, height=1.0, depth=1.0)\n # self.orientedBoundingBox= box.transform(self.cartesianTransform)\n # else:\n # return None\n # return self.orientedBoundingBox\n\n def get_mesh_geometry(self, depth:float=10, focalLength35mm:float=24)->o3d.geometry.TriangleMesh:\n \"\"\"Generate a conical mesh representation using the Image's cartesianTransform and focalLength35mm.\\n\n \n .. image:: ../../../docs/pics/virtual_image2.PNG\n\n Args:\n 1. depth (float, optional): Viewing depth of the image. Defaults to 10m.\\n\n 2. focalLength35mm (float,optional): standardised focal length on 35mm film (w=36mm, h = 24mm)\\n\n\n Returns:\n o3d.geometry.TriangleMesh \n \"\"\"\n if self.cartesianTransform is not None:\n radius=35/(focalLength35mm*2)*depth \n mesh= o3d.geometry.TriangleMesh.create_cone(radius=radius, height=depth, resolution=20, split=1)\n rotation=gmu.get_rotation_matrix(self.cartesianTransform)\n r=R.from_matrix(rotation)\n rz=R.from_euler('xyz' ,[0, 0, 0], degrees=True)\n t=gmu.get_translation(self.cartesianTransform)\n mesh=mesh.translate(t)\n r=rz*r\n # t2=r.as_matrix() * np.array([[1],[0],[0]]) *depth\n A = np.dot( r.as_matrix(),np.array([0,0,-1]) )*depth\n mesh=mesh.translate(A)\n rot=r.as_matrix()\n mesh=mesh.rotate(rot)\n return mesh\n else:\n return None\n\n def get_virtual_image(self, geometries: o3d.geometry, downsampling:int=2)-> o3d.geometry.Image:\n \"\"\"Generates a virtual image of a set of geometries given the ImageNode's pose and pinholeModel.\n\n .. image:: ../../../docs/pics/rendering3.PNG\n\n\n Args:\n 1. geometries (o3d.geometry): geometries to include in the scene of the virtual image.\\n\n 2. downsampling (int, optional): pixel downsampling of the image both in height and width (each step reduces the density by factor 4). Defaults to 2.\n\n Returns:\n o3d.geometry.Image or None\n \"\"\"\n pinholeCamera=self.get_pinhole_camera_parameters(downsampling)\n if pinholeCamera is not None:\n return gmu.generate_virtual_image(geometries,pinholeCamera)\n else:\n return None\n\n def get_pinhole_camera_parameters(self, downsampling:int=1) -> o3d.camera.PinholeCameraParameters():\n \"\"\"Returns the intrinsic and extrinsic camera parameters based on the following attributes.\n\n .. image:: ../../../docs/pics/pinholemodel1.PNG\n\n Args:\n 1. self.imageWidth: width of the image in pixels (u) \\n\n 2. self.imageHeight: height of the image in pixels (v) \\n\n 3. self.focalLength35mm: focal length with a standardised Field-of-View.\\n \n 4. self.cartesianTransform: the inverted transform equals the external camera pose.\\n\n 5. downsampling (int, optional): pixel downsampling of the image both in height and width (each step reduces the density by factor 4). 
Defaults to 2.\n\n Returns:\n o3d.camera.PinholeCameraParameters()\n \"\"\"\n param=o3d.camera.PinholeCameraParameters()\n if getattr(self,'cartesianTransform',None) is not None:\n # param.extrinsic=np.linalg.inv(self.cartesianTransform) #! unsure why this was inverted\n param.extrinsic=self.cartesianTransform \n param.intrinsic=self.get_intrinsic_camera_parameters(downsampling)\n self.pinholeCamera=param\n return self.pinholeCamera\n else:\n return None\n\n def get_intrinsic_camera_parameters(self, downsampling:int=1) -> o3d.camera.PinholeCameraIntrinsic():\n \"\"\"Returns the intrinsic camera parameters based on the following attributes.\n \n Args:\n 1. self.imageWidth: width of the image in pixels (u). Defaults to 640p \\n\n 2. self.imageHeight: height of the image in pixels (v). Defaults to 480p \\n\n 3. self.focalLength35mm: focal length with a standardised Field-of-View. Defaults to 25mm \\n \n 4. self.PrincipalPointU: cx \\n\n 4. self.PrincipalPointV: cy \\n\n\n Returns:\n o3d.camera.PinholeCameraIntrinsic(width,height,fx,fy,cx,cy)\n \"\"\"\n #validate inputs\n width=int(self.imageWidth/downsampling) if getattr(self,'imageWidth',None) is not None else 640\n height=int(self.imageHeight/downsampling) if getattr(self,'imageHeight',None) is not None else 480\n f=self.focalLength35mm if getattr(self,'focalLength35mm',None) is not None else 2500\n\n #! deprecated\n # pixX=width/36 #these are standard 35mm film properties\n # pixY=height/24 #these are standard 35mm film properties\n # fx=pixX*f\n # fy=pixY*f \n\n if (getattr(self,'principalPointU',None) is not None and\n getattr(self,'principalPointV',None) is not None ):\n cx=width/2-0.5+self.principalPointU\n cy=height/2-0.5+self.principalPointV\n else:\n cx=width/2-0.5\n cy=height/2-0.5\n pinholeCameraIntrinsic = o3d.camera.PinholeCameraIntrinsic(width,height,f,f,cx,cy)\n self.intrinsic_matrix = pinholeCameraIntrinsic.intrinsic_matrix\n return pinholeCameraIntrinsic\n\n def get_metadata_from_exif_data(self) -> bool:\n \"\"\"Returns the metadata from a resource. \\n\n\n Features:\n 1. GPSInfo (geospatialTransform (np.array(3,1))\n 2. coordinateSystem (str) \\n\n 2. DateTime ('%Y-%m-%dT%H:%M:%S')\\n\n 3. XResolution (int)\\n\n 4. YResolution (int)\\n\n 5. ResolutionUnit (int)\\n\n 6. ExifImageWidth (int)\\n\n 7. 
ExifImageHeight (int)\\n\n\n Returns:\n bool: True if metadata is successfully parsed\n \"\"\"\n if self.get_path() is None or not os.path.exists(self.get_path()) :\n return False\n \n if getattr(self,'timestamp',None) is None :\n self.timestamp=ut.get_timestamp(self.path)\n \n if getattr(self,'name',None) is None:\n self.name=ut.get_filename(self.path)\n\n if (getattr(self,'imageWidth',None) is not None and\n getattr(self,'imageHeight',None) is not None and\n getattr(self,'geospatialTransform',None) is not None):\n return True\n\n # pix = PIL.Image.open(self.path) \n with PIL.Image.open(self.path) as pix:\n exifData=ut.get_exif_data(pix)\n\n if exifData is not None:\n self.timestamp=exifData.get(\"DateTime\")\n self.resolutionUnit=exifData.get(\"ResolutionUnit\")\n self.imageWidth=exifData.get(\"ExifImageWidth\")\n self.imageHeight=exifData.get(\"ExifImageHeight\")\n \n if 'GPSInfo' in exifData:\n gps_info = exifData[\"GPSInfo\"]\n if gps_info is not None:\n # self.GlobalPose=GlobalPose # (structure) SphericalTranslation(lat,long,alt), Quaternion(qw,qx,qy,qz)\n latitude=gps_info.get(\"GPSLatitude\")\n latReference=gps_info.get(\"GPSLatitudeRef\")\n newLatitude=ut.parse_exif_gps_data(latitude,latReference)\n longitude=gps_info.get( \"GPSLongitude\")\n longReference=gps_info.get(\"GPSLongitudeRef\")\n newLongitude=ut.parse_exif_gps_data(longitude,longReference)\n self.geospatialTransform=[ newLatitude, \n newLongitude,\n gps_info.get(\"GPSAltitude\")]\n self.coordinateSystem='geospatial-wgs84'\n \n return True\n else:\n return False\n \n def get_metadata_from_xmp_path(self)->bool:\n \"\"\"Read Metadata from .xmp file generated by https://www.capturingreality.com/.\n\n Features:\n 1. geospatialTransform (np.array(3x1))\\n\n 2. coordinateSystem (str)\\n\n 3. focalLength35mm (float)\\n\n 4. principalPointU (float)\\n\n 5. principalPointV (float)\\n\n 6. cartesianTransform (np.array(4x4))\\n\n\n Returns:\n bool: True if metadata is successfully parsed\n \"\"\"\n if self.xmpPath is None or not os.path.exists(self.xmpPath):\n return False\n\n if (getattr(self,'principalPointU',None) is not None and\n getattr(self,'principalPointV',None) is not None and\n getattr(self,'distortionCoeficients',None) is not None and\n getattr(self,'geospatialTransform',None) is not None ): \n return True\n \n mytree = ET.parse(self.xmpPath)\n root = mytree.getroot() \n \n self.timestamp=ut.get_timestamp(self.xmpPath)\n self.name=ut.get_filename(self.xmpPath)\n self.subject=self.name\n for child in root.iter('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description'):\n\n #Attributes\n for attribute in child.attrib:\n if ('latitude' in attribute and\n 'longitude' in attribute and\n 'altitude' in attribute):\n lat=ut.xcr_to_lat(child.attrib['{http://www.capturingreality.com/ns/xcr/1.1#}latitude'])\n long=ut.xcr_to_long(child.attrib['{http://www.capturingreality.com/ns/xcr/1.1#}longitude'])\n alt=ut.xcr_to_alt(child.attrib['{http://www.capturingreality.com/ns/xcr/1.1#}altitude'])\n self.geospatialTransform=np.array([lat, long, alt])\n if 'Coordinates' in attribute:\n self.coordinateSystem=child.attrib[attribute]\n if 'FocalLength35mm' in attribute:\n f=ut.xml_to_float(child.attrib[attribute])\n self.focalLength35mm=f/36*self.imageWidth if getattr(self,'imageWidth',None) else f#! 
multiple definitions possible \n if 'PrincipalPointU' in attribute:\n self.principalPointU=ut.xml_to_float(child.attrib[attribute])\n if 'PrincipalPointV' in attribute:\n self.principalPointV=ut.xml_to_float(child.attrib[attribute])\n\n #Nodes\n rotationnode=child.find('{http://www.capturingreality.com/ns/xcr/1.1#}Rotation')\n rotation=None\n if rotationnode is not None:\n rotation=ut.string_to_rotation_matrix(rotationnode.text).T #! RC uses column-based rotaton matrix\n\n positionnode=child.find('{http://www.capturingreality.com/ns/xcr/1.1#}Position')\n translation=None\n if positionnode is not None:\n translation=np.asarray(ut.string_to_list(positionnode.text))\n \n self.cartesianTransform=gmu.get_cartesian_transform(translation=translation,rotation=rotation)\n \n coeficientnode=child.find('{http://www.capturingreality.com/ns/xcr/1.1#}DistortionCoeficients')\n if coeficientnode is not None:\n self.distortionCoeficients=ut.string_to_list(coeficientnode.text) \n return True \n\n def get_metadata_from_xml_path(self) ->bool:\n \"\"\"Extract image metadata from XML Node generated by Agisoft Metashape (self.xmlData and self.subject should be present).\n\n Features:\n 1. cartesianTransform (np.array(4x4))\\n\n 2. sxy: accuracy in XY (m)\\n\n 3. sz: accuracy in Z (m) \\n\n\n Returns:\n bool: True if metadata is successfully parsed\n \"\"\"\n if self.xmlPath is None or not os.path.exists(self.xmlPath):\n return False\n\n if (getattr(self,'cartesianTransform',None) is not None and\n getattr(self,'sxy',None) is not None and\n getattr(self,'sz',None) is not None ):\n return True\n \n self.timestamp=ut.get_timestamp(self.xmlPath) \n mytree = ET.parse(self.xmlPath)\n root = mytree.getroot() \n xmlNode = next(cam for cam in root.findall('.//camera') if (ut.get_filename(cam.get('label')) == self.name or ut.get_filename(cam.get('label')) == ut.get_subject_name(self.subject) ))\n \n if xmlNode:\n #AGISOFT PARSING 1\n for child in xmlNode.iter('reference'): \n #get translation\n x = child.get('x')\n y = child.get('y')\n z = child.get('z')\n if x and y and z:\n translation=np.array([float(x),float(y),float(z)])\n self.cartesianTransform= gmu.get_cartesian_transform(translation=translation)\n #get rotations\n yaw = child.get('yaw')\n pitch = child.get('pitch')\n roll = child.get('roll')\n if yaw and pitch and roll:\n rotation = gmu.get_rotation_matrix(np.array([float(yaw),float(pitch),float(roll)]))\n self.cartesianTransform=gmu.get_cartesian_transform(translation=translation, rotation=rotation)\n #get accuracies\n sxy = child.get('sxy')\n if sxy:\n self.sxy=float(sxy)\n sz = child.get('sz')\n if sz:\n self.sz=float(sz)\n \n #AGISOFT PARSING 2\n transform=xmlNode.find('transform')\n if transform is not None:\n self.cartesianTransform=ut.string_to_list(transform.text)\n #! this exception breaks the code\n # else:\n # raise ValueError ('subject not in xml file') \n\n def set_cartesianTransform(self,value):\n \"\"\"Set the cartesianTransform of the ImageNode from various inputs.\n \n Args:\n 1. cartesianTransform(np.ndarray(4x4))\\n\n 2. np.ndarray or Vector3dVector (1x3) \\n\n 3. cartesianBounds (np.ndarray (6x1))\\n\n 4. np.ndarray or Vector3dVector (8x3 or nx3)\\n\n 5. 
Open3D.geometry\n \"\"\" \n try: #np.ndarray (4x4) \n self._cartesianTransform=np.reshape(value,(4,4))\n except:\n try: #np.ndarray or Vector3dVector (1x3) \n self._cartesianTransform=gmu.get_cartesian_transform(translation=np.asarray(value))\n except: \n try: # cartesianBounds (np.ndarray (6x1))\n self._cartesianTransform=gmu.get_cartesian_transform(cartesianBounds=np.asarray(value))\n except:\n try: # np.ndarray or Vector3dVector (8x3 or nx3)\n center=np.mean(np.asarray(value),0)\n self._cartesianTransform=gmu.get_cartesian_transform(translation=center)\n except:\n try: # Open3D.geometry\n self._cartesianTransform=gmu.get_cartesian_transform(translation=value.get_center())\n except:\n raise ValueError('Input must be np.ndarray(6x1,4x4,3x1,nx3), an Open3D geometry or a list of Vector3dVector objects.')\n\n\n def get_cartesian_transform(self) -> np.ndarray:\n \"\"\"Get the cartesianTransform from various inputs.\n \n Args:\n 1. self.cartesianBounds (np.array(6x1)) \\n\n 2. self.orientedBounds (np.array(8x3)) or a list of Vector3dVector objects \\n\n 3. orientedBoundingBox\\n\n 4. Open3D.geometry\n\n Returns:\n cartesianTransform(np.ndarray(4x4))\n \"\"\"\n if self._cartesianTransform is not None:\n pass\n elif getattr(self,'cartesianTransform',None) is not None:\n self._cartesianTransform = np.reshape(self.cartesianTransform, (4,4))\n elif getattr(self,'_cartesianBounds',None) is not None:\n self._cartesianTransform=gmu.get_cartesian_transform(cartesianBounds=self._cartesianBounds)\n elif getattr(self,'_orientedBounds',None) is not None:\n center=np.mean(self._orientedBounds,0)\n self._cartesianTransform=gmu.get_cartesian_transform(translation=center)\n elif getattr(self,'_orientedBoundingBox',None) is not None:\n self._cartesianTransform=gmu.get_cartesian_transform(translation=self._orientedBoundingBox.get_center())\n elif self._resource is not None:\n self._cartesianTransform=gmu.get_cartesian_transform(translation=self._resource.get_center())\n else:\n return None\n return self._cartesianTransform\n \n def create_rays(self,imagePoints:np.array,depths:np.array=None)->o3d.core.Tensor:\n \"\"\"Generate a grid a rays from the camera location to a given set of imagePoints.\\n\n \n **NOTE**: This function targets a subselection of imagePoints, use o3d.t.geometry.RaycastingScene.create_rays_pinhole if you want a dense raytracing for the full image.\n \n .. image:: ../../../docs/pics/Raycasting_1.PNG\n \n Args:\n imagePoints (np.array[n,2]): imagePoints are conform uv image coordinates system. so top left is (0,0). 
The camera intrinsic matrix is used to map it to the proper image coordinates.\\n\n\n Returns:\n o3d.core.Tensor (n,6): [:,0:3] is the camera center and [:,3:6] are the directions of the rays towards the imagePoints.\n \"\"\"\n points=imagePoints\n #validate inputs\n assert points.shape[-1]==2 \n points=np.reshape(points,(-1,2)) if len(points.shape) >2 else points\n \n f=self.focalLength35mm \n k=self.get_intrinsic_camera_parameters().intrinsic_matrix\n m=self.cartesianTransform \n t=gmu.get_translation(m) \n n=points.shape[0]\n \n #transform pixels to image coordinates (rows are first)\n u=+points[:,1]-self.imageWidth/2\n v=+points[:,0]-self.imageHeight/2 \n camera_coordinates=np.vstack((u,v,np.ones(n)))\n \n #transform to world coordinates\n camera_coordinates=np.vstack((camera_coordinates[0:2,:],np.full(n, f).T,np.ones((n,1)).T))\n world_coordinates=m @ camera_coordinates\n \n #normalize direction\n displacement=world_coordinates[0:3,:].T-t\n direction=gmu.normalize_vectors(displacement)\n \n if depths is not None:\n direction=direction * depths[:, np.newaxis]\n \n \n #create rays [camera.center, direction]\n rays=np.hstack((np.full((n,3), t),direction)) \n return rays \n \n def world_to_pixel_coordinates(self,world_coordinates) -> np.ndarray:\n \"\"\"Converts 3D world coordinates to pixel coordinates in an image.\n\n This function takes 3D world coordinates and converts them to pixel coordinates in an image. It uses camera parameters such as the transformation matrix, focal length, image width, and image height.\n\n Args:\n world_coordinates (np.ndarray): A 3D point in world coordinates to be converted.\n\n Returns:\n np.ndarray: A 2D array containing the pixel coordinates (row, column) in the image.\n\n Example:\n n = CameraParameters() # Initialize camera parameters object\n point_world = np.array([x, y, z]) # 3D point in world coordinates\n pixel_coordinates = world_to_pixel(n, point_world)\n\n Note:\n - The function performs a series of transformations, including world to camera, camera to image, and image centering.\n - It returns the pixel coordinates as a 2D array.\n \"\"\"\n imageCoordinates= np.linalg.inv(self.cartesianTransform) @ world_coordinates.T\n\n xy=copy.deepcopy(imageCoordinates)\n xy[0]= imageCoordinates[0]/imageCoordinates[2]*self.focalLength35mm\n xy[1]= imageCoordinates[1]/imageCoordinates[2]*self.focalLength35mm\n xy[2]= imageCoordinates[2]/imageCoordinates[2]*self.focalLength35mm\n\n uv=copy.deepcopy(xy)\n uv[1]=xy[1]+self.imageHeight/2\n uv[0]=xy[0]+self.imageWidth/2\n uv=uv[0:2]\n \n return uv\n \n \n def project_lineset_on_image(self,linesets:List[o3d.geometry.LineSet],colorList:List[np.array]) ->None:\n \"\"\"Project Opend3D linesets onto the resource of the ImageNode.\n\n **NOTE**: this affects the original image\n\n This function takes a list of images (`imgNodes`) and their associated additional lines represented by beginning and ending points in homogeneous coordinates. It annotates the additional lines on each image and saves the annotated images to an output directory.\n\n Args:\n linesets (List[o3d.LineSet]): List[m] of linesets\n colorList (List[np.array]): mx3 array with colors [0;1]\n\n Returns:\n list: A list of image nodes with additional line information.\n\n Example:\n hom_begin_points_ad = [[point1, point2, ...], [point1, point2, ...], ...] # List of homogeneous coordinates of beginning points\n hom_end_points_ad = [[point1, point2, ...], [point1, point2, ...], ...] 
# List of homogeneous coordinates of ending points\n imgNodes = [ImageNode1, ImageNode2, ...] # List of image nodes\n ImagePath = \"path/to/image_directory\"\n output = \"path/to/output_directory\"\n updated_imgNodes = saveImageAdLine(hom_begin_points_ad, hom_end_points_ad, imgNodes, ImagePath, output)\n\n Note:\n - The function annotates the additional lines on each image and saves the annotated images to the output directory.\n \"\"\"\n \n hom_begin_points_ad,hom_end_points_ad= gmu.lineset_to_points(linesets)\n \n\n fig = plt.figure(figsize=(10, 10))\n plt.imshow(self.resource)\n\n istart_list = []\n istop_list = []\n\n adlist=[]\n for count, start_list in enumerate(hom_begin_points_ad):\n istart_sublist = []\n istop_sublist = []\n adlines=[]\n for i,start in enumerate (start_list):\n stop_list = hom_end_points_ad[count]\n stop = stop_list[i]\n uvCoordinates = self.world_to_pixel_coordinates( np.array([[start],[stop]])) #this might be wrongly formatted\n\n\n u_start, v_start = uvCoordinates[0,0], uvCoordinates[0,1]\n u_stop, v_stop = uvCoordinates[1,0], uvCoordinates[1,1]\n istart=[u_start, v_start]\n istop=[u_stop, v_stop]\n istart_sublist.append(uvCoordinates[0,:])\n istop_sublist.append(uvCoordinates[1,:])\n \n plt.plot((u_start,u_stop),(v_start,v_stop), color=colorList[count], linewidth='0.2')\n adline=((u_start,v_start),(u_stop,v_stop))\n\n plt.axis(\"off\")\n plt.xlim([0, self.imageWidth])\n plt.ylim([self.imageHeight, 0])\n plt.close()\n\n \n def mask_image(hom_begin_points_ad,hom_end_points_ad,imgNodes,ImagePath,output):\n \"\"\"Saves masked images with annotated additional lines.\n\n This function takes a list of images (`imgNodes`) and their associated additional lines represented by beginning and ending points in homogeneous coordinates. It annotates the additional lines on each image, masks the annotated regions, and saves the masked images to an output directory.\n\n Args:\n hom_begin_points_ad (list): A list of lists, where each inner list contains the homogeneous coordinates of the beginning points of additional lines.\n hom_end_points_ad (list): A list of lists, where each inner list contains the homogeneous coordinates of the ending points of additional lines.\n imgNodes (list): A list of image nodes containing information about images.\n ImagePath (str): The path to the directory containing image files.\n output (str): The path to the output directory where masked images will be saved.\n\n Returns:\n str: The path to the output directory containing the saved masked images.\n\n Example:\n hom_begin_points_ad = [[point1, point2, ...], [point1, point2, ...], ...] # List of homogeneous coordinates of beginning points\n hom_end_points_ad = [[point1, point2, ...], [point1, point2, ...], ...] # List of homogeneous coordinates of ending points\n imgNodes = [ImageNode1, ImageNode2, ...] 
# List of image nodes\n ImagePath = \"path/to/image_directory\"\n output = \"path/to/output_directory\"\n masked_images_dir = saveMaskedImage(hom_begin_points_ad, hom_end_points_ad, imgNodes, ImagePath, output)\n\n Note:\n - The function annotates the additional lines on each image and creates masks for the annotated regions.\n - It saves the masked images to the output directory and returns the path to the directory.\n \"\"\"\n \n for im in imgNodes:\n if im.path and os.path.exists(im.path):\n image_cv2 = cv2.imread(im.path) #load the node's image resource from disk\n image_vis = cv2.cvtColor(image_cv2, cv2.COLOR_BGR2RGB)\n mask = np.zeros(image_cv2.shape[:2], dtype=\"uint8\")\n\n for count, start_list in enumerate(hom_begin_points_ad):\n for i,start in enumerate (start_list):\n stop_list = hom_end_points_ad[count]\n stop = stop_list[i]\n im.uvCoordinates_start = im.world_to_pixel_coordinates(start)\n im.uvCoordinates_stop = im.world_to_pixel_coordinates(stop)\n u_start, v_start = int(im.uvCoordinates_start[0]), int(im.uvCoordinates_start[1])\n u_stop, v_stop = int(im.uvCoordinates_stop[0]), int(im.uvCoordinates_stop[1])\n cv2.line(mask, (u_start,v_start),(u_stop,v_stop), (255, 255, 255, 255), thickness=75)\n\n masked_img = cv2.bitwise_and(image_cv2, image_cv2, mask=mask)\n output_dir = os.path.join(output,\"Images\", \"Masked_Image\")\n os.makedirs(output_dir, exist_ok=True)\n cv2.imwrite(os.path.join(output_dir, im.name + '.png'), masked_img) #save the masked image as documented\n\n return output_dir\n\n\n # def create_rays(self,imagePoints:np.array)->o3d.core.Tensor:\n # \"\"\"Generate a grid of rays from the camera location to a given set of imagePoints.\\n\n \n # **NOTE**: This function targets a subselection of imagePoints, use o3d.t.geometry.RaycastingScene.create_rays_pinhole if you want a dense raytracing for the full image.\n \n # .. image:: ../../../docs/pics/Raycasting_1.PNG\n \n # Args:\n # imagePoints (np.array[n,2]): imagePoints are conform uv image coordinates system. so top left is (0,0). The camera intrinsic matrix is used to map it to the proper image coordinates.\\n\n\n # Returns:\n # o3d.core.Tensor (n,6): [:,0:3] is the camera center and [:,3:6] are the directions of the rays towards the imagePoints.\n # \"\"\"\n # points=imagePoints\n # #validate inputs\n # assert points.shape[-1]==2 \n # points=np.reshape(points,(-1,2)) if len(points.shape) >2 else points\n \n # f=self.focalLength35mm \n # k=self.get_intrinsic_camera_parameters().intrinsic_matrix\n # m=self.cartesianTransform \n # t=gmu.get_translation(m) \n # n=points.shape[0]\n \n # #transform pixels to image coordinates (rows are first)\n # u=points[:,1]-self.imageWidth/2\n # v=-points[:,0]+self.imageHeight/2 \n # camera_coordinates=np.vstack((u,v,np.ones(n)))\n \n # #transform to world coordinates\n # camera_coordinates=np.vstack((camera_coordinates[0:2,:],np.full(n, f).T,np.ones((n,1)).T))\n # world_coordinates=m @ camera_coordinates\n # world_coordinates=gmu.normalize_vectors(world_coordinates[0:3,:].T)\n \n # #create rays [camera.center, direction(world_coordinates)]\n # rays=np.hstack((np.full((n,3), t),world_coordinates)) \n # return rays " }
]
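The projection pipeline in world_to_pixel_coordinates above is easy to sanity-check in isolation. A minimal sketch, assuming an identity camera pose, a 2500 px focal length and a 1920x1080 image (all example values, not taken from the repository):

import numpy as np

# Assumed example camera: identity pose, focal length in pixels, HD image size.
cartesianTransform = np.eye(4)
focalLength35mm, imageWidth, imageHeight = 2500.0, 1920, 1080

def world_to_pixel(world_point_h: np.ndarray) -> np.ndarray:
    """Mirror the method's steps: world -> camera frame, perspective divide, centre shift."""
    cam = np.linalg.inv(cartesianTransform) @ world_point_h     # world to camera frame
    x = cam[0] / cam[2] * focalLength35mm                       # perspective projection
    y = cam[1] / cam[2] * focalLength35mm
    return np.array([x + imageWidth / 2, y + imageHeight / 2])  # shift origin to top-left

print(world_to_pixel(np.array([0.5, 0.25, 10.0, 1.0])))  # [1085. 602.5]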
from pathlib import Path
from typing import Tuple
from rdflib import Graph, URIRef
from scipy.spatial.transform import Rotation as R
from geomapi.nodes import Node
from geomapi.nodes import ImageNode
import xml.etree.ElementTree as ET
import cv2
import PIL
import numpy as np
import os
import open3d as o3d
import math
import uuid
import matplotlib.pyplot as plt
import geomapi.utils as ut
import geomapi.utils.geometryutils as gmu
import geomapi.utils.imageutils as it
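The get_intrinsic_camera_parameters method shown in the snippet earlier assembles an Open3D intrinsic from the node's width, height and focal length, with the principal point defaulting to the image centre. A minimal standalone sketch with assumed example values:

import open3d as o3d

# Example values; the method itself falls back to 640x480 and a default focal
# length when the node carries no metadata.
width, height, f = 1920, 1080, 2500.0
cx = width / 2 - 0.5  # principal point at the image centre (no principalPointU offset)
cy = height / 2 - 0.5
intrinsic = o3d.camera.PinholeCameraIntrinsic(width, height, f, f, cx, cy)
print(intrinsic.intrinsic_matrix)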
15,659
""" Panonode is a Python Class to govern the data and metadata of panoramic data (OpenCV, PIL). This node builds upon the OpenCV and PIL API for the image definitions. It directly inherits from Node. Be sure to check the properties defined in the above classes to initialise the Node. """ #IMPORT PACKAGES #IMPORT MODULES
""" Panonode is a Python Class to govern the data and metadata of panoramic data (OpenCV, PIL). This node builds upon the OpenCV and PIL API for the image definitions. It directly inherits from Node. Be sure to check the properties defined in the above classes to initialise the Node. """ #IMPORT PACKAGES #IMPORT MODULES
class PanoNode(ImageNode):
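The class declaration above is the continuation point of the cropped module. A minimal, hypothetical sketch of how the class could continue, consistent with the module docstring (the real class body is not part of this record):

from geomapi.nodes import ImageNode

class PanoNode(ImageNode):
    """Node governing the data and metadata of panoramic imagery (OpenCV, PIL)."""

    def __init__(self, **kwargs):
        # Defer to the ImageNode/Node initialisers for paths, subject and transforms.
        super().__init__(**kwargs)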
1
2023-11-23 08:15:01+00:00
24k
Yifei-Y/Openset-RCNN
openset_rcnn/evaluation/os_coco_evaluation.py
[ { "identifier": "GRASPNET_KNOWN_IDS", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_IDS = [graspnet_known_name_id_dic[name_cat] for name_cat in GRASPNET_KNOWN_CATEGORIES]" }, { "identifier": "GRASPNET_KNOWN_CATEGORIES", "path": "openset_rcnn/data/graspnet_meta.py", "snippet": "GRASPNET_KNOWN_CATEGORIES = [\n \"cracker_box\", \"tomato_soup_can\", \"banana\", \"mug\", \"power_drill\", \"scissors\", \"strawberry\",\n \"peach\", \"plum\", \"knife\", \"flat_screwdriver\", \"racquetball\", \"b_cups\", \"d_toy_airplane\",\n \"f_toy_airplane\", \"i_toy_airplane\", \"j_toy_airplane\", \"dabao_sod\", \"darlie_toothpaste\",\n \"camel\", \"large_elephant\", \"rhinocero\", \"darlie_box\", \"black_mouse\", \"dabao_facewash\",\n \"pantene\", \"head_shoulders_supreme\", \"head_shoulders_care\"\n]" }, { "identifier": "OpensetCOCOEval", "path": "openset_rcnn/evaluation/os_cocoeval.py", "snippet": "class OpensetCOCOEval(COCOeval):\n def _prepare(self):\n '''\n Prepare ._gts and ._dts for evaluation based on params\n :return: None\n '''\n def _toMask(anns, coco):\n # modify ann['segmentation'] by reference\n for ann in anns:\n rle = coco.annToRLE(ann)\n ann['segmentation'] = rle\n p = self.params\n k_gts=self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n unk_gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=1000))\n k_dts=self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))\n unk_dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=1000))\n\n # convert ground truth to mask if iouType == 'segm'\n if p.iouType == 'segm':\n _toMask(k_gts, self.cocoGt)\n _toMask(unk_gts, self.cocoGt)\n _toMask(k_dts, self.cocoDt)\n _toMask(unk_dts, self.cocoDt)\n # set ignore flag\n for kgt in k_gts:\n kgt['ignore'] = kgt['ignore'] if 'ignore' in kgt else 0\n kgt['ignore'] = 'iscrowd' in kgt and kgt['iscrowd']\n for ugt in unk_gts:\n ugt['ignore'] = ugt['ignore'] if 'ignore' in ugt else 0\n ugt['ignore'] = 'iscrowd' in ugt and ugt['iscrowd']\n self._k_gts = defaultdict(list) # gt for evaluation\n self._ok_gts = defaultdict(list)\n self._unk_gts = defaultdict(list)\n self._k_dts = defaultdict(list) # dt for evaluation\n self._unk_dts = defaultdict(list)\n for kgt in k_gts:\n self._k_gts[kgt['image_id'], kgt['category_id']].append(kgt)\n for cId in p.catIds:\n for kgt in k_gts:\n if kgt['category_id'] != cId:\n self._ok_gts[kgt['image_id'], cId].append(kgt)\n for ugt in unk_gts:\n self._unk_gts[ugt['image_id']].append(ugt)\n for kdt in k_dts:\n self._k_dts[kdt['image_id'], kdt['category_id']].append(kdt)\n for udt in unk_dts:\n self._unk_dts[udt['image_id']].append(udt)\n self.evalImgs_kdt = defaultdict(list) # per-image per-category evaluation results\n self.evalImgs_unkdt = defaultdict(list)\n self.eval_kdt = {} # accumulated evaluation results\n self.eval_unkdt = {}\n\n def evaluate(self):\n '''\n Run per image evaluation on given images and store results (a list of dict) in self.evalImgs\n :return: None\n '''\n tic = time.time()\n print('Running per image evaluation...')\n p = self.params\n # add backward compatibility if useSegm is specified in params\n if not p.useSegm is None:\n p.iouType = 'segm' if p.useSegm == 1 else 'bbox'\n print('useSegm (deprecated) is not None. 
Running {} evaluation'.format(p.iouType))\n print('Evaluate annotation type *{}*'.format(p.iouType))\n p.imgIds = list(np.unique(p.imgIds))\n if p.useCats:\n p.catIds = list(np.unique(p.catIds))\n p.maxDets = sorted(p.maxDets)\n self.params=p\n\n self._prepare()\n # loop through images, area range, max detection number\n catIds = p.catIds if p.useCats else [-1]\n\n self.ious_kdt_kgt = {(imgId, catId): self.computeIoU_kdt_kgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_kdt_okgt = {(imgId, catId): self.computeIoU_kdt_okgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_kdt_unkgt = {(imgId, catId): self.computeIoU_kdt_unkgt(imgId, catId) \\\n for imgId in p.imgIds\n for catId in catIds}\n self.ious_unkdt_kgt = {(imgId): self.computeIoU_unkdt_kgt(imgId) for imgId in p.imgIds}\n self.ious_unkdt_unkgt = {(imgId): self.computeIoU_unkdt_unkgt(imgId) for imgId in p.imgIds}\n \n maxDet = p.maxDets[-1]\n self.evalImgs_kdt = [self.evaluateImg_kdt(imgId, catId, areaRng, maxDet)\n for catId in catIds\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n self.evalImgs_unkdt = [self.evaluateImg_unkdt(imgId, areaRng, maxDet)\n for areaRng in p.areaRng\n for imgId in p.imgIds\n ]\n \n self._paramsEval = copy.deepcopy(self.params)\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format(toc-tic))\n\n def computeIoU_kdt_kgt(self, imgId, catId):\n p = self.params\n if p.useCats:\n gt = self._k_gts[imgId,catId]\n dt = self._k_dts[imgId,catId]\n else:\n gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n dt = [_ for cId in p.catIds for _ in self._k_dts[imgId,cId]]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_kdt_okgt(self, imgId, catId):\n p = self.params\n gt = self._ok_gts[imgId, catId]\n dt = self._k_dts[imgId,catId]\n \n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_kdt_unkgt(self, imgId, catId):\n p = self.params\n gt = self._unk_gts[imgId]\n dt = self._k_dts[imgId,catId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n 
# compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_unkdt_kgt(self, imgId):\n p = self.params\n gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n dt = self._unk_dts[imgId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n \n def computeIoU_unkdt_unkgt(self, imgId):\n p = self.params\n gt = self._unk_gts[imgId]\n dt = self._unk_dts[imgId]\n if len(gt) == 0 and len(dt) ==0:\n return []\n inds = np.argsort([-d['score'] for d in dt], kind='mergesort')\n dt = [dt[i] for i in inds]\n if len(dt) > p.maxDets[-1]:\n dt=dt[0:p.maxDets[-1]]\n\n if p.iouType == 'segm':\n g = [g['segmentation'] for g in gt]\n d = [d['segmentation'] for d in dt]\n elif p.iouType == 'bbox':\n g = [g['bbox'] for g in gt]\n d = [d['bbox'] for d in dt]\n else:\n raise Exception('unknown iouType for iou computation')\n\n # compute iou between each dt and gt region\n iscrowd = [int(o['iscrowd']) for o in gt]\n ious = maskUtils.iou(d,g,iscrowd)\n return ious\n\n def evaluateImg_kdt(self, imgId, catId, aRng, maxDet):\n '''\n perform evaluation for single category and image\n :return: dict (single image results)\n '''\n p = self.params\n\n k_gt = self._k_gts[imgId,catId]\n ok_gt = self._ok_gts[imgId,catId]\n unk_gt = self._unk_gts[imgId]\n k_dt = self._k_dts[imgId,catId]\n\n for kg in k_gt:\n if kg['ignore'] or (kg['area']<aRng[0] or kg['area']>aRng[1]):\n kg['_ignore'] = 1\n else:\n kg['_ignore'] = 0\n for okg in ok_gt:\n if okg['ignore'] or (okg['area']<aRng[0] or okg['area']>aRng[1]):\n okg['_ignore'] = 1\n else:\n okg['_ignore'] = 0\n for ug in unk_gt:\n if ug['ignore'] or (ug['area']<aRng[0] or ug['area']>aRng[1]):\n ug['_ignore'] = 1\n else:\n ug['_ignore'] = 0\n\n # sort dt highest score first, sort gt ignore last\n k_gtind = np.argsort([kg['_ignore'] for kg in k_gt], kind='mergesort')\n k_gt = [k_gt[i] for i in k_gtind]\n ok_gtind = np.argsort([okg['_ignore'] for okg in ok_gt], kind='mergesort')\n ok_gt = [ok_gt[i] for i in ok_gtind]\n unk_gtind = np.argsort([ug['_ignore'] for ug in unk_gt], kind='mergesort')\n unk_gt = [unk_gt[i] for i in unk_gtind]\n k_dtind = np.argsort([-kd['score'] for kd in k_dt], kind='mergesort')\n k_dt = [k_dt[i] for i in k_dtind[0:maxDet]]\n k_iscrowd = [int(o['iscrowd']) for o in k_gt]\n ok_iscrowd = [int(o['iscrowd']) for o in ok_gt]\n unk_iscrowd = [int(o['iscrowd']) for o in unk_gt]\n # load computed ious\n ious_kgt = (\n self.ious_kdt_kgt[imgId, catId][:, k_gtind] \\\n if len(self.ious_kdt_kgt[imgId, catId]) > 0 else self.ious_kdt_kgt[imgId, catId]\n )\n ious_okgt = (\n self.ious_kdt_okgt[imgId, catId][:, ok_gtind] \\\n if len(self.ious_kdt_okgt[imgId, catId]) > 0 else self.ious_kdt_okgt[imgId, catId]\n )\n ious_unkgt = (\n self.ious_kdt_unkgt[imgId, catId][:, unk_gtind] \\\n if len(self.ious_kdt_unkgt[imgId, catId]) > 0 else self.ious_kdt_unkgt[imgId, catId]\n )\n\n T = len(p.iouThrs)\n KG = len(k_gt)\n OKG = len(ok_gt)\n UG = 
len(unk_gt)\n KD = len(k_dt)\n kgtm = np.zeros((T,KG))\n okgtm = np.zeros((T,OKG))\n unkgtm = np.zeros((T,UG))\n kdtm_kgt = np.zeros((T,KD))\n kdtm_okgt = np.zeros((T,KD))\n kdtm_unkgt = np.zeros((T,KD))\n kgtIg = np.array([kg['_ignore'] for kg in k_gt])\n okgtIg = np.array([okg['_ignore'] for okg in ok_gt])\n unkgtIg = np.array([ug['_ignore'] for ug in unk_gt])\n kdtIg_kgt = np.zeros((T,KD))\n kdtIg_okgt = np.zeros((T,KD))\n kdtIg_unkgt = np.zeros((T,KD))\n\n if not len(ious_kgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for kgind, kg in enumerate(k_gt):\n # if this gt already matched, and not a crowd, continue\n if kgtm[tind,kgind]>0 and not k_iscrowd[kgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and kgtIg[m]==0 and kgtIg[kgind]==1:\n break\n # continue to next gt unless better match made\n if ious_kgt[kdind,kgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_kgt[kdind,kgind]\n m=kgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_kgt[tind,kdind] = kgtIg[m]\n kdtm_kgt[tind,kdind] = k_gt[m]['id']\n kgtm[tind,m] = kd['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_kgt = np.logical_or(kdtIg_kgt, np.logical_and(kdtm_kgt==0, np.repeat(a,T,0)))\n\n if not len(ious_okgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for okgind, okg in enumerate(ok_gt):\n # if this gt already matched, and not a crowd, continue\n if okgtm[tind,okgind]>0 and not ok_iscrowd[okgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and okgtIg[m]==0 and okgtIg[okgind]==1:\n break\n # continue to next gt unless better match made\n if ious_okgt[kdind,okgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_okgt[kdind,okgind]\n m=okgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_okgt[tind,kdind] = okgtIg[m]\n kdtm_okgt[tind,kdind] = ok_gt[m]['id']\n okgtm[tind,m] = kd['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_okgt = np.logical_or(kdtIg_okgt, np.logical_and(kdtm_okgt==0, np.repeat(a,T,0)))\n\n if not len(ious_unkgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for kdind, kd in enumerate(k_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for unkgind, unkg in enumerate(unk_gt):\n # if this gt already matched, and not a crowd, continue\n if unkgtm[tind,unkgind]>0 and not unk_iscrowd[unkgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and unkgtIg[m]==0 and unkgtIg[unkgind]==1:\n break\n # continue to next gt unless better match made\n if ious_unkgt[kdind,unkgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_unkgt[kdind,unkgind]\n m=unkgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n kdtIg_unkgt[tind,kdind] = unkgtIg[m]\n kdtm_unkgt[tind,kdind] = unk_gt[m]['id']\n unkgtm[tind,m] = kd['id']\n # set unmatched detections outside of 
area range to ignore\n a = np.array([kd['area']<aRng[0] or kd['area']>aRng[1] for kd in k_dt]).reshape((1, len(k_dt)))\n kdtIg_unkgt = np.logical_or(kdtIg_unkgt, np.logical_and(kdtm_unkgt==0, np.repeat(a,T,0)))\n\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'category_id': catId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'kdtIds': [kd['id'] for kd in k_dt],\n 'kgtIds': [kg['id'] for kg in k_gt],\n 'okgtIds': [okg['id'] for okg in ok_gt],\n 'unkgtIds': [ug['id'] for ug in unk_gt],\n 'Matches_kdt_kgt': kdtm_kgt,\n 'Matches_kdt_okgt': kdtm_okgt,\n 'Matches_kdt_unkgt': kdtm_unkgt,\n 'kgtMatches': kgtm,\n 'okgtMatches': okgtm,\n 'unkgtMatches': unkgtm,\n 'kdtScores': [kd['score'] for kd in k_dt],\n 'kgtIgnore': kgtIg,\n 'okgtIgnore': okgtIg,\n 'unkgtIgnore': unkgtIg,\n 'kdtIgnore_kgt': kdtIg_kgt,\n 'kdtIgnore_okgt': kdtIg_okgt,\n 'kdtIgnore_unkgt': kdtIg_unkgt,\n }\n \n def evaluateImg_unkdt(self, imgId, aRng, maxDet):\n '''\n '''\n p = self.params\n k_gt = [_ for cId in p.catIds for _ in self._k_gts[imgId,cId]]\n unk_gt = self._unk_gts[imgId]\n unk_dt = self._unk_dts[imgId]\n if len(unk_gt) == 0 and len(unk_dt) == 0:\n return None\n \n for kg in k_gt:\n if kg['ignore'] or (kg['area']<aRng[0] or kg['area']>aRng[1]):\n kg['_ignore'] = 1\n else:\n kg['_ignore'] = 0\n for ug in unk_gt:\n if ug['ignore'] or (ug['area']<aRng[0] or ug['area']>aRng[1]):\n ug['_ignore'] = 1\n else:\n ug['_ignore'] = 0\n \n # sort dt highest score first, sort gt ignore last\n kgtind = np.argsort([kg['_ignore'] for kg in k_gt], kind='mergesort')\n k_gt = [k_gt[i] for i in kgtind]\n unk_gtind = np.argsort([ug['_ignore'] for ug in unk_gt], kind='mergesort')\n unk_gt = [unk_gt[i] for i in unk_gtind]\n udtind = np.argsort([-ud['score'] for ud in unk_dt], kind='mergesort')\n unk_dt = [unk_dt[i] for i in udtind[0:maxDet]]\n k_iscrowd = [int(o['iscrowd']) for o in k_gt]\n unk_iscrowd = [int(o['iscrowd']) for o in unk_gt]\n\n # load computed ious\n ious_kgt = (\n self.ious_unkdt_kgt[imgId][:, kgtind] \\\n if len(self.ious_unkdt_kgt[imgId]) > 0 else self.ious_unkdt_kgt[imgId]\n )\n ious_unkgt = (\n self.ious_unkdt_unkgt[imgId][:, unk_gtind] \\\n if len(self.ious_unkdt_unkgt[imgId]) > 0 else self.ious_unkdt_unkgt[imgId]\n )\n\n T = len(p.iouThrs)\n KG = len(k_gt)\n UG = len(unk_gt)\n UD = len(unk_dt)\n kgtm = np.zeros((T,KG))\n unkgtm = np.zeros((T,UG))\n unkdtm_kgt = np.zeros((T,UD))\n unkdtm_unkgt = np.zeros((T,UD))\n kgtIg = np.array([g['_ignore'] for g in k_gt])\n unkgtIg = np.array([ug['_ignore'] for ug in unk_gt])\n unkdtIg_kgt = np.zeros((T,UD))\n unkdtIg_unkgt = np.zeros((T,UD))\n\n if not len(ious_kgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for udind, ud in enumerate(unk_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for kgind, kg in enumerate(k_gt):\n # if this gt already matched, and not a crowd, continue\n if kgtm[tind,kgind]>0 and not k_iscrowd[kgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and kgtIg[m]==0 and kgtIg[kgind]==1:\n break\n # continue to next gt unless better match made\n if ious_kgt[udind,kgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_kgt[udind,kgind]\n m=kgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n unkdtIg_kgt[tind,udind] = kgtIg[m]\n unkdtm_kgt[tind,udind] = k_gt[m]['id']\n kgtm[tind,m] = ud['id']\n # set unmatched detections outside of area range to ignore\n a = 
np.array([ud['area']<aRng[0] or ud['area']>aRng[1] for ud in unk_dt]).reshape((1, len(unk_dt)))\n unkdtIg_kgt = np.logical_or(unkdtIg_kgt, np.logical_and(unkdtm_kgt==0, np.repeat(a,T,0)))\n\n if not len(ious_unkgt)==0:\n for tind, t in enumerate(p.iouThrs):\n for udind, ud in enumerate(unk_dt):\n # information about best match so far (m=-1 -> unmatched)\n iou = min([t,1-1e-10])\n m = -1\n for unkgind, unkg in enumerate(unk_gt):\n # if this gt already matched, and not a crowd, continue\n if unkgtm[tind,unkgind]>0 and not unk_iscrowd[unkgind]:\n continue\n # if dt matched to reg gt, and on ignore gt, stop\n if m>-1 and unkgtIg[m]==0 and unkgtIg[unkgind]==1:\n break\n # continue to next gt unless better match made\n if ious_unkgt[udind,unkgind] < iou:\n continue\n # if match successful and best so far, store appropriately\n iou=ious_unkgt[udind,unkgind]\n m=unkgind\n # if match made store id of match for both dt and gt\n if m ==-1:\n continue\n unkdtIg_unkgt[tind,udind] = unkgtIg[m]\n unkdtm_unkgt[tind,udind] = unk_gt[m]['id']\n unkgtm[tind,m] = ud['id']\n # set unmatched detections outside of area range to ignore\n a = np.array([ud['area']<aRng[0] or ud['area']>aRng[1] for ud in unk_dt]).reshape((1, len(unk_dt)))\n unkdtIg_unkgt = np.logical_or(unkdtIg_unkgt, np.logical_and(unkdtm_unkgt==0, np.repeat(a,T,0)))\n\n # store results for given image and category\n return {\n 'image_id': imgId,\n 'aRng': aRng,\n 'maxDet': maxDet,\n 'unkdtIds': [ud['id'] for ud in unk_dt],\n 'kgtIds': [kg['id'] for kg in k_gt],\n 'unkgtIds': [ug['id'] for ug in unk_gt],\n 'Matches_unkdt_kgt': unkdtm_kgt,\n 'Matches_unkdt_unkgt': unkdtm_unkgt,\n 'kgtMatches': kgtm,\n 'unkgtMatches': unkgtm,\n 'unkdtScores': [ud['score'] for ud in unk_dt],\n 'kgtIgnore': kgtIg,\n 'unkgtIgnore': unkgtIg,\n 'unkdtIgnore_kgt': unkdtIg_kgt,\n 'unkdtIgnore_unkgt': unkdtIg_unkgt,\n }\n\n def accumulate(self, p = None):\n '''\n Accumulate per image evaluation results and store the result in self.eval\n :param p: input params for evaluation\n :return: None\n '''\n print('Accumulating evaluation results of known detections...')\n tic = time.time()\n if not self.evalImgs_kdt or not self.evalImgs_unkdt:\n print('Please run evaluate() first')\n # allows input customized parameters\n if p is None:\n p = self.params\n p.catIds = p.catIds if p.useCats == 1 else [-1]\n T = len(p.iouThrs)\n R = len(p.recThrs)\n K = len(p.catIds) if p.useCats else 1\n A = len(p.areaRng)\n M = len(p.maxDets)\n precision = -np.ones((T,R,K,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,K,A,M))\n scores = -np.ones((T,R,K,A,M))\n ok_det_as_known = np.zeros((T,K,A,M))\n unk_det_as_known = np.zeros((T,K,A,M))\n fp_os = np.zeros((T,R,K,A,M))\n tp_plus_fp_cs = np.zeros((T,R,K,A,M))\n\n # create dictionary for future indexing\n _pe = self._paramsEval\n catIds = _pe.catIds if _pe.useCats else [-1]\n setK = set(catIds)\n setA = set(map(tuple, _pe.areaRng))\n setM = set(_pe.maxDets)\n setI = set(_pe.imgIds)\n # get inds to evaluate\n k_list = [n for n, k in enumerate(p.catIds) if k in setK]\n m_list = [m for n, m in enumerate(p.maxDets) if m in setM]\n a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]\n i_list = [n for n, i in enumerate(p.imgIds) if i in setI]\n I0 = len(_pe.imgIds)\n A0 = len(_pe.areaRng)\n # retrieve E at each category, area range, and max number of detections\n for k, k0 in enumerate(k_list):\n Nk = k0*A0*I0\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = 
[self.evalImgs_kdt[Nk + Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n dtScores = np.concatenate([e['kdtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-dtScores, kind='mergesort')\n dtScoresSorted = dtScores[inds]\n \n dtScoresSortedExpand = np.expand_dims(dtScoresSorted, 0)\n dtScoresSortedExpand = np.repeat(dtScoresSortedExpand, T, 0)\n kdtm_kgt = np.concatenate([e['Matches_kdt_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtm_okgt = np.concatenate([e['Matches_kdt_okgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtm_unkgt = np.concatenate([e['Matches_kdt_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_kgt = np.concatenate([e['kdtIgnore_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_okgt = np.concatenate([e['kdtIgnore_okgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kdtIg_unkgt = np.concatenate([e['kdtIgnore_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kgtIg = np.concatenate([e['kgtIgnore'] for e in E])\n npig = np.count_nonzero(kgtIg==0)\n if npig == 0:\n continue\n tps = np.logical_and(kdtm_kgt, np.logical_not(kdtIg_kgt) )\n fps = np.logical_and(np.logical_not(kdtm_kgt), np.logical_not(kdtIg_kgt) )\n okfps = np.logical_and(kdtm_okgt, np.logical_not(kdtIg_okgt))\n ufps = np.logical_and(kdtm_unkgt, np.logical_not(kdtIg_unkgt))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float)\n tp_fp_sum = tp_sum + fp_sum\n okfp_sum = np.sum(okfps, axis=1).astype(dtype=np.float)\n ufp_sum = np.cumsum(ufps, axis=1).astype(dtype=np.float)\n for t, (tp, fp, tp_fp, ufp) in enumerate(zip(tp_sum, fp_sum, tp_fp_sum, ufp_sum)):\n if len(ufp):\n unk_det_as_known[t,k,a,m] = ufp[-1]\n\n ok_det_as_known[t,k,a,m] = okfp_sum[t]\n\n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n tf = np.zeros((R,))\n fo = np.zeros((R,))\n\n if nd:\n recall[t,k,a,m] = rc[-1]\n else:\n recall[t,k,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = dtScoresSorted[pi]\n except:\n pass\n l = len(tp_fp)\n if l:\n for ri, pi in enumerate(inds):\n if pi == l:\n pi -= 1\n tf[ri] = tp_fp[pi]\n fo[ri] = ufp[pi]\n precision[t,:,k,a,m] = np.array(q)\n scores[t,:,k,a,m] = np.array(ss)\n tp_plus_fp_cs[t,:,k,a,m] = np.array(tf)\n fp_os[t,:,k,a,m] = np.array(fo)\n self.eval_kdt = {\n 'params': p,\n 'counts': [T, R, K, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n 'ok_det_as_known': ok_det_as_known,\n 'unk_det_as_known': unk_det_as_known,\n 'tp_plus_fp_cs': tp_plus_fp_cs,\n 'fp_os': fp_os\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n print('Accumulating evaluation results of unknown detections...')\n tic = time.time()\n if not self.evalImgs_unkdt:\n print('Please run evaluate() first')\n \n precision = -np.ones((T,R,A,M)) # -1 for the precision of absent categories\n recall = -np.ones((T,A,M))\n scores = -np.ones((T,R,A,M))\n\n num_k_det_as_unk = 
np.zeros((T,A,M))\n\n # retrieve E at each category, area range, and max number of detections\n for a, a0 in enumerate(a_list):\n Na = a0*I0\n for m, maxDet in enumerate(m_list):\n E = [self.evalImgs_unkdt[Na + i] for i in i_list]\n E = [e for e in E if not e is None]\n if len(E) == 0:\n continue\n udtScores = np.concatenate([e['unkdtScores'][0:maxDet] for e in E])\n\n # different sorting method generates slightly different results.\n # mergesort is used to be consistent as Matlab implementation.\n inds = np.argsort(-udtScores, kind='mergesort')\n udtScoresSorted = udtScores[inds]\n\n udtm_kgt = np.concatenate([e['Matches_unkdt_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtm_unkgt = np.concatenate([e['Matches_unkdt_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtIg_kgt = np.concatenate([e['unkdtIgnore_kgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n udtIg_unkgt = np.concatenate([e['unkdtIgnore_unkgt'][:,0:maxDet] for e in E], axis=1)[:,inds]\n kgtIg = np.concatenate([e['kgtIgnore'] for e in E])\n unkgtIg = np.concatenate([e['unkgtIgnore'] for e in E])\n npig = np.count_nonzero(unkgtIg==0 )\n if npig == 0:\n continue\n\n tps = np.logical_and(udtm_unkgt, np.logical_not(udtIg_unkgt) )\n fps = np.logical_and(np.logical_not(udtm_unkgt), np.logical_not(udtIg_unkgt) )\n k_det_as_unk_fps = np.logical_and(udtm_kgt, np.logical_not(udtIg_kgt))\n\n tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)\n fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)\n k_det_as_unk_fp_sum = np.cumsum(k_det_as_unk_fps, axis=1).astype(dtype=np.float)\n for t, (tp, fp, k_det_as_unk_fp) in enumerate(zip(tp_sum, fp_sum, k_det_as_unk_fp_sum)):\n if len(k_det_as_unk_fp):\n num_k_det_as_unk[t,a,m] = k_det_as_unk_fp[-1]\n \n tp = np.array(tp)\n fp = np.array(fp)\n nd = len(tp)\n rc = tp / npig\n pr = tp / (fp+tp+np.spacing(1))\n q = np.zeros((R,))\n ss = np.zeros((R,))\n\n if nd:\n recall[t,a,m] = rc[-1]\n else:\n recall[t,a,m] = 0\n\n # numpy is slow without cython optimization for accessing elements\n # use python array gets significant speed improvement\n pr = pr.tolist(); q = q.tolist()\n\n for i in range(nd-1, 0, -1):\n if pr[i] > pr[i-1]:\n pr[i-1] = pr[i]\n\n inds = np.searchsorted(rc, p.recThrs, side='left')\n try:\n for ri, pi in enumerate(inds):\n q[ri] = pr[pi]\n ss[ri] = udtScoresSorted[pi]\n except:\n pass\n precision[t,:,a,m] = np.array(q)\n scores[t,:,a,m] = np.array(ss)\n \n self.eval_unkdt = {\n 'params': p,\n 'counts': [T, R, A, M],\n 'date': datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),\n 'precision': precision,\n 'recall': recall,\n 'scores': scores,\n 'k_det_as_unk': num_k_det_as_unk\n }\n toc = time.time()\n print('DONE (t={:0.2f}s).'.format( toc-tic))\n\n def summarize(self):\n '''\n Compute and display summary metrics for evaluation results.\n Note this functin can *only* be applied on the default parameter setting\n '''\n def _num_unk_det_as_known(iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {}'\n titleStr = 'UNK_det_as_K'\n typeStr = '(AOSE)'\n iouStr = '{:0.2f}'.format(iouThr)\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n unk_det_as_known = self.eval_kdt['unk_det_as_known']\n\n self.unk_det_as_known = unk_det_as_known[tind,:,aind,mind]\n\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, 
np.sum(unk_det_as_known[tind,:,aind,mind])))\n print(unk_det_as_known[tind,:,aind,mind])\n \n return np.sum(unk_det_as_known[tind,:,aind,mind])\n\n def _num_k_det_as_unk(iouThr=None, areaRng='all', maxDets=100):\n p = self.params\n iStr = ' {:<18} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {}'\n titleStr = 'K_det_as_UNK'\n iouStr = '{:0.2f}'.format(iouThr)\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n k_det_as_unk = self.eval_unkdt['k_det_as_unk']\n\n self.k_det_as_unk = k_det_as_unk[tind,aind,mind]\n\n print(iStr.format(titleStr, iouStr, areaRng, maxDets, k_det_as_unk[tind,aind,mind]))\n \n return k_det_as_unk[tind,aind,mind]\n \n def _wi(iouThr=None, areaRng='all', maxDets=100, recall_level=0.8):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Wilderness Impact'\n typeStr = '(WI)'\n iouStr = '{:0.2f}'.format(iouThr)\n\n tind = [i for i, iouT in enumerate(p.iouThrs) if iouT == iouThr]\n rind = [i for i, recT in enumerate(p.recThrs) if recT == recall_level]\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n tp_plus_fp_cs = self.eval_kdt['tp_plus_fp_cs']\n fp_os = self.eval_kdt['fp_os']\n\n wi = np.mean(fp_os[tind,rind,:,aind,mind]) / np.mean(tp_plus_fp_cs[tind,rind,:,aind,mind])\n \n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, wi))\n\n return wi\n \n def _print_precision(iouThr=.5, areaRng='all', maxDets=100 ):\n p = self.params\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_kdt['precision']\n # IoU\n\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = np.squeeze(s[:,:,:,aind,mind])\n s = s[[10, 20, 30, 40, 50, 60, 70, 80, 90, 100],:]\n \n for i in range(s.shape[1]):\n print(s[:,i])\n\n def _summarize( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Known Average Precision' if ap == 1 else 'Known Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_kdt['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval_kdt['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarize_unk( ap=1, iouThr=None, areaRng='all', maxDets=100 ):\n p = self.params\n iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'\n titleStr = 'Unknown Average Precision' if ap == 1 else 'Unknown Average Recall'\n typeStr = '(AP)' if ap==1 else '(AR)'\n iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \\\n if iouThr is 
None else '{:0.2f}'.format(iouThr)\n\n aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]\n mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]\n if ap == 1:\n # dimension of precision: [TxRxKxAxM]\n s = self.eval_unkdt['precision']\n # IoU\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,:,aind,mind]\n else:\n # dimension of recall: [TxKxAxM]\n s = self.eval_unkdt['recall']\n if iouThr is not None:\n t = np.where(iouThr == p.iouThrs)[0]\n s = s[t]\n s = s[:,aind,mind]\n if len(s[s>-1])==0:\n mean_s = -1\n else:\n mean_s = np.mean(s[s>-1])\n print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))\n return mean_s\n\n def _summarizeDets():\n stats = np.zeros((30,))\n stats[0] = _summarize(1)\n stats[1] = _summarize(1, iouThr=.5, maxDets=self.params.maxDets[-1])\n stats[2] = _summarize(1, iouThr=.75, maxDets=self.params.maxDets[-1])\n stats[3] = _summarize(1, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[4] = _summarize(1, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[5] = _summarize(1, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[6] = _summarize(0, maxDets=self.params.maxDets[0])\n stats[7] = _summarize(0, maxDets=self.params.maxDets[1])\n stats[8] = _summarize(0, maxDets=self.params.maxDets[2])\n stats[9] = _summarize(0, maxDets=self.params.maxDets[3])\n stats[10] = _summarize(0, maxDets=self.params.maxDets[4])\n stats[11] = _summarize(0, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[12] = _summarize(0, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[13] = _summarize(0, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[14] = _wi(iouThr=.5, areaRng='all', maxDets=100, recall_level=0.8)\n stats[15] = _num_unk_det_as_known(iouThr=.5, areaRng='all', maxDets=100)\n \n stats[16] = _summarize_unk(1)\n stats[17] = _summarize_unk(1, iouThr=.5, maxDets=self.params.maxDets[-1])\n stats[18] = _summarize_unk(1, iouThr=.75, maxDets=self.params.maxDets[-1])\n stats[19] = _summarize_unk(1, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[20] = _summarize_unk(1, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[21] = _summarize_unk(1, areaRng='large', maxDets=self.params.maxDets[-1])\n stats[22] = _summarize_unk(0, maxDets=self.params.maxDets[0])\n stats[23] = _summarize_unk(0, maxDets=self.params.maxDets[1])\n stats[24] = _summarize_unk(0, maxDets=self.params.maxDets[2])\n stats[25] = _summarize_unk(0, maxDets=self.params.maxDets[3])\n stats[26] = _summarize_unk(0, maxDets=self.params.maxDets[4])\n stats[27] = _summarize_unk(0, areaRng='small', maxDets=self.params.maxDets[-1])\n stats[28] = _summarize_unk(0, areaRng='medium', maxDets=self.params.maxDets[-1])\n stats[29] = _summarize_unk(0, areaRng='large', maxDets=self.params.maxDets[-1])\n return stats\n \n if not self.eval_kdt or not self.eval_unkdt:\n raise Exception('Please run accumulate() first')\n iouType = self.params.iouType\n if iouType == 'segm' or iouType == 'bbox':\n summarize = _summarizeDets\n self.stats = summarize()" } ]
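The _wi summary in the snippet above defines Wilderness Impact as the open-set false positives divided by all classified detections at a fixed IoU threshold and recall level. A toy recomputation with made-up per-class counts (not real results):

import numpy as np

# Made-up counts at one IoU threshold and recall level, one entry per known class.
fp_os = np.array([3.0, 5.0, 2.0])             # known-class detections matched to unknown GT
tp_plus_fp_cs = np.array([40.0, 55.0, 38.0])  # all known-class detections (closed-set TP + FP)

wi = np.mean(fp_os) / np.mean(tp_plus_fp_cs)
print(f"Wilderness Impact: {wi:.3f}")         # 3.333 / 44.333 -> 0.075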
import contextlib import copy import io import itertools import json import logging import numpy as np import os import pickle import pycocotools.mask as mask_util import torch import detectron2.utils.comm as comm from collections import OrderedDict from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval from tabulate import tabulate from detectron2.config import CfgNode from detectron2.data import MetadataCatalog from detectron2.data.datasets.coco import convert_to_coco_json from detectron2.structures import Boxes, BoxMode, pairwise_iou from detectron2.utils.file_io import PathManager from detectron2.utils.logger import create_small_table from detectron2.evaluation.evaluator import DatasetEvaluator from detectron2.evaluation.coco_evaluation import instances_to_coco_json from openset_rcnn.data.graspnet_meta import GRASPNET_KNOWN_IDS, GRASPNET_KNOWN_CATEGORIES from .os_cocoeval import OpensetCOCOEval
14921
# Copyright (c) Facebook, Inc. and its affiliates.
class OpensetCOCOEvaluator(DatasetEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """

    def __init__(
        self,
        dataset_name,
        eval_type,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
        use_fast_impl=True,
        kpt_oks_sigmas=(),
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm", "keypoints".
                By default, will infer this automatically from predictions.
            distributed (bool): if True, will collect results from all ranks and
                run evaluation in the main process.
                Otherwise, will only evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instances_predictions.pth" a file that can be loaded with
                   `torch.load` and contains all the results in the format they
                   are produced by the model.
                2. "coco_instances_results.json" a json file in COCO's result format.
            max_dets_per_image (list[int]): limit on the maximum number of detections per image.
            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
                Although the results should be very close to the official implementation in COCO
                API, it is still recommended to compute results with the official API for use in
                papers. The faster implementation also uses more RAM.
        """
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._output_dir = output_dir
        self._use_fast_impl = use_fast_impl
        self._max_dets_per_image = max_dets_per_image

        if tasks is not None and isinstance(tasks, CfgNode):
            kpt_oks_sigmas = (
                tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
            )
            self._logger.warning(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Inferring it from predictions should be better
        else:
            self._tasks = tasks

        self._cpu_device = torch.device("cpu")
        self.known_names = GRASPNET_KNOWN_CATEGORIES
# Copyright (c) Facebook, Inc. and its affiliates.
class OpensetCOCOEvaluator(DatasetEvaluator):
    """
    Evaluate AR for object proposals, AP for instance detection/segmentation, AP
    for keypoint detection outputs using COCO's metrics.
    See http://cocodataset.org/#detection-eval and
    http://cocodataset.org/#keypoints-eval to understand its metrics.
    The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
    the metric cannot be computed (e.g. due to no predictions made).

    In addition to COCO, this evaluator is able to support any bounding box detection,
    instance segmentation, or keypoint detection dataset.
    """

    def __init__(
        self,
        dataset_name,
        eval_type,
        tasks=None,
        distributed=True,
        output_dir=None,
        *,
        max_dets_per_image=None,
        use_fast_impl=True,
        kpt_oks_sigmas=(),
    ):
        """
        Args:
            dataset_name (str): name of the dataset to be evaluated.
                It must have either the following corresponding metadata:
                    "json_file": the path to the COCO format annotation
                Or it must be in detectron2's standard dataset format
                so it can be converted to COCO format automatically.
            tasks (tuple[str]): tasks that can be evaluated under the given
                configuration. A task is one of "bbox", "segm", "keypoints".
                By default, will infer this automatically from predictions.
            distributed (bool): if True, will collect results from all ranks and
                run evaluation in the main process.
                Otherwise, will only evaluate the results in the current process.
            output_dir (str): optional, an output directory to dump all
                results predicted on the dataset. The dump contains two files:
                1. "instances_predictions.pth" a file that can be loaded with
                   `torch.load` and contains all the results in the format they
                   are produced by the model.
                2. "coco_instances_results.json" a json file in COCO's result format.
            max_dets_per_image (list[int]): limit on the maximum number of detections per image.
            use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
                Although the results should be very close to the official implementation in COCO
                API, it is still recommended to compute results with the official API for use in
                papers. The faster implementation also uses more RAM.
        """
        self._logger = logging.getLogger(__name__)
        self._distributed = distributed
        self._output_dir = output_dir
        self._use_fast_impl = use_fast_impl
        self._max_dets_per_image = max_dets_per_image

        if tasks is not None and isinstance(tasks, CfgNode):
            kpt_oks_sigmas = (
                tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
            )
            self._logger.warning(
                "COCO Evaluator instantiated using config, this is deprecated behavior."
                " Please pass in explicit arguments instead."
            )
            self._tasks = None  # Inferring it from predictions should be better
        else:
            self._tasks = tasks

        self._cpu_device = torch.device("cpu")
        self.known_names = GRASPNET_KNOWN_CATEGORIES
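For context, `OpensetCOCOEvaluator` plugs into detectron2's `DatasetEvaluator` interface, whose lifecycle is reset/process/evaluate. A toy evaluator showing that contract (the counting metric itself is made up purely for illustration):

# Toy DatasetEvaluator illustrating the reset/process/evaluate contract
# that the class above implements; the metric is illustrative only.
from detectron2.evaluation import DatasetEvaluator

class InstanceCountEvaluator(DatasetEvaluator):
    def reset(self):
        # called before inference starts on the dataset
        self._counts = []

    def process(self, inputs, outputs):
        # called once per batch with paired model inputs/outputs
        for output in outputs:
            self._counts.append(len(output["instances"]))

    def evaluate(self):
        # called after inference; returns {task: {metric: value}}
        mean = sum(self._counts) / max(len(self._counts), 1)
        return {"counting": {"mean_instances_per_image": mean}}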
self.known_ids = GRASPNET_KNOWN_IDS
0
2023-11-21 01:47:01+00:00
24k
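Taken together, a record like the one above (fields `import_statement`, `cropped_code`, `next_line`, `gold_snippet_index`, ...) supports next-line code-completion evaluation. A hedged sketch of an exact-match scorer over such records; `generate_fn` and the scoring rule are assumptions, not part of the dump:

# Assumed exact-match scorer for records shaped like the dump above;
# generate_fn is a placeholder for any code model's completion call.
def score_record(record: dict, generate_fn) -> bool:
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    completion = generate_fn(prompt)
    predicted = completion.splitlines()[0].strip() if completion else ""
    return predicted == record["next_line"].strip()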
jiawei-ren/dreamgaussian4d
diffusers/src/diffusers/models/unet_3d_blocks.py
[ { "identifier": "is_torch_version", "path": "diffusers/src/diffusers/utils/import_utils.py", "snippet": "def is_torch_version(operation: str, version: str):\n \"\"\"\n Args:\n Compares the current PyTorch version to a given reference with an operation.\n operation (`str`):\n A string representation of an operator, such as `\">\"` or `\"<=\"`\n version (`str`):\n A string version of PyTorch\n \"\"\"\n return compare_versions(parse(_torch_version), operation, version)" }, { "identifier": "apply_freeu", "path": "diffusers/src/diffusers/utils/torch_utils.py", "snippet": "def apply_freeu(\n resolution_idx: int, hidden_states: torch.Tensor, res_hidden_states: torch.Tensor, **freeu_kwargs\n) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Applies the FreeU mechanism as introduced in https:\n //arxiv.org/abs/2309.11497. Adapted from the official code repository: https://github.com/ChenyangSi/FreeU.\n\n Args:\n resolution_idx (`int`): Integer denoting the UNet block where FreeU is being applied.\n hidden_states (`torch.Tensor`): Inputs to the underlying block.\n res_hidden_states (`torch.Tensor`): Features from the skip block corresponding to the underlying block.\n s1 (`float`): Scaling factor for stage 1 to attenuate the contributions of the skip features.\n s2 (`float`): Scaling factor for stage 2 to attenuate the contributions of the skip features.\n b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.\n b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.\n \"\"\"\n if resolution_idx == 0:\n num_half_channels = hidden_states.shape[1] // 2\n hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs[\"b1\"]\n res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs[\"s1\"])\n if resolution_idx == 1:\n num_half_channels = hidden_states.shape[1] // 2\n hidden_states[:, :num_half_channels] = hidden_states[:, :num_half_channels] * freeu_kwargs[\"b2\"]\n res_hidden_states = fourier_filter(res_hidden_states, threshold=1, scale=freeu_kwargs[\"s2\"])\n\n return hidden_states, res_hidden_states" }, { "identifier": "Attention", "path": "diffusers/src/diffusers/models/attention.py", "snippet": "def _chunked_feed_forward(\n ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None\n):\n def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):\n def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:\n def __init__(\n self,\n dim: int,\n num_attention_heads: int,\n attention_head_dim: int,\n dropout=0.0,\n cross_attention_dim: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n attention_bias: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_elementwise_affine: bool = True,\n norm_type: str = \"layer_norm\", # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'\n norm_eps: float = 1e-5,\n final_dropout: bool = False,\n attention_type: str = \"default\",\n positional_embeddings: Optional[str] = None,\n num_positional_embeddings: Optional[int] = None,\n ):\n def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: 
Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n ) -> torch.FloatTensor:\n def __init__(\n self,\n dim: int,\n time_mix_inner_dim: int,\n num_attention_heads: int,\n attention_head_dim: int,\n cross_attention_dim: Optional[int] = None,\n ):\n def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n num_frames: int,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n ) -> torch.FloatTensor:\n def __init__(\n self,\n dim: int,\n dim_out: Optional[int] = None,\n mult: int = 4,\n dropout: float = 0.0,\n activation_fn: str = \"geglu\",\n final_dropout: bool = False,\n ):\n def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:\nclass GatedSelfAttentionDense(nn.Module):\nclass BasicTransformerBlock(nn.Module):\nclass TemporalBasicTransformerBlock(nn.Module):\nclass FeedForward(nn.Module):" }, { "identifier": "DualTransformer2DModel", "path": "diffusers/src/diffusers/models/dual_transformer_2d.py", "snippet": "class DualTransformer2DModel(nn.Module):\n \"\"\"\n Dual transformer wrapper that combines two `Transformer2DModel`s for mixed inference.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n Pass if the input is continuous. The number of channels in the input and output.\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.1): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use.\n sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images.\n Note that this is fixed at training time as it is used for learning a number of position embeddings. See\n `ImagePositionalEmbeddings`.\n num_vector_embeds (`int`, *optional*):\n Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels.\n Includes the class for the masked latent pixel.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to be used in feed-forward.\n num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`.\n The number of diffusion steps used during training. Note that this is fixed at training time as it is used\n to learn a number of embeddings that are added to the hidden states. 
During inference, you can denoise for\n up to but not more than steps than `num_embeds_ada_norm`.\n attention_bias (`bool`, *optional*):\n Configure if the TransformerBlocks' attention should contain a bias parameter.\n \"\"\"\n\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n num_vector_embeds: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n ):\n super().__init__()\n self.transformers = nn.ModuleList(\n [\n Transformer2DModel(\n num_attention_heads=num_attention_heads,\n attention_head_dim=attention_head_dim,\n in_channels=in_channels,\n num_layers=num_layers,\n dropout=dropout,\n norm_num_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attention_bias=attention_bias,\n sample_size=sample_size,\n num_vector_embeds=num_vector_embeds,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n )\n for _ in range(2)\n ]\n )\n\n # Variables that can be set by a pipeline:\n\n # The ratio of transformer1 to transformer2's output states to be combined during inference\n self.mix_ratio = 0.5\n\n # The shape of `encoder_hidden_states` is expected to be\n # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`\n self.condition_lengths = [77, 257]\n\n # Which transformer to use to encode which condition.\n # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`\n self.transformer_index_for_condition = [1, 0]\n\n def forward(\n self,\n hidden_states,\n encoder_hidden_states,\n timestep=None,\n attention_mask=None,\n cross_attention_kwargs=None,\n return_dict: bool = True,\n ):\n \"\"\"\n Args:\n hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`.\n When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input\n hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.long`, *optional*):\n Optional timestep to be applied as an embedding in AdaLayerNorm's. Used to indicate denoising step.\n attention_mask (`torch.FloatTensor`, *optional*):\n Optional attention mask to be applied in Attention.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`:\n [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n input_states = hidden_states\n\n encoded_states = []\n tokens_start = 0\n # attention_mask is not used yet\n for i in range(2):\n # for each of the two transformers, pass the corresponding condition tokens\n condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]\n transformer_index = self.transformer_index_for_condition[i]\n encoded_state = self.transformers[transformer_index](\n input_states,\n encoder_hidden_states=condition_state,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n encoded_states.append(encoded_state - input_states)\n tokens_start += self.condition_lengths[i]\n\n output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)\n output_states = output_states + input_states\n\n if not return_dict:\n return (output_states,)\n\n return Transformer2DModelOutput(sample=output_states)" }, { "identifier": "Downsample2D", "path": "diffusers/src/diffusers/models/resnet.py", "snippet": "class Downsample2D(nn.Module):\n \"\"\"A 2D downsampling layer with an optional convolution.\n\n Parameters:\n channels (`int`):\n number of channels in the inputs and outputs.\n use_conv (`bool`, default `False`):\n option to use a convolution.\n out_channels (`int`, optional):\n number of output channels. Defaults to `channels`.\n padding (`int`, default `1`):\n padding for the convolution.\n name (`str`, default `conv`):\n name of the downsampling 2D layer.\n \"\"\"\n\n def __init__(\n self,\n channels: int,\n use_conv: bool = False,\n out_channels: Optional[int] = None,\n padding: int = 1,\n name: str = \"conv\",\n ):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.padding = padding\n stride = 2\n self.name = name\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n\n if use_conv:\n conv = conv_cls(self.channels, self.out_channels, 3, stride=stride, padding=padding)\n else:\n assert self.channels == self.out_channels\n conv = nn.AvgPool2d(kernel_size=stride, stride=stride)\n\n # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed\n if name == \"conv\":\n self.Conv2d_0 = conv\n self.conv = conv\n elif name == \"Conv2d_0\":\n self.conv = conv\n else:\n self.conv = conv\n\n def forward(self, hidden_states: torch.FloatTensor, scale: float = 1.0) -> torch.FloatTensor:\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv and self.padding == 0:\n pad = (0, 1, 0, 1)\n hidden_states = F.pad(hidden_states, pad, mode=\"constant\", value=0)\n\n assert hidden_states.shape[1] == self.channels\n\n if not USE_PEFT_BACKEND:\n if isinstance(self.conv, LoRACompatibleConv):\n hidden_states = self.conv(hidden_states, scale)\n else:\n hidden_states = self.conv(hidden_states)\n else:\n hidden_states = self.conv(hidden_states)\n\n return hidden_states" }, { "identifier": "ResnetBlock2D", "path": "diffusers/src/diffusers/models/resnet.py", "snippet": "class ResnetBlock2D(nn.Module):\n r\"\"\"\n A Resnet block.\n\n Parameters:\n in_channels (`int`): The number of channels in the input.\n out_channels (`int`, *optional*, default to be `None`):\n The number of output channels for the first conv2d layer. 
If None, same as `in_channels`.\n dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.\n temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.\n groups (`int`, *optional*, default to `32`): The number of groups to use for the first normalization layer.\n groups_out (`int`, *optional*, default to None):\n The number of groups to use for the second normalization layer. if set to None, same as `groups`.\n eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the normalization.\n non_linearity (`str`, *optional*, default to `\"swish\"`): the activation function to use.\n time_embedding_norm (`str`, *optional*, default to `\"default\"` ): Time scale shift config.\n By default, apply timestep embedding conditioning with a simple shift mechanism. Choose \"scale_shift\" or\n \"ada_group\" for a stronger conditioning with scale and shift.\n kernel (`torch.FloatTensor`, optional, default to None): FIR filter, see\n [`~models.resnet.FirUpsample2D`] and [`~models.resnet.FirDownsample2D`].\n output_scale_factor (`float`, *optional*, default to be `1.0`): the scale factor to use for the output.\n use_in_shortcut (`bool`, *optional*, default to `True`):\n If `True`, add a 1x1 nn.conv2d layer for skip-connection.\n up (`bool`, *optional*, default to `False`): If `True`, add an upsample layer.\n down (`bool`, *optional*, default to `False`): If `True`, add a downsample layer.\n conv_shortcut_bias (`bool`, *optional*, default to `True`): If `True`, adds a learnable bias to the\n `conv_shortcut` output.\n conv_2d_out_channels (`int`, *optional*, default to `None`): the number of channels in the output.\n If None, same as `out_channels`.\n \"\"\"\n\n def __init__(\n self,\n *,\n in_channels: int,\n out_channels: Optional[int] = None,\n conv_shortcut: bool = False,\n dropout: float = 0.0,\n temb_channels: int = 512,\n groups: int = 32,\n groups_out: Optional[int] = None,\n pre_norm: bool = True,\n eps: float = 1e-6,\n non_linearity: str = \"swish\",\n skip_time_act: bool = False,\n time_embedding_norm: str = \"default\", # default, scale_shift, ada_group, spatial\n kernel: Optional[torch.FloatTensor] = None,\n output_scale_factor: float = 1.0,\n use_in_shortcut: Optional[bool] = None,\n up: bool = False,\n down: bool = False,\n conv_shortcut_bias: bool = True,\n conv_2d_out_channels: Optional[int] = None,\n ):\n super().__init__()\n self.pre_norm = pre_norm\n self.pre_norm = True\n self.in_channels = in_channels\n out_channels = in_channels if out_channels is None else out_channels\n self.out_channels = out_channels\n self.use_conv_shortcut = conv_shortcut\n self.up = up\n self.down = down\n self.output_scale_factor = output_scale_factor\n self.time_embedding_norm = time_embedding_norm\n self.skip_time_act = skip_time_act\n\n linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n\n if groups_out is None:\n groups_out = groups\n\n if self.time_embedding_norm == \"ada_group\":\n self.norm1 = AdaGroupNorm(temb_channels, in_channels, groups, eps=eps)\n elif self.time_embedding_norm == \"spatial\":\n self.norm1 = SpatialNorm(in_channels, temb_channels)\n else:\n self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)\n\n self.conv1 = conv_cls(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n\n if temb_channels is not None:\n if self.time_embedding_norm == \"default\":\n self.time_emb_proj 
= linear_cls(temb_channels, out_channels)\n elif self.time_embedding_norm == \"scale_shift\":\n self.time_emb_proj = linear_cls(temb_channels, 2 * out_channels)\n elif self.time_embedding_norm == \"ada_group\" or self.time_embedding_norm == \"spatial\":\n self.time_emb_proj = None\n else:\n raise ValueError(f\"unknown time_embedding_norm : {self.time_embedding_norm} \")\n else:\n self.time_emb_proj = None\n\n if self.time_embedding_norm == \"ada_group\":\n self.norm2 = AdaGroupNorm(temb_channels, out_channels, groups_out, eps=eps)\n elif self.time_embedding_norm == \"spatial\":\n self.norm2 = SpatialNorm(out_channels, temb_channels)\n else:\n self.norm2 = torch.nn.GroupNorm(num_groups=groups_out, num_channels=out_channels, eps=eps, affine=True)\n\n self.dropout = torch.nn.Dropout(dropout)\n conv_2d_out_channels = conv_2d_out_channels or out_channels\n self.conv2 = conv_cls(out_channels, conv_2d_out_channels, kernel_size=3, stride=1, padding=1)\n\n self.nonlinearity = get_activation(non_linearity)\n\n self.upsample = self.downsample = None\n if self.up:\n if kernel == \"fir\":\n fir_kernel = (1, 3, 3, 1)\n self.upsample = lambda x: upsample_2d(x, kernel=fir_kernel)\n elif kernel == \"sde_vp\":\n self.upsample = partial(F.interpolate, scale_factor=2.0, mode=\"nearest\")\n else:\n self.upsample = Upsample2D(in_channels, use_conv=False)\n elif self.down:\n if kernel == \"fir\":\n fir_kernel = (1, 3, 3, 1)\n self.downsample = lambda x: downsample_2d(x, kernel=fir_kernel)\n elif kernel == \"sde_vp\":\n self.downsample = partial(F.avg_pool2d, kernel_size=2, stride=2)\n else:\n self.downsample = Downsample2D(in_channels, use_conv=False, padding=1, name=\"op\")\n\n self.use_in_shortcut = self.in_channels != conv_2d_out_channels if use_in_shortcut is None else use_in_shortcut\n\n self.conv_shortcut = None\n if self.use_in_shortcut:\n self.conv_shortcut = conv_cls(\n in_channels,\n conv_2d_out_channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=conv_shortcut_bias,\n )\n\n def forward(\n self,\n input_tensor: torch.FloatTensor,\n temb: torch.FloatTensor,\n scale: float = 1.0,\n ) -> torch.FloatTensor:\n hidden_states = input_tensor\n\n if self.time_embedding_norm == \"ada_group\" or self.time_embedding_norm == \"spatial\":\n hidden_states = self.norm1(hidden_states, temb)\n else:\n hidden_states = self.norm1(hidden_states)\n\n hidden_states = self.nonlinearity(hidden_states)\n\n if self.upsample is not None:\n # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n input_tensor = input_tensor.contiguous()\n hidden_states = hidden_states.contiguous()\n input_tensor = (\n self.upsample(input_tensor, scale=scale)\n if isinstance(self.upsample, Upsample2D)\n else self.upsample(input_tensor)\n )\n hidden_states = (\n self.upsample(hidden_states, scale=scale)\n if isinstance(self.upsample, Upsample2D)\n else self.upsample(hidden_states)\n )\n elif self.downsample is not None:\n input_tensor = (\n self.downsample(input_tensor, scale=scale)\n if isinstance(self.downsample, Downsample2D)\n else self.downsample(input_tensor)\n )\n hidden_states = (\n self.downsample(hidden_states, scale=scale)\n if isinstance(self.downsample, Downsample2D)\n else self.downsample(hidden_states)\n )\n\n hidden_states = self.conv1(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv1(hidden_states)\n\n if self.time_emb_proj is not None:\n if not self.skip_time_act:\n temb = self.nonlinearity(temb)\n temb = (\n self.time_emb_proj(temb, scale)[:, :, None, None]\n if not USE_PEFT_BACKEND\n else self.time_emb_proj(temb)[:, :, None, None]\n )\n\n if temb is not None and self.time_embedding_norm == \"default\":\n hidden_states = hidden_states + temb\n\n if self.time_embedding_norm == \"ada_group\" or self.time_embedding_norm == \"spatial\":\n hidden_states = self.norm2(hidden_states, temb)\n else:\n hidden_states = self.norm2(hidden_states)\n\n if temb is not None and self.time_embedding_norm == \"scale_shift\":\n scale, shift = torch.chunk(temb, 2, dim=1)\n hidden_states = hidden_states * (1 + scale) + shift\n\n hidden_states = self.nonlinearity(hidden_states)\n\n hidden_states = self.dropout(hidden_states)\n hidden_states = self.conv2(hidden_states, scale) if not USE_PEFT_BACKEND else self.conv2(hidden_states)\n\n if self.conv_shortcut is not None:\n input_tensor = (\n self.conv_shortcut(input_tensor, scale) if not USE_PEFT_BACKEND else self.conv_shortcut(input_tensor)\n )\n\n output_tensor = (input_tensor + hidden_states) / self.output_scale_factor\n\n return output_tensor" }, { "identifier": "SpatioTemporalResBlock", "path": "diffusers/src/diffusers/models/resnet.py", "snippet": "class SpatioTemporalResBlock(nn.Module):\n r\"\"\"\n A SpatioTemporal Resnet block.\n\n Parameters:\n in_channels (`int`): The number of channels in the input.\n out_channels (`int`, *optional*, default to be `None`):\n The number of output channels for the first conv2d layer. 
If None, same as `in_channels`.\n temb_channels (`int`, *optional*, default to `512`): the number of channels in timestep embedding.\n eps (`float`, *optional*, defaults to `1e-6`): The epsilon to use for the spatial resenet.\n temporal_eps (`float`, *optional*, defaults to `eps`): The epsilon to use for the temporal resnet.\n merge_factor (`float`, *optional*, defaults to `0.5`): The merge factor to use for the temporal mixing.\n merge_strategy (`str`, *optional*, defaults to `learned_with_images`):\n The merge strategy to use for the temporal mixing.\n switch_spatial_to_temporal_mix (`bool`, *optional*, defaults to `False`):\n If `True`, switch the spatial and temporal mixing.\n \"\"\"\n\n def __init__(\n self,\n in_channels: int,\n out_channels: Optional[int] = None,\n temb_channels: int = 512,\n eps: float = 1e-6,\n temporal_eps: Optional[float] = None,\n merge_factor: float = 0.5,\n merge_strategy=\"learned_with_images\",\n switch_spatial_to_temporal_mix: bool = False,\n ):\n super().__init__()\n\n self.spatial_res_block = ResnetBlock2D(\n in_channels=in_channels,\n out_channels=out_channels,\n temb_channels=temb_channels,\n eps=eps,\n )\n\n self.temporal_res_block = TemporalResnetBlock(\n in_channels=out_channels if out_channels is not None else in_channels,\n out_channels=out_channels if out_channels is not None else in_channels,\n temb_channels=temb_channels,\n eps=temporal_eps if temporal_eps is not None else eps,\n )\n\n self.time_mixer = AlphaBlender(\n alpha=merge_factor,\n merge_strategy=merge_strategy,\n switch_spatial_to_temporal_mix=switch_spatial_to_temporal_mix,\n )\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n image_only_indicator: Optional[torch.Tensor] = None,\n ):\n num_frames = image_only_indicator.shape[-1]\n hidden_states = self.spatial_res_block(hidden_states, temb)\n\n batch_frames, channels, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n hidden_states_mix = (\n hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)\n )\n hidden_states = (\n hidden_states[None, :].reshape(batch_size, num_frames, channels, height, width).permute(0, 2, 1, 3, 4)\n )\n\n if temb is not None:\n temb = temb.reshape(batch_size, num_frames, -1)\n\n hidden_states = self.temporal_res_block(hidden_states, temb)\n hidden_states = self.time_mixer(\n x_spatial=hidden_states_mix,\n x_temporal=hidden_states,\n image_only_indicator=image_only_indicator,\n )\n\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(batch_frames, channels, height, width)\n return hidden_states" }, { "identifier": "TemporalConvLayer", "path": "diffusers/src/diffusers/models/resnet.py", "snippet": "class TemporalConvLayer(nn.Module):\n \"\"\"\n Temporal convolutional layer that can be used for video (sequence of images) input Code mostly copied from:\n https://github.com/modelscope/modelscope/blob/1509fdb973e5871f37148a4b5e5964cafd43e64d/modelscope/models/multi_modal/video_synthesis/unet_sd.py#L1016\n\n Parameters:\n in_dim (`int`): Number of input channels.\n out_dim (`int`): Number of output channels.\n dropout (`float`, *optional*, defaults to `0.0`): The dropout probability to use.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: Optional[int] = None,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n ):\n super().__init__()\n out_dim = out_dim or in_dim\n self.in_dim = in_dim\n self.out_dim = out_dim\n\n # conv layers\n self.conv1 = 
nn.Sequential(\n nn.GroupNorm(norm_num_groups, in_dim),\n nn.SiLU(),\n nn.Conv3d(in_dim, out_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv2 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv3 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n self.conv4 = nn.Sequential(\n nn.GroupNorm(norm_num_groups, out_dim),\n nn.SiLU(),\n nn.Dropout(dropout),\n nn.Conv3d(out_dim, in_dim, (3, 1, 1), padding=(1, 0, 0)),\n )\n\n # zero out the last layer params,so the conv block is identity\n nn.init.zeros_(self.conv4[-1].weight)\n nn.init.zeros_(self.conv4[-1].bias)\n\n def forward(self, hidden_states: torch.Tensor, num_frames: int = 1) -> torch.Tensor:\n hidden_states = (\n hidden_states[None, :].reshape((-1, num_frames) + hidden_states.shape[1:]).permute(0, 2, 1, 3, 4)\n )\n\n identity = hidden_states\n hidden_states = self.conv1(hidden_states)\n hidden_states = self.conv2(hidden_states)\n hidden_states = self.conv3(hidden_states)\n hidden_states = self.conv4(hidden_states)\n\n hidden_states = identity + hidden_states\n\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4).reshape(\n (hidden_states.shape[0] * hidden_states.shape[2], -1) + hidden_states.shape[3:]\n )\n return hidden_states" }, { "identifier": "Upsample2D", "path": "diffusers/src/diffusers/models/resnet.py", "snippet": "class Upsample2D(nn.Module):\n \"\"\"A 2D upsampling layer with an optional convolution.\n\n Parameters:\n channels (`int`):\n number of channels in the inputs and outputs.\n use_conv (`bool`, default `False`):\n option to use a convolution.\n use_conv_transpose (`bool`, default `False`):\n option to use a convolution transpose.\n out_channels (`int`, optional):\n number of output channels. Defaults to `channels`.\n name (`str`, default `conv`):\n name of the upsampling 2D layer.\n \"\"\"\n\n def __init__(\n self,\n channels: int,\n use_conv: bool = False,\n use_conv_transpose: bool = False,\n out_channels: Optional[int] = None,\n name: str = \"conv\",\n ):\n super().__init__()\n self.channels = channels\n self.out_channels = out_channels or channels\n self.use_conv = use_conv\n self.use_conv_transpose = use_conv_transpose\n self.name = name\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n\n conv = None\n if use_conv_transpose:\n conv = nn.ConvTranspose2d(channels, self.out_channels, 4, 2, 1)\n elif use_conv:\n conv = conv_cls(self.channels, self.out_channels, 3, padding=1)\n\n # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed\n if name == \"conv\":\n self.conv = conv\n else:\n self.Conv2d_0 = conv\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n output_size: Optional[int] = None,\n scale: float = 1.0,\n ) -> torch.FloatTensor:\n assert hidden_states.shape[1] == self.channels\n\n if self.use_conv_transpose:\n return self.conv(hidden_states)\n\n # Cast to float32 to as 'upsample_nearest2d_out_frame' op does not support bfloat16\n # TODO(Suraj): Remove this cast once the issue is fixed in PyTorch\n # https://github.com/pytorch/pytorch/issues/86679\n dtype = hidden_states.dtype\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(torch.float32)\n\n # upsample_nearest_nhwc fails with large batch sizes. 
see https://github.com/huggingface/diffusers/issues/984\n if hidden_states.shape[0] >= 64:\n hidden_states = hidden_states.contiguous()\n\n # if `output_size` is passed we force the interpolation output\n # size and do not make use of `scale_factor=2`\n if output_size is None:\n hidden_states = F.interpolate(hidden_states, scale_factor=2.0, mode=\"nearest\")\n else:\n hidden_states = F.interpolate(hidden_states, size=output_size, mode=\"nearest\")\n\n # If the input is bfloat16, we cast back to bfloat16\n if dtype == torch.bfloat16:\n hidden_states = hidden_states.to(dtype)\n\n # TODO(Suraj, Patrick) - clean up after weight dicts are correctly renamed\n if self.use_conv:\n if self.name == \"conv\":\n if isinstance(self.conv, LoRACompatibleConv) and not USE_PEFT_BACKEND:\n hidden_states = self.conv(hidden_states, scale)\n else:\n hidden_states = self.conv(hidden_states)\n else:\n if isinstance(self.Conv2d_0, LoRACompatibleConv) and not USE_PEFT_BACKEND:\n hidden_states = self.Conv2d_0(hidden_states, scale)\n else:\n hidden_states = self.Conv2d_0(hidden_states)\n\n return hidden_states" }, { "identifier": "Transformer2DModel", "path": "diffusers/src/diffusers/models/transformer_2d.py", "snippet": "class Transformer2DModel(ModelMixin, ConfigMixin):\n \"\"\"\n A 2D Transformer model for image-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n num_vector_embeds (`int`, *optional*):\n The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).\n Includes the class for the masked latent pixel.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`): Activation function to use in feed-forward.\n num_embeds_ada_norm ( `int`, *optional*):\n The number of diffusion steps used during training. Pass if at least one of the norm_layers is\n `AdaLayerNorm`. 
This is fixed during training since it is used to learn a number of embeddings that are\n added to the hidden states.\n\n During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlocks` attention should contain a bias parameter.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n num_vector_embeds: Optional[int] = None,\n patch_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n num_embeds_ada_norm: Optional[int] = None,\n use_linear_projection: bool = False,\n only_cross_attention: bool = False,\n double_self_attention: bool = False,\n upcast_attention: bool = False,\n norm_type: str = \"layer_norm\",\n norm_elementwise_affine: bool = True,\n norm_eps: float = 1e-5,\n attention_type: str = \"default\",\n caption_channels: int = None,\n ):\n super().__init__()\n self.use_linear_projection = use_linear_projection\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv\n linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear\n\n # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`\n # Define whether input is continuous or discrete depending on configuration\n self.is_input_continuous = (in_channels is not None) and (patch_size is None)\n self.is_input_vectorized = num_vector_embeds is not None\n self.is_input_patches = in_channels is not None and patch_size is not None\n\n if norm_type == \"layer_norm\" and num_embeds_ada_norm is not None:\n deprecation_message = (\n f\"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or\"\n \" incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config.\"\n \" Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect\"\n \" results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it\"\n \" would be very nice if you could open a Pull request for the `transformer/config.json` file\"\n )\n deprecate(\"norm_type!=num_embeds_ada_norm\", \"1.0.0\", deprecation_message, standard_warn=False)\n norm_type = \"ada_norm\"\n\n if self.is_input_continuous and self.is_input_vectorized:\n raise ValueError(\n f\"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make\"\n \" sure that either `in_channels` or `num_vector_embeds` is None.\"\n )\n elif self.is_input_vectorized and self.is_input_patches:\n raise ValueError(\n f\"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. 
Make\"\n \" sure that either `num_vector_embeds` or `num_patches` is None.\"\n )\n elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:\n raise ValueError(\n f\"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:\"\n f\" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None.\"\n )\n\n # 2. Define input layers\n if self.is_input_continuous:\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n if use_linear_projection:\n self.proj_in = linear_cls(in_channels, inner_dim)\n else:\n self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)\n elif self.is_input_vectorized:\n assert sample_size is not None, \"Transformer2DModel over discrete input must provide sample_size\"\n assert num_vector_embeds is not None, \"Transformer2DModel over discrete input must provide num_embed\"\n\n self.height = sample_size\n self.width = sample_size\n self.num_vector_embeds = num_vector_embeds\n self.num_latent_pixels = self.height * self.width\n\n self.latent_image_embedding = ImagePositionalEmbeddings(\n num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width\n )\n elif self.is_input_patches:\n assert sample_size is not None, \"Transformer2DModel over patched input must provide sample_size\"\n\n self.height = sample_size\n self.width = sample_size\n\n self.patch_size = patch_size\n interpolation_scale = self.config.sample_size // 64 # => 64 (= 512 pixart) has interpolation scale 1\n interpolation_scale = max(interpolation_scale, 1)\n self.pos_embed = PatchEmbed(\n height=sample_size,\n width=sample_size,\n patch_size=patch_size,\n in_channels=in_channels,\n embed_dim=inner_dim,\n interpolation_scale=interpolation_scale,\n )\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n num_embeds_ada_norm=num_embeds_ada_norm,\n attention_bias=attention_bias,\n only_cross_attention=only_cross_attention,\n double_self_attention=double_self_attention,\n upcast_attention=upcast_attention,\n norm_type=norm_type,\n norm_elementwise_affine=norm_elementwise_affine,\n norm_eps=norm_eps,\n attention_type=attention_type,\n )\n for d in range(num_layers)\n ]\n )\n\n # 4. 
Define output layers\n self.out_channels = in_channels if out_channels is None else out_channels\n if self.is_input_continuous:\n # TODO: should use out_channels for continuous projections\n if use_linear_projection:\n self.proj_out = linear_cls(inner_dim, in_channels)\n else:\n self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)\n elif self.is_input_vectorized:\n self.norm_out = nn.LayerNorm(inner_dim)\n self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)\n elif self.is_input_patches and norm_type != \"ada_norm_single\":\n self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)\n self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)\n self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)\n elif self.is_input_patches and norm_type == \"ada_norm_single\":\n self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)\n self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)\n self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)\n\n # 5. PixArt-Alpha blocks.\n self.adaln_single = None\n self.use_additional_conditions = False\n if norm_type == \"ada_norm_single\":\n self.use_additional_conditions = self.config.sample_size == 128\n # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use\n # additional conditions until we find better name\n self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions)\n\n self.caption_projection = None\n if caption_channels is not None:\n self.caption_projection = CaptionProjection(in_features=caption_channels, hidden_size=inner_dim)\n\n self.gradient_checkpointing = False\n\n def _set_gradient_checkpointing(self, module, value=False):\n if hasattr(module, \"gradient_checkpointing\"):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n added_cond_kwargs: Dict[str, torch.Tensor] = None,\n class_labels: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n attention_mask: Optional[torch.Tensor] = None,\n encoder_attention_mask: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ):\n \"\"\"\n The [`Transformer2DModel`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input `hidden_states`.\n encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.LongTensor`, *optional*):\n Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. 
Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n cross_attention_kwargs ( `Dict[str, Any]`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n attention_mask ( `torch.Tensor`, *optional*):\n An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask\n is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large\n negative values to the attention scores corresponding to \"discard\" tokens.\n encoder_attention_mask ( `torch.Tensor`, *optional*):\n Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:\n\n * Mask `(batch, sequence_length)` True = keep, False = discard.\n * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.\n\n If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format\n above. This bias will be added to the cross-attention scores.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a\n `tuple` where the first element is the sample tensor.\n \"\"\"\n # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.\n # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.\n # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.\n # expects mask of shape:\n # [batch, key_tokens]\n # adds singleton query_tokens dimension:\n # [batch, 1, key_tokens]\n # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:\n # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)\n # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)\n if attention_mask is not None and attention_mask.ndim == 2:\n # assume that mask is expressed as:\n # (1 = keep, 0 = discard)\n # convert mask into a bias that can be added to attention scores:\n # (keep = +0, discard = -10000.0)\n attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # convert encoder_attention_mask to a bias the same way we do for attention_mask\n if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:\n encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0\n encoder_attention_mask = encoder_attention_mask.unsqueeze(1)\n\n # Retrieve lora scale.\n lora_scale = cross_attention_kwargs.get(\"scale\", 1.0) if cross_attention_kwargs is not None else 1.0\n\n # 1. 
Input\n if self.is_input_continuous:\n batch, _, height, width = hidden_states.shape\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n if not self.use_linear_projection:\n hidden_states = (\n self.proj_in(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_in(hidden_states)\n )\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n else:\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)\n hidden_states = (\n self.proj_in(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_in(hidden_states)\n )\n\n elif self.is_input_vectorized:\n hidden_states = self.latent_image_embedding(hidden_states)\n elif self.is_input_patches:\n height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size\n hidden_states = self.pos_embed(hidden_states)\n\n if self.adaln_single is not None:\n if self.use_additional_conditions and added_cond_kwargs is None:\n raise ValueError(\n \"`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`.\"\n )\n batch_size = hidden_states.shape[0]\n timestep, embedded_timestep = self.adaln_single(\n timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype\n )\n\n # 2. Blocks\n if self.caption_projection is not None:\n batch_size = hidden_states.shape[0]\n encoder_hidden_states = self.caption_projection(encoder_hidden_states)\n encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])\n\n for block in self.transformer_blocks:\n if self.training and self.gradient_checkpointing:\n\n def create_custom_forward(module, return_dict=None):\n def custom_forward(*inputs):\n if return_dict is not None:\n return module(*inputs, return_dict=return_dict)\n else:\n return module(*inputs)\n\n return custom_forward\n\n ckpt_kwargs: Dict[str, Any] = {\"use_reentrant\": False} if is_torch_version(\">=\", \"1.11.0\") else {}\n hidden_states = torch.utils.checkpoint.checkpoint(\n create_custom_forward(block),\n hidden_states,\n attention_mask,\n encoder_hidden_states,\n encoder_attention_mask,\n timestep,\n cross_attention_kwargs,\n class_labels,\n **ckpt_kwargs,\n )\n else:\n hidden_states = block(\n hidden_states,\n attention_mask=attention_mask,\n encoder_hidden_states=encoder_hidden_states,\n encoder_attention_mask=encoder_attention_mask,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. 
Output\n if self.is_input_continuous:\n if not self.use_linear_projection:\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n hidden_states = (\n self.proj_out(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_out(hidden_states)\n )\n else:\n hidden_states = (\n self.proj_out(hidden_states, scale=lora_scale)\n if not USE_PEFT_BACKEND\n else self.proj_out(hidden_states)\n )\n hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n\n output = hidden_states + residual\n elif self.is_input_vectorized:\n hidden_states = self.norm_out(hidden_states)\n logits = self.out(hidden_states)\n # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)\n logits = logits.permute(0, 2, 1)\n\n # log(p(x_0))\n output = F.log_softmax(logits.double(), dim=1).float()\n\n if self.is_input_patches:\n if self.config.norm_type != \"ada_norm_single\":\n conditioning = self.transformer_blocks[0].norm1.emb(\n timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)\n hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]\n hidden_states = self.proj_out_2(hidden_states)\n elif self.config.norm_type == \"ada_norm_single\":\n shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)\n hidden_states = self.norm_out(hidden_states)\n # Modulation\n hidden_states = hidden_states * (1 + scale) + shift\n hidden_states = self.proj_out(hidden_states)\n hidden_states = hidden_states.squeeze(1)\n\n # unpatchify\n if self.adaln_single is None:\n height = width = int(hidden_states.shape[1] ** 0.5)\n hidden_states = hidden_states.reshape(\n shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)\n )\n hidden_states = torch.einsum(\"nhwpqc->nchpwq\", hidden_states)\n output = hidden_states.reshape(\n shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)\n )\n\n if not return_dict:\n return (output,)\n\n return Transformer2DModelOutput(sample=output)" }, { "identifier": "TransformerSpatioTemporalModel", "path": "diffusers/src/diffusers/models/transformer_temporal.py", "snippet": "class TransformerSpatioTemporalModel(nn.Module):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n out_channels (`int`, *optional*):\n The number of channels in the output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n \"\"\"\n\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: int = 320,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n cross_attention_dim: Optional[int] = None,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n\n inner_dim = num_attention_heads * attention_head_dim\n self.inner_dim = inner_dim\n\n # 2. 
Define input layers\n self.in_channels = in_channels\n self.norm = torch.nn.GroupNorm(num_groups=32, num_channels=in_channels, eps=1e-6)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n cross_attention_dim=cross_attention_dim,\n )\n for d in range(num_layers)\n ]\n )\n\n time_mix_inner_dim = inner_dim\n self.temporal_transformer_blocks = nn.ModuleList(\n [\n TemporalBasicTransformerBlock(\n inner_dim,\n time_mix_inner_dim,\n num_attention_heads,\n attention_head_dim,\n cross_attention_dim=cross_attention_dim,\n )\n for _ in range(num_layers)\n ]\n )\n\n time_embed_dim = in_channels * 4\n self.time_pos_embed = TimestepEmbedding(in_channels, time_embed_dim, out_dim=in_channels)\n self.time_proj = Timesteps(in_channels, True, 0)\n self.time_mixer = AlphaBlender(alpha=0.5, merge_strategy=\"learned_with_images\")\n\n # 4. Define output layers\n self.out_channels = in_channels if out_channels is None else out_channels\n # TODO: should use out_channels for continuous projections\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n self.gradient_checkpointing = False\n\n def forward(\n self,\n hidden_states: torch.Tensor,\n encoder_hidden_states: Optional[torch.Tensor] = None,\n image_only_indicator: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ):\n \"\"\"\n Args:\n hidden_states (`torch.FloatTensor` of shape `(batch size, channel, height, width)`):\n Input hidden_states.\n num_frames (`int`):\n The number of frames to be processed per batch. This is used to reshape the hidden states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n image_only_indicator (`torch.LongTensor` of shape `(batch size, num_frames)`, *optional*):\n A tensor indicating whether the input contains only images. 1 indicates that the input contains only\n images, 0 indicates that the input contains video frames.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.transformer_temporal.TransformerTemporalModelOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is\n returned, otherwise a `tuple` where the first element is the sample tensor.\n \"\"\"\n # 1. 
Input\n batch_frames, _, height, width = hidden_states.shape\n num_frames = image_only_indicator.shape[-1]\n batch_size = batch_frames // num_frames\n\n time_context = encoder_hidden_states\n time_context_first_timestep = time_context[None, :].reshape(\n batch_size, num_frames, -1, time_context.shape[-1]\n )[:, 0]\n time_context = time_context_first_timestep[None, :].broadcast_to(\n height * width, batch_size, 1, time_context.shape[-1]\n )\n time_context = time_context.reshape(height * width * batch_size, 1, time_context.shape[-1])\n\n residual = hidden_states\n\n hidden_states = self.norm(hidden_states)\n inner_dim = hidden_states.shape[1]\n hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch_frames, height * width, inner_dim)\n hidden_states = self.proj_in(hidden_states)\n\n num_frames_emb = torch.arange(num_frames, device=hidden_states.device)\n num_frames_emb = num_frames_emb.repeat(batch_size, 1)\n num_frames_emb = num_frames_emb.reshape(-1)\n t_emb = self.time_proj(num_frames_emb)\n\n # `Timesteps` does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=hidden_states.dtype)\n\n emb = self.time_pos_embed(t_emb)\n emb = emb[:, None, :]\n\n # 2. Blocks\n for block, temporal_block in zip(self.transformer_blocks, self.temporal_transformer_blocks):\n if self.training and self.gradient_checkpointing:\n hidden_states = torch.utils.checkpoint.checkpoint(\n block,\n hidden_states,\n None,\n encoder_hidden_states,\n None,\n use_reentrant=False,\n )\n else:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n )\n\n hidden_states_mix = hidden_states\n hidden_states_mix = hidden_states_mix + emb\n\n hidden_states_mix = temporal_block(\n hidden_states_mix,\n num_frames=num_frames,\n encoder_hidden_states=time_context,\n )\n hidden_states = self.time_mixer(\n x_spatial=hidden_states,\n x_temporal=hidden_states_mix,\n image_only_indicator=image_only_indicator,\n )\n\n # 3. 
Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = hidden_states.reshape(batch_frames, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" }, { "identifier": "TransformerTemporalModel", "path": "diffusers/src/diffusers/models/transformer_temporal.py", "snippet": "class TransformerTemporalModel(ModelMixin, ConfigMixin):\n \"\"\"\n A Transformer model for video-like data.\n\n Parameters:\n num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.\n attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.\n in_channels (`int`, *optional*):\n The number of channels in the input and output (specify if the input is **continuous**).\n num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.\n dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.\n cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.\n attention_bias (`bool`, *optional*):\n Configure if the `TransformerBlock` attention should contain a bias parameter.\n sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).\n This is fixed during training since it is used to learn a number of position embeddings.\n activation_fn (`str`, *optional*, defaults to `\"geglu\"`):\n Activation function to use in feed-forward. See `diffusers.models.activations.get_activation` for supported\n activation functions.\n norm_elementwise_affine (`bool`, *optional*):\n Configure if the `TransformerBlock` should use learnable elementwise affine parameters for normalization.\n double_self_attention (`bool`, *optional*):\n Configure if each `TransformerBlock` should contain two self-attention layers.\n positional_embeddings: (`str`, *optional*):\n The type of positional embeddings to apply to the sequence input before passing use.\n num_positional_embeddings: (`int`, *optional*):\n The maximum length of the sequence over which to apply positional embeddings.\n \"\"\"\n\n @register_to_config\n def __init__(\n self,\n num_attention_heads: int = 16,\n attention_head_dim: int = 88,\n in_channels: Optional[int] = None,\n out_channels: Optional[int] = None,\n num_layers: int = 1,\n dropout: float = 0.0,\n norm_num_groups: int = 32,\n cross_attention_dim: Optional[int] = None,\n attention_bias: bool = False,\n sample_size: Optional[int] = None,\n activation_fn: str = \"geglu\",\n norm_elementwise_affine: bool = True,\n double_self_attention: bool = True,\n positional_embeddings: Optional[str] = None,\n num_positional_embeddings: Optional[int] = None,\n ):\n super().__init__()\n self.num_attention_heads = num_attention_heads\n self.attention_head_dim = attention_head_dim\n inner_dim = num_attention_heads * attention_head_dim\n\n self.in_channels = in_channels\n\n self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)\n self.proj_in = nn.Linear(in_channels, inner_dim)\n\n # 3. 
Define transformers blocks\n self.transformer_blocks = nn.ModuleList(\n [\n BasicTransformerBlock(\n inner_dim,\n num_attention_heads,\n attention_head_dim,\n dropout=dropout,\n cross_attention_dim=cross_attention_dim,\n activation_fn=activation_fn,\n attention_bias=attention_bias,\n double_self_attention=double_self_attention,\n norm_elementwise_affine=norm_elementwise_affine,\n positional_embeddings=positional_embeddings,\n num_positional_embeddings=num_positional_embeddings,\n )\n for d in range(num_layers)\n ]\n )\n\n self.proj_out = nn.Linear(inner_dim, in_channels)\n\n def forward(\n self,\n hidden_states: torch.FloatTensor,\n encoder_hidden_states: Optional[torch.LongTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n class_labels: torch.LongTensor = None,\n num_frames: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> TransformerTemporalModelOutput:\n \"\"\"\n The [`TransformerTemporal`] forward method.\n\n Args:\n hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):\n Input hidden_states.\n encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*):\n Conditional embeddings for cross attention layer. If not given, cross-attention defaults to\n self-attention.\n timestep ( `torch.LongTensor`, *optional*):\n Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.\n class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):\n Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in\n `AdaLayerZeroNorm`.\n num_frames (`int`, *optional*, defaults to 1):\n The number of frames to be processed per batch. This is used to reshape the hidden states.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain\n tuple.\n\n Returns:\n [`~models.transformer_temporal.TransformerTemporalModelOutput`] or `tuple`:\n If `return_dict` is True, an [`~models.transformer_temporal.TransformerTemporalModelOutput`] is\n returned, otherwise a `tuple` where the first element is the sample tensor.\n \"\"\"\n # 1. Input\n batch_frames, channel, height, width = hidden_states.shape\n batch_size = batch_frames // num_frames\n\n residual = hidden_states\n\n hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, channel, height, width)\n hidden_states = hidden_states.permute(0, 2, 1, 3, 4)\n\n hidden_states = self.norm(hidden_states)\n hidden_states = hidden_states.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)\n\n hidden_states = self.proj_in(hidden_states)\n\n # 2. Blocks\n for block in self.transformer_blocks:\n hidden_states = block(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n timestep=timestep,\n cross_attention_kwargs=cross_attention_kwargs,\n class_labels=class_labels,\n )\n\n # 3. 
Output\n hidden_states = self.proj_out(hidden_states)\n hidden_states = (\n hidden_states[None, None, :]\n .reshape(batch_size, height, width, num_frames, channel)\n .permute(0, 3, 4, 1, 2)\n .contiguous()\n )\n hidden_states = hidden_states.reshape(batch_frames, channel, height, width)\n\n output = hidden_states + residual\n\n if not return_dict:\n return (output,)\n\n return TransformerTemporalModelOutput(sample=output)" } ]
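For readers tracing the reshape logic in the TransformerTemporalModel.forward and TransformerSpatioTemporalModel.forward snippets above, the following standalone sketch replays the (batch*frames, C, H, W) -> (batch*H*W, frames, C) round trip; the shapes are illustrative and not taken from any config.

import torch

# Illustrative shapes: 2 clips of 8 frames, 320 channels, 16x16 latents.
batch_size, num_frames, channel, height, width = 2, 8, 320, 16, 16
batch_frames = batch_size * num_frames
hidden_states = torch.randn(batch_frames, channel, height, width)

# Forward mapping: every spatial location becomes one sequence over time,
# which is what the temporal attention blocks attend across.
x = hidden_states.reshape(batch_size, num_frames, channel, height, width)
x = x.permute(0, 2, 1, 3, 4)  # (B, C, T, H, W)
x = x.permute(0, 3, 4, 2, 1).reshape(batch_size * height * width, num_frames, channel)

# Inverse mapping used after proj_out, restoring (batch*frames, C, H, W).
y = x.reshape(batch_size, height, width, num_frames, channel)
y = y.permute(0, 3, 4, 1, 2).reshape(batch_frames, channel, height, width)
assert torch.equal(y, hidden_states)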
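The patch-output branch of Transformer2DModel above folds tokens back into an image with an einsum; here is a minimal sketch of that unpatchify step with made-up sizes (batch, patch size, and channel counts are illustrative assumptions).

import torch

n, p, c = 2, 2, 4  # batch, patch size, out channels (illustrative)
hw = 8             # patches per side, so hw * hw tokens
tokens = torch.randn(n, hw * hw, p * p * c)

x = tokens.reshape(n, hw, hw, p, p, c)   # (N, h, w, p, q, C)
x = torch.einsum("nhwpqc->nchpwq", x)    # interleave patch pixels into the grid
image = x.reshape(n, c, hw * p, hw * p)  # (N, C, H, W), with H = W = hw * p
assert image.shape == (2, 4, 16, 16)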
from typing import Any, Dict, Optional, Tuple, Union from torch import nn from ..utils import is_torch_version from ..utils.torch_utils import apply_freeu from .attention import Attention from .dual_transformer_2d import DualTransformer2DModel from .resnet import ( Downsample2D, ResnetBlock2D, SpatioTemporalResBlock, TemporalConvLayer, Upsample2D, ) from .transformer_2d import Transformer2DModel from .transformer_temporal import ( TransformerSpatioTemporalModel, TransformerTemporalModel, ) import torch
19280
name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, num_frames: int = 1, ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: output_states = () for resnet, temp_conv in zip(self.resnets, self.temp_convs): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class CrossAttnUpBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resolution_idx: Optional[int] = None, ): super().__init__() resnets = [] temp_convs = [] attentions = [] temp_attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) attentions.append( Transformer2DModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_upsample:
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def get_down_block( down_block_type: str, num_layers: int, in_channels: int, out_channels: int, temb_channels: int, add_downsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, downsample_padding: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_max_seq_length: int = 32, transformer_layers_per_block: int = 1, ) -> Union[ "DownBlock3D", "CrossAttnDownBlock3D", "DownBlockMotion", "CrossAttnDownBlockMotion", "DownBlockSpatioTemporal", "CrossAttnDownBlockSpatioTemporal", ]: if down_block_type == "DownBlock3D": return DownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, ) elif down_block_type == "CrossAttnDownBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlock3D") return CrossAttnDownBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, ) if down_block_type == "DownBlockMotion": return DownBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif down_block_type == "CrossAttnDownBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockMotion") return CrossAttnDownBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, downsample_padding=downsample_padding, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, 
dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif down_block_type == "DownBlockSpatioTemporal": # added for SDV return DownBlockSpatioTemporal( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, add_downsample=add_downsample, ) elif down_block_type == "CrossAttnDownBlockSpatioTemporal": # added for SDV if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnDownBlockSpatioTemporal") return CrossAttnDownBlockSpatioTemporal( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, add_downsample=add_downsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, ) raise ValueError(f"{down_block_type} does not exist.") def get_up_block( up_block_type: str, num_layers: int, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, add_upsample: bool, resnet_eps: float, resnet_act_fn: str, num_attention_heads: int, resolution_idx: Optional[int] = None, resnet_groups: Optional[int] = None, cross_attention_dim: Optional[int] = None, dual_cross_attention: bool = False, use_linear_projection: bool = True, only_cross_attention: bool = False, upcast_attention: bool = False, resnet_time_scale_shift: str = "default", temporal_num_attention_heads: int = 8, temporal_cross_attention_dim: Optional[int] = None, temporal_max_seq_length: int = 32, transformer_layers_per_block: int = 1, dropout: float = 0.0, ) -> Union[ "UpBlock3D", "CrossAttnUpBlock3D", "UpBlockMotion", "CrossAttnUpBlockMotion", "UpBlockSpatioTemporal", "CrossAttnUpBlockSpatioTemporal", ]: if up_block_type == "UpBlock3D": return UpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) elif up_block_type == "CrossAttnUpBlock3D": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlock3D") return CrossAttnUpBlock3D( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, ) if up_block_type == "UpBlockMotion": return UpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, 
temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif up_block_type == "CrossAttnUpBlockMotion": if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockMotion") return CrossAttnUpBlockMotion( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, add_upsample=add_upsample, resnet_eps=resnet_eps, resnet_act_fn=resnet_act_fn, resnet_groups=resnet_groups, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, dual_cross_attention=dual_cross_attention, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, resnet_time_scale_shift=resnet_time_scale_shift, resolution_idx=resolution_idx, temporal_num_attention_heads=temporal_num_attention_heads, temporal_max_seq_length=temporal_max_seq_length, ) elif up_block_type == "UpBlockSpatioTemporal": # added for SDV return UpBlockSpatioTemporal( num_layers=num_layers, in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, resolution_idx=resolution_idx, add_upsample=add_upsample, ) elif up_block_type == "CrossAttnUpBlockSpatioTemporal": # added for SDV if cross_attention_dim is None: raise ValueError("cross_attention_dim must be specified for CrossAttnUpBlockSpatioTemporal") return CrossAttnUpBlockSpatioTemporal( in_channels=in_channels, out_channels=out_channels, prev_output_channel=prev_output_channel, temb_channels=temb_channels, num_layers=num_layers, transformer_layers_per_block=transformer_layers_per_block, add_upsample=add_upsample, cross_attention_dim=cross_attention_dim, num_attention_heads=num_attention_heads, resolution_idx=resolution_idx, ) raise ValueError(f"{up_block_type} does not exist.") class UNetMidBlock3DCrossAttn(nn.Module): def __init__( self, in_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, output_scale_factor: float = 1.0, cross_attention_dim: int = 1280, dual_cross_attention: bool = False, use_linear_projection: bool = True, upcast_attention: bool = False, ): super().__init__() self.has_cross_attention = True self.num_attention_heads = num_attention_heads resnet_groups = resnet_groups if resnet_groups is not None else min(in_channels // 4, 32) # there is always at least one resnet resnets = [ ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ] temp_convs = [ TemporalConvLayer( in_channels, in_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ] attentions = [] temp_attentions = [] for _ in range(num_layers): attentions.append( Transformer2DModel( in_channels // num_attention_heads, num_attention_heads, in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( in_channels // num_attention_heads, num_attention_heads, 
in_channels=in_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=in_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( in_channels, in_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, num_frames: int = 1, cross_attention_kwargs: Optional[Dict[str, Any]] = None, ) -> torch.FloatTensor: hidden_states = self.resnets[0](hidden_states, temb) hidden_states = self.temp_convs[0](hidden_states, num_frames=num_frames) for attn, temp_attn, resnet, temp_conv in zip( self.attentions, self.temp_attentions, self.resnets[1:], self.temp_convs[1:] ): hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] hidden_states = temp_attn( hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) return hidden_states class CrossAttnDownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, downsample_padding: int = 1, add_downsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, ): super().__init__() resnets = [] attentions = [] temp_attentions = [] temp_convs = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) attentions.append( Transformer2DModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.resnets = 
nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, encoder_hidden_states: Optional[torch.FloatTensor] = None, attention_mask: Optional[torch.FloatTensor] = None, num_frames: int = 1, cross_attention_kwargs: Dict[str, Any] = None, ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: # TODO(Patrick, William) - attention mask is not used output_states = () for resnet, temp_conv, attn, temp_attn in zip( self.resnets, self.temp_convs, self.attentions, self.temp_attentions ): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) hidden_states = attn( hidden_states, encoder_hidden_states=encoder_hidden_states, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] hidden_states = temp_attn( hidden_states, num_frames=num_frames, cross_attention_kwargs=cross_attention_kwargs, return_dict=False, )[0] output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class DownBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, temb_channels: int, dropout: float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, output_scale_factor: float = 1.0, add_downsample: bool = True, downsample_padding: int = 1, ): super().__init__() resnets = [] temp_convs = [] for i in range(num_layers): in_channels = in_channels if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=in_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) if add_downsample: self.downsamplers = nn.ModuleList( [ Downsample2D( out_channels, use_conv=True, out_channels=out_channels, padding=downsample_padding, name="op", ) ] ) else: self.downsamplers = None self.gradient_checkpointing = False def forward( self, hidden_states: torch.FloatTensor, temb: Optional[torch.FloatTensor] = None, num_frames: int = 1, ) -> Union[torch.FloatTensor, Tuple[torch.FloatTensor, ...]]: output_states = () for resnet, temp_conv in zip(self.resnets, self.temp_convs): hidden_states = resnet(hidden_states, temb) hidden_states = temp_conv(hidden_states, num_frames=num_frames) output_states += (hidden_states,) if self.downsamplers is not None: for downsampler in self.downsamplers: hidden_states = downsampler(hidden_states) output_states += (hidden_states,) return hidden_states, output_states class CrossAttnUpBlock3D(nn.Module): def __init__( self, in_channels: int, out_channels: int, prev_output_channel: int, temb_channels: int, dropout: 
float = 0.0, num_layers: int = 1, resnet_eps: float = 1e-6, resnet_time_scale_shift: str = "default", resnet_act_fn: str = "swish", resnet_groups: int = 32, resnet_pre_norm: bool = True, num_attention_heads: int = 1, cross_attention_dim: int = 1280, output_scale_factor: float = 1.0, add_upsample: bool = True, dual_cross_attention: bool = False, use_linear_projection: bool = False, only_cross_attention: bool = False, upcast_attention: bool = False, resolution_idx: Optional[int] = None, ): super().__init__() resnets = [] temp_convs = [] attentions = [] temp_attentions = [] self.has_cross_attention = True self.num_attention_heads = num_attention_heads for i in range(num_layers): res_skip_channels = in_channels if (i == num_layers - 1) else out_channels resnet_in_channels = prev_output_channel if i == 0 else out_channels resnets.append( ResnetBlock2D( in_channels=resnet_in_channels + res_skip_channels, out_channels=out_channels, temb_channels=temb_channels, eps=resnet_eps, groups=resnet_groups, dropout=dropout, time_embedding_norm=resnet_time_scale_shift, non_linearity=resnet_act_fn, output_scale_factor=output_scale_factor, pre_norm=resnet_pre_norm, ) ) temp_convs.append( TemporalConvLayer( out_channels, out_channels, dropout=0.1, norm_num_groups=resnet_groups, ) ) attentions.append( Transformer2DModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, use_linear_projection=use_linear_projection, only_cross_attention=only_cross_attention, upcast_attention=upcast_attention, ) ) temp_attentions.append( TransformerTemporalModel( out_channels // num_attention_heads, num_attention_heads, in_channels=out_channels, num_layers=1, cross_attention_dim=cross_attention_dim, norm_num_groups=resnet_groups, ) ) self.resnets = nn.ModuleList(resnets) self.temp_convs = nn.ModuleList(temp_convs) self.attentions = nn.ModuleList(attentions) self.temp_attentions = nn.ModuleList(temp_attentions) if add_upsample:
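As a usage sketch, the get_down_block factory defined in all_code above might be invoked as follows; the argument values outline a typical first stage and are illustrative assumptions, not taken from a real config.

down_block = get_down_block(
    down_block_type="CrossAttnDownBlock3D",
    num_layers=2,
    in_channels=320,
    out_channels=320,
    temb_channels=1280,
    add_downsample=True,
    resnet_eps=1e-5,
    resnet_act_fn="silu",
    num_attention_heads=8,
    resnet_groups=32,
    cross_attention_dim=1024,
    downsample_padding=1,
)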
self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
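For context, this next_line completes the `if add_upsample:` left open at the end of cropped_code; by analogy with the downsampler branches earlier in the same file, the surrounding code presumably reads:

if add_upsample:
    self.upsamplers = nn.ModuleList([Upsample2D(out_channels, use_conv=True, out_channels=out_channels)])
else:
    self.upsamplers = None  # assumed: mirrors the downsampler pattern above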
8
2023-12-28 08:17:40+00:00
24k
FoundationVision/UniRef
detectron2/utils/visualizer.py
[ { "identifier": "MetadataCatalog", "path": "detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "Boxes", "path": "detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a BoolTensor\n\n Returns:\n Boxes: Create a new 
:class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "Keypoints", "path": "detectron2/structures/keypoints.py", "snippet": "class Keypoints:\n \"\"\"\n Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property\n containing the x,y location and visibility flag of each keypoint. 
This tensor has shape\n (N, K, 3) where N is the number of instances and K is the number of keypoints per instance.\n\n The visibility flag follows the COCO format and must be one of three integers:\n\n * v=0: not labeled (in which case x=y=0)\n * v=1: labeled but not visible\n * v=2: labeled and visible\n \"\"\"\n\n def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):\n \"\"\"\n Arguments:\n keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.\n The shape should be (N, K, 3) where N is the number of\n instances, and K is the number of keypoints per instance.\n \"\"\"\n device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device(\"cpu\")\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)\n assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape\n self.tensor = keypoints\n\n def __len__(self) -> int:\n return self.tensor.size(0)\n\n def to(self, *args: Any, **kwargs: Any) -> \"Keypoints\":\n return type(self)(self.tensor.to(*args, **kwargs))\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:\n \"\"\"\n Convert keypoint annotations to a heatmap of one-hot labels for training,\n as described in :paper:`Mask R-CNN`.\n\n Arguments:\n boxes: Nx4 tensor, the boxes to draw the keypoints to\n\n Returns:\n heatmaps:\n A tensor of shape (N, K), each element is integer spatial label\n in the range [0, heatmap_size**2 - 1] for each keypoint in the input.\n valid:\n A tensor of shape (N, K) containing whether each keypoint is in the roi or not.\n \"\"\"\n return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Keypoints\":\n \"\"\"\n Create a new `Keypoints` by indexing on this `Keypoints`.\n\n The following usage are allowed:\n\n 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.\n 2. `new_kpts = kpts[2:10]`: return a slice of key points.\n 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor\n with `length = len(kpts)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Keypoints might share storage with this Keypoints,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Keypoints([self.tensor[item]])\n return Keypoints(self.tensor[item])\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.tensor))\n return s\n\n @staticmethod\n def cat(keypoints_list: List[\"Keypoints\"]) -> \"Keypoints\":\n \"\"\"\n Concatenates a list of Keypoints into a single Keypoints\n\n Arguments:\n keypoints_list (list[Keypoints])\n\n Returns:\n Keypoints: the concatenated Keypoints\n \"\"\"\n assert isinstance(keypoints_list, (list, tuple))\n assert len(keypoints_list) > 0\n assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list)\n\n cat_kpts = type(keypoints_list[0])(\n torch.cat([kpts.tensor for kpts in keypoints_list], dim=0)\n )\n return cat_kpts" }, { "identifier": "BitMasks", "path": "detectron2/structures/masks.py", "snippet": "class BitMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in\n the form of bitmaps.\n\n Attributes:\n tensor: bool Tensor of N,H,W, representing N instances in the image.\n \"\"\"\n\n def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):\n \"\"\"\n Args:\n tensor: bool Tensor of N,H,W, representing N instances in the image.\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.bool, device=device)\n assert tensor.dim() == 3, tensor.size()\n self.image_size = tensor.shape[1:]\n self.tensor = tensor\n\n @torch.jit.unused\n def to(self, *args: Any, **kwargs: Any) -> \"BitMasks\":\n return BitMasks(self.tensor.to(*args, **kwargs))\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"BitMasks\":\n \"\"\"\n Returns:\n BitMasks: Create a new :class:`BitMasks` by indexing.\n\n The following usage are allowed:\n\n 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.\n 2. `new_masks = masks[2:10]`: return a slice of masks.\n 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor\n with `length = len(masks)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned object might share storage with this object,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return BitMasks(self.tensor[item].unsqueeze(0))\n m = self.tensor[item]\n assert m.dim() == 3, \"Indexing on BitMasks with {} returns a tensor with shape {}!\".format(\n item, m.shape\n )\n return BitMasks(m)\n\n @torch.jit.unused\n def __iter__(self) -> torch.Tensor:\n yield from self.tensor\n\n @torch.jit.unused\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.tensor))\n return s\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor: a BoolTensor which represents\n whether each mask is empty (False) or non-empty (True).\n \"\"\"\n return self.tensor.flatten(1).any(dim=1)\n\n @staticmethod\n def from_polygon_masks(\n polygon_masks: Union[\"PolygonMasks\", List[List[np.ndarray]]], height: int, width: int\n ) -> \"BitMasks\":\n \"\"\"\n Args:\n polygon_masks (list[list[ndarray]] or PolygonMasks)\n height, width (int)\n \"\"\"\n if isinstance(polygon_masks, PolygonMasks):\n polygon_masks = polygon_masks.polygons\n masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]\n if len(masks):\n return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))\n else:\n return BitMasks(torch.empty(0, height, width, dtype=torch.bool))\n\n @staticmethod\n def from_roi_masks(roi_masks: \"ROIMasks\", height: int, width: int) -> \"BitMasks\":\n \"\"\"\n Args:\n roi_masks:\n height, width (int):\n \"\"\"\n return roi_masks.to_bitmasks(height, width)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each bitmask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n It has less reconstruction error compared to rasterization with polygons.\n However we observe no difference in accuracy,\n but BitMasks requires more memory to store all the masks.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor:\n A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n device = self.tensor.device\n\n batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]\n rois = torch.cat([batch_inds, boxes], dim=1) # Nx5\n\n bit_masks = self.tensor.to(dtype=torch.float32)\n rois = rois.to(device=device)\n output = (\n ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)\n .forward(bit_masks[:, None, :, :], rois)\n .squeeze(1)\n )\n output = output >= 0.5\n return output\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around bitmasks.\n If a mask is empty, it's bounding box will be all zero.\n \"\"\"\n boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)\n x_any = torch.any(self.tensor, dim=1)\n y_any = torch.any(self.tensor, dim=2)\n for idx in range(self.tensor.shape[0]):\n x = torch.where(x_any[idx, :])[0]\n y = torch.where(y_any[idx, :])[0]\n if len(x) > 0 and len(y) > 0:\n boxes[idx, :] = torch.as_tensor(\n [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32\n )\n return Boxes(boxes)\n\n @staticmethod\n 
def cat(bitmasks_list: List[\"BitMasks\"]) -> \"BitMasks\":\n \"\"\"\n Concatenates a list of BitMasks into a single BitMasks\n\n Arguments:\n bitmasks_list (list[BitMasks])\n\n Returns:\n BitMasks: the concatenated BitMasks\n \"\"\"\n assert isinstance(bitmasks_list, (list, tuple))\n assert len(bitmasks_list) > 0\n assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)\n\n cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))\n return cat_bitmasks" }, { "identifier": "PolygonMasks", "path": "detectron2/structures/masks.py", "snippet": "class PolygonMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in the form of polygons.\n\n Attributes:\n polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.\n \"\"\"\n\n def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):\n \"\"\"\n Arguments:\n polygons (list[list[np.ndarray]]): The first\n level of the list correspond to individual instances,\n the second level to all the polygons that compose the\n instance, and the third level to the polygon coordinates.\n The third level array should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n \"\"\"\n if not isinstance(polygons, list):\n raise ValueError(\n \"Cannot create PolygonMasks: Expect a list of list of polygons per image. \"\n \"Got '{}' instead.\".format(type(polygons))\n )\n\n def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n # Use float64 for higher precision, because why not?\n # Always put polygons on CPU (self.to is a no-op) since they\n # are supposed to be small tensors.\n # May need to change this assumption if GPU placement becomes useful\n if isinstance(t, torch.Tensor):\n t = t.cpu().numpy()\n return np.asarray(t).astype(\"float64\")\n\n def process_polygons(\n polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[np.ndarray]:\n if not isinstance(polygons_per_instance, list):\n raise ValueError(\n \"Cannot create polygons: Expect a list of polygons per instance. 
\"\n \"Got '{}' instead.\".format(type(polygons_per_instance))\n )\n # transform each polygon to a numpy array\n polygons_per_instance = [_make_array(p) for p in polygons_per_instance]\n for polygon in polygons_per_instance:\n if len(polygon) % 2 != 0 or len(polygon) < 6:\n raise ValueError(f\"Cannot create a polygon from {len(polygon)} coordinates.\")\n return polygons_per_instance\n\n self.polygons: List[List[np.ndarray]] = [\n process_polygons(polygons_per_instance) for polygons_per_instance in polygons\n ]\n\n def to(self, *args: Any, **kwargs: Any) -> \"PolygonMasks\":\n return self\n\n @property\n def device(self) -> torch.device:\n return torch.device(\"cpu\")\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around polygon masks.\n \"\"\"\n boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)\n for idx, polygons_per_instance in enumerate(self.polygons):\n minxy = torch.as_tensor([float(\"inf\"), float(\"inf\")], dtype=torch.float32)\n maxxy = torch.zeros(2, dtype=torch.float32)\n for polygon in polygons_per_instance:\n coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)\n minxy = torch.min(minxy, torch.min(coords, dim=0).values)\n maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)\n boxes[idx, :2] = minxy\n boxes[idx, 2:] = maxxy\n return Boxes(boxes)\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor:\n a BoolTensor which represents whether each mask is empty (False) or not (True).\n \"\"\"\n keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]\n return torch.from_numpy(np.asarray(keep, dtype=np.bool))\n\n def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n \"\"\"\n Support indexing over the instances and return a `PolygonMasks` object.\n `item` can be:\n\n 1. An integer. It will return an object with only one instance.\n 2. A slice. It will return an object with the selected instances.\n 3. A list[int]. It will return an object with the selected instances,\n correpsonding to the indices in the list.\n 4. 
A vector mask of type BoolTensor, whose length is num_instances.\n It will return an object with the instances whose mask is nonzero.\n \"\"\"\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)\n\n def __iter__(self) -> Iterator[List[np.ndarray]]:\n \"\"\"\n Yields:\n list[ndarray]: the polygons for one instance.\n Each Tensor is a float64 vector representing a polygon.\n \"\"\"\n return iter(self.polygons)\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.polygons))\n return s\n\n def __len__(self) -> int:\n return len(self.polygons)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each mask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor: A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n\n device = boxes.device\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\n # (several small tensors for representing a single instance mask)\n boxes = boxes.to(torch.device(\"cpu\"))\n\n results = [\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\n for poly, box in zip(self.polygons, boxes)\n ]\n \"\"\"\n poly: list[list[float]], the polygons for one instance\n box: a tensor of shape (4,)\n \"\"\"\n if len(results) == 0:\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\n return torch.stack(results, dim=0).to(device=device)\n\n def area(self):\n \"\"\"\n Computes area of the mask.\n Only works with Polygons, using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Returns:\n Tensor: a vector, area for each instance\n \"\"\"\n\n area = []\n for polygons_per_instance in self.polygons:\n area_per_instance = 0\n for p in polygons_per_instance:\n area_per_instance += polygon_area(p[0::2], p[1::2])\n area.append(area_per_instance)\n\n return torch.tensor(area)\n\n @staticmethod\n def cat(polymasks_list: List[\"PolygonMasks\"]) -> \"PolygonMasks\":\n \"\"\"\n Concatenates a list of PolygonMasks into a single PolygonMasks\n\n Arguments:\n polymasks_list (list[PolygonMasks])\n\n Returns:\n PolygonMasks: the concatenated PolygonMasks\n \"\"\"\n assert isinstance(polymasks_list, (list, tuple))\n assert len(polymasks_list) > 0\n assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)\n\n cat_polymasks = type(polymasks_list[0])(\n list(itertools.chain.from_iterable(pm.polygons for pm in 
polymasks_list))\n )\n return cat_polymasks" }, { "identifier": "RotatedBoxes", "path": "detectron2/structures/rotated_boxes.py", "snippet": "class RotatedBoxes(Boxes):\n \"\"\"\n This structure stores a list of rotated boxes as a Nx5 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx5 matrix. Each row is\n (x_center, y_center, width, height, angle),\n in which angle is represented in degrees.\n While there's no strict range restriction for it,\n the recommended principal range is between [-180, 180) degrees.\n\n Assume we have a horizontal box B = (x_center, y_center, width, height),\n where width is along the x-axis and height is along the y-axis.\n The rotated box B_rot (x_center, y_center, width, height, angle)\n can be seen as:\n\n 1. When angle == 0:\n B_rot == B\n 2. When angle > 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;\n 3. When angle < 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.\n\n Mathematically, since the right-handed coordinate system for image space\n is (y, x), where y is top->down and x is left->right, the 4 vertices of the\n rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from\n the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)\n in the following way (:math:`\\\\theta = angle*\\\\pi/180` is the angle in radians,\n :math:`(y_c, x_c)` is the center of the rectangle):\n\n .. math::\n\n yr_i = \\\\cos(\\\\theta) (y_i - y_c) - \\\\sin(\\\\theta) (x_i - x_c) + y_c,\n\n xr_i = \\\\sin(\\\\theta) (y_i - y_c) + \\\\cos(\\\\theta) (x_i - x_c) + x_c,\n\n which is the standard rigid-body rotation transformation.\n\n Intuitively, the angle is\n (1) the rotation angle from y-axis in image space\n to the height vector (top->down in the box's local coordinate system)\n of the box in CCW, and\n (2) the rotation angle from x-axis in image space\n to the width vector (left->right in the box's local coordinate system)\n of the box in CCW.\n\n More intuitively, consider the following horizontal box ABCD represented\n in (x1, y1, x2, y2): (3, 2, 7, 4),\n covering the [3, 7] x [2, 4] region of the continuous coordinate system\n which looks like this:\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | |\n | D---C\n |\n v y\n\n Note that each capital letter represents one 0-dimensional geometric point\n instead of a 'square pixel' here.\n\n In the example above, using (x, y) to represent a point we have:\n\n .. math::\n\n O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)\n\n We name vector AB = vector DC as the width vector in box's local coordinate system, and\n vector AD = vector BC as the height vector in box's local coordinate system. Initially,\n when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis\n in the image space, respectively.\n\n For better illustration, we denote the center of the box as E,\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | E |\n | D---C\n |\n v y\n\n where the center E = ((3+7)/2, (2+4)/2) = (5, 3).\n\n Also,\n\n .. 
math::\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Therefore, the corresponding representation for the same shape in rotated box in\n (x_center, y_center, width, height, angle) format is:\n\n (5, 3, 4, 2, 0),\n\n Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees\n CCW (counter-clockwise) by definition. It looks like this:\n\n .. code:: none\n\n O--------> x\n | B-C\n | | |\n | |E|\n | | |\n | A-D\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CCW with regard to E:\n A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)\n\n Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to\n vector AD or vector BC (the top->down height vector in box's local coordinate system),\n or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right\n width vector in box's local coordinate system).\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)\n by definition? It looks like this:\n\n .. code:: none\n\n O--------> x\n | D-A\n | | |\n | |E|\n | | |\n | C-B\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CW with regard to E:\n A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU\n will be 1. However, these two will generate different RoI Pooling results and\n should not be treated as an identical box.\n\n On the other hand, it's easy to see that (X, Y, W, H, A) is identical to\n (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be\n identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is\n equivalent to rotating the same shape 90 degrees CW.\n\n We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):\n\n .. code:: none\n\n O--------> x\n |\n | C---D\n | | E |\n | B---A\n |\n v y\n\n .. math::\n\n A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Finally, this is a very inaccurate (heavily quantized) illustration of\n how (5, 3, 4, 2, 60) looks like in case anyone wonders:\n\n .. 
code:: none\n\n O--------> x\n | B\\\n | / C\n | /E /\n | A /\n | `D\n v y\n\n It's still a rectangle with center of (5, 3), width of 4 and height of 2,\n but its angle (and thus orientation) is somewhere between\n (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"RotatedBoxes\":\n \"\"\"\n Clone the RotatedBoxes.\n\n Returns:\n RotatedBoxes\n \"\"\"\n return RotatedBoxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return RotatedBoxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = box[:, 2] * box[:, 3]\n return area\n\n def normalize_angles(self) -> None:\n \"\"\"\n Restrict angles to the range of [-180, 180) degrees\n \"\"\"\n self.tensor[:, 4] = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0\n\n def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n For RRPN:\n Only clip boxes that are almost horizontal with a tolerance of\n clip_angle_threshold to maintain backward compatibility.\n\n Rotated boxes beyond this threshold are not clipped for two reasons:\n\n 1. There are potentially multiple ways to clip a rotated box to make it\n fit within the image.\n 2. It's tricky to make the entire rectangular box fit within the image\n and still be able to not leave out pixels of interest.\n\n Therefore we rely on ops like RoIAlignRotated to safely handle this.\n\n Args:\n box_size (height, width): The clipping box's size.\n clip_angle_threshold:\n Iff. 
abs(normalized(angle)) <= clip_angle_threshold (in degrees),\n we do the clipping as horizontal boxes.\n \"\"\"\n h, w = box_size\n\n # normalize angles to be within [-180, 180) degrees\n self.normalize_angles()\n\n idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]\n\n # convert to (x1, y1, x2, y2)\n x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0\n y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0\n x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0\n y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0\n\n # clip\n x1.clamp_(min=0, max=w)\n y1.clamp_(min=0, max=h)\n x2.clamp_(min=0, max=w)\n y2.clamp_(min=0, max=h)\n\n # convert back to (xc, yc, w, h)\n self.tensor[idx, 0] = (x1 + x2) / 2.0\n self.tensor[idx, 1] = (y1 + y2) / 2.0\n # make sure widths and heights do not increase due to numerical errors\n self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)\n self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its sides is no larger than threshold.\n\n Returns:\n Tensor: a binary vector which represents\n whether each box is empty (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2]\n heights = box[:, 3]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"RotatedBoxes\":\n \"\"\"\n Returns:\n RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.\n\n The following usages are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned RotatedBoxes might share storage with this RotatedBoxes,\n subject to PyTorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return RotatedBoxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on RotatedBoxes with {} failed to return a matrix!\".format(\n item\n )\n return RotatedBoxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"RotatedBoxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box covering\n [0, width] x [0, height]\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n For RRPN, it might not be necessary to call this function since it's common\n for rotated box to extend to outside of the image boundaries\n (the clip function only clips the near-horizontal boxes)\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n\n cnt_x = self.tensor[..., 0]\n cnt_y = self.tensor[..., 1]\n half_w = self.tensor[..., 2] / 2.0\n half_h = self.tensor[..., 3] / 2.0\n a = self.tensor[..., 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n max_rect_dx = c * half_w + s * half_h\n max_rect_dy = c * half_h + s * half_w\n\n inds_inside = (\n (cnt_x - max_rect_dx >= -boundary_threshold)\n & (cnt_y - max_rect_dy >= -boundary_threshold)\n & (cnt_x + max_rect_dx < width + boundary_threshold)\n & (cnt_y + max_rect_dy < height + boundary_threshold)\n )\n\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return self.tensor[:, :2]\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the rotated box with horizontal and vertical scaling factors\n Note: when scale_factor_x != scale_factor_y,\n the rotated box does not preserve the rectangular shape when the angle\n is not a multiple of 90 degrees under resize transformation.\n Instead, the shape is a parallelogram (that has skew)\n Here we make an approximation by fitting a rotated rectangle to the parallelogram.\n \"\"\"\n self.tensor[:, 0] *= scale_x\n self.tensor[:, 1] *= scale_y\n theta = self.tensor[:, 4] * math.pi / 180.0\n c = torch.cos(theta)\n s = torch.sin(theta)\n\n # In image space, y is top->down and x is left->right\n # Consider the local coordinate system for the rotated box,\n # where the box center is located at (0, 0), and the four vertices ABCD are\n # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)\n # the midpoint of the left edge AD of the rotated box E is:\n # E = (A+D)/2 = (-w / 2, 0)\n # the midpoint of the top edge AB of the rotated box F is:\n # F(0, -h / 2)\n # To get the old coordinates in the global system, apply the rotation transformation\n # (Note: the right-handed coordinate system for image space is yOx):\n # (old_x, old_y) = (s * y + c * x, c * y - s * x)\n # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)\n # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)\n # After applying the scaling factor (sfx, sfy):\n 
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)\n # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)\n # The new width after scaling transformation becomes:\n\n # w(new) = |E(new) - O| * 2\n # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2\n # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w\n # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y\n self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)\n\n # h(new) = |F(new) - O| * 2\n # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2\n # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h\n # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x\n self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)\n\n # The angle is the rotation angle from y-axis in image space to the height\n # vector (top->down in the box's local coordinate system) of the box in CCW.\n #\n # angle(new) = angle_yOx(O - F(new))\n # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )\n # = atan2(sfx * s * h / 2, sfy * c * h / 2)\n # = atan2(sfx * s, sfy * c)\n #\n # For example,\n # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)\n self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi\n\n @classmethod\n def cat(cls, boxes_list: List[\"RotatedBoxes\"]) -> \"RotatedBoxes\":\n \"\"\"\n Concatenates a list of RotatedBoxes into a single RotatedBoxes\n\n Arguments:\n boxes_list (list[RotatedBoxes])\n\n Returns:\n RotatedBoxes: the concatenated RotatedBoxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, RotatedBoxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (5,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "PathManager", "path": "detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "random_color", "path": "detectron2/utils/colormap.py", "snippet": "def random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret" } ]
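The RotatedBoxes snippet above derives the box-vertex rotation in prose; the following is a minimal, self-contained sketch (plain NumPy, no detectron2 import; the helper name rotated_box_vertices is ours) that replays the docstring's (5, 3, 4, 2, 90) worked example to confirm the convention:

import numpy as np

def rotated_box_vertices(xc, yc, w, h, angle_deg):
    # Docstring transformation: xr = sin*(y - yc) + cos*(x - xc) + xc,
    #                           yr = cos*(y - yc) - sin*(x - xc) + yc
    t = np.deg2rad(angle_deg)
    c, s = np.cos(t), np.sin(t)
    # Horizontal corners A, B, C, D before rotation.
    corners = np.array(
        [[xc - w / 2, yc - h / 2], [xc + w / 2, yc - h / 2],
         [xc + w / 2, yc + h / 2], [xc - w / 2, yc + h / 2]]
    )
    x, y = corners[:, 0], corners[:, 1]
    xr = s * (y - yc) + c * (x - xc) + xc
    yr = c * (y - yc) - s * (x - xc) + yc
    return np.stack([xr, yr], axis=1)

# (5, 3, 4, 2, 90) from the docstring rotates ABCD to A=(4, 5), B=(4, 1), C=(6, 1), D=(6, 5).
assert np.allclose(rotated_box_vertices(5, 3, 4, 2, 90),
                   [[4, 5], [4, 1], [6, 1], [6, 5]])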
import colorsys import logging import math import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import numpy as np import pycocotools.mask as mask_util import torch from enum import Enum, unique from detectron2.data import MetadataCatalog from detectron2.structures import ( BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes, ) from detectron2.utils.file_io import PathManager from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from .colormap import random_color from panopticapi.utils import rgb2id
16,641
dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. """ annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None:
# Copyright (c) Facebook, Inc. and its affiliates. logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 240.0 / 255) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlays segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 do not support non-contiguous arrays res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. 
res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids correspond to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. continue yield (self._seg == sid).numpy().astype(bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. 
scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. 
metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def draw_instance_predictions(self, predictions): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes ] alpha = 0.8 else: colors = None alpha = 0.5 if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) alpha = 0.3 self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. """ if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. 
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentations in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None:
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
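This next_line target is exactly the fallback color assignment in overlay_instances; a quick sketch of what it produces (again assuming detectron2 is installed; the instance count is hypothetical):

from detectron2.utils.colormap import random_color

num_instances = 3  # hypothetical instance count
assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]
# Each entry is an RGB triple scaled to [0, 1], suitable as a matplotlib color.
assert all(c.shape == (3,) and 0.0 <= c.min() and c.max() <= 1.0 for c in assigned_colors)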
8
2023-12-22 13:31:33+00:00
24k
xhuangcv/humannorm
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n # improve the resolution of DMTET at these steps\n progressive_resolution_steps: Optional[int] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if 
self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n self.cached_sdf = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # adjust the position of mesh\n if \"full_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.3\n elif \"half_body\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.1\n elif \"head_only\" in mesh_path:\n mesh.vertices[:,2] = mesh.vertices[:,2] + 0.15\n elif \"t-pose\" in mesh_path:\n mesh.vertices[:,1] = mesh.vertices[:,1] + 0.4\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = 
np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(2000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((40000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n\n sdf_loss: Optional[Float[Tensor, \"*N 1\"]] = None\n if self.cfg.use_sdf_loss and self.cached_sdf is not None:\n selected_points_idx = torch.LongTensor(random.sample(range(points_unscaled.shape[0]), 100000))\n gt_sdf = torch.from_numpy(-self.cached_sdf(points_unscaled[selected_points_idx].cpu().numpy())).to(\n points_unscaled\n )[..., None]\n sdf_loss = F.mse_loss(gt_sdf, sdf[selected_points_idx], reduction='sum')\n return sdf, deformation, sdf_loss\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n\n if global_step >= 
(self.cfg.start_sdf_loss_step + 1) and self.cached_sdf is None:\n\n from pysdf import SDF\n import trimesh\n\n mesh_v_pos = np.load('.threestudio_cache/mesh_v_pos.npy')\n mesh_t_pos_idx = np.load('.threestudio_cache/mesh_t_pos_idx.npy')\n cached_mesh = trimesh.Trimesh(\n vertices=mesh_v_pos,\n faces=mesh_t_pos_idx,\n )\n self.cached_sdf = SDF(cached_mesh.vertices, cached_mesh.faces)\n\n if self.cfg.progressive_resolution_steps is not None:\n if global_step >= self.cfg.progressive_resolution_steps[0] and self.cfg.isosurface_resolution < 256:\n self.cfg.isosurface_resolution = 256\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n if global_step >= self.cfg.progressive_resolution_steps[1] and self.cfg.isosurface_resolution < 512:\n self.cfg.isosurface_resolution = 512\n self.isosurface_helper = None\n self._initilize_isosurface_helper()\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n 
self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 
3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n 
[4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = 
edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n 
components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # 
Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n \n setattr(co, 'max_cost', 2.0)\n setattr(po, 'resolution', 4096)\n \n atlas.generate(co, po, verbose=True)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = 
ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF from tqdm import tqdm import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh
16107
def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh, _ = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( 
mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return if self.cfg.sdf_bias != 0.0: threestudio.warn( "shape_init and sdf_bias are both specified, which may lead to unexpected results." 
) get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") scene = trimesh.load(mesh_path) if isinstance(scene, trimesh.Trimesh): mesh = scene elif isinstance(scene, trimesh.scene.Scene): mesh = trimesh.Trimesh() for obj in scene.geometry.values(): mesh = trimesh.util.concatenate([mesh, obj]) else: raise ValueError(f"Unknown mesh type at {mesh_path}.") # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." 
) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) # Initialize SDF to a given shape when no weights are provided or force_shape_init is True optim = torch.optim.Adam(self.parameters(), lr=1e-3) for _ in tqdm( range(1000), desc=f"Initializing SDF to a(n) {self.cfg.shape_init}:", disable=get_rank() != 0, ): points_rand = ( torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0 ) sdf_gt = get_gt_sdf(points_rand) sdf_pred = self.forward_sdf(points_rand) loss = F.mse_loss(sdf_pred, sdf_gt) optim.zero_grad() loss.backward() optim.step() # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if 
other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh, _ = other.isosurface() instance.isosurface_bbox = mesh.extras["bbox"] instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
elif isinstance(other, ImplicitSDF):
3
2023-12-23 12:37:48+00:00
24k
dakpinaroglu/Frame2seq
frame2seq/openfold/model/structure_module.py
[ { "identifier": "Linear", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class Linear(nn.Linear):\n \"\"\"\n A Linear layer with built-in nonstandard initializations. Called just\n like torch.nn.Linear.\n\n Implements the initializers in 1.11.4, plus some additional ones found\n in the code.\n \"\"\"\n\n def __init__(\n self,\n in_dim: int,\n out_dim: int,\n bias: bool = True,\n init: str = \"default\",\n init_fn: Optional[Callable[[torch.Tensor, torch.Tensor], None]] = None,\n ):\n \"\"\"\n Args:\n in_dim:\n The final dimension of inputs to the layer\n out_dim:\n The final dimension of layer outputs\n bias:\n Whether to learn an additive bias. True by default\n init:\n The initializer to use. Choose from:\n\n \"default\": LeCun fan-in truncated normal initialization\n \"relu\": He initialization w/ truncated normal distribution\n \"glorot\": Fan-average Glorot uniform initialization\n \"gating\": Weights=0, Bias=1\n \"normal\": Normal initialization with std=1/sqrt(fan_in)\n \"final\": Weights=0, Bias=0\n\n Overridden by init_fn if the latter is not None.\n init_fn:\n A custom initializer taking weight and bias as inputs.\n Overrides init if not None.\n \"\"\"\n super(Linear, self).__init__(in_dim, out_dim, bias=bias)\n\n if bias:\n with torch.no_grad():\n self.bias.fill_(0)\n\n with torch.no_grad():\n if init_fn is not None:\n init_fn(self.weight, self.bias)\n else:\n if init == \"default\":\n lecun_normal_init_(self.weight)\n elif init == \"relu\":\n he_normal_init_(self.weight)\n elif init == \"glorot\":\n glorot_uniform_init_(self.weight)\n elif init == \"gating\":\n gating_init_(self.weight)\n if bias:\n self.bias.fill_(1.0)\n elif init == \"normal\":\n normal_init_(self.weight)\n elif init == \"final\":\n final_init_(self.weight)\n else:\n raise ValueError(\"Invalid init string.\")" }, { "identifier": "LayerNorm", "path": "frame2seq/openfold/model/primitives.py", "snippet": "class LayerNorm(nn.Module):\n def __init__(self, c_in, eps=1e-5):\n super(LayerNorm, self).__init__()\n \n self.c_in = (c_in,)\n self.eps = eps\n\n self.weight = nn.Parameter(torch.ones(c_in))\n self.bias = nn.Parameter(torch.zeros(c_in))\n\n def forward(self, x): \n d = x.dtype\n # deepspeed_is_initialized = (\n # deepspeed_is_installed and \n # deepspeed.utils.is_initialized()\n # )\n # if(d is torch.bfloat16 and not deepspeed_is_initialized):\n # with torch.cuda.amp.autocast(enabled=False):\n # out = nn.functional.layer_norm(\n # x, \n # self.c_in, \n # self.weight.to(dtype=d), \n # self.bias.to(dtype=d), \n # self.eps\n # )\n # else:\n out = nn.functional.layer_norm(\n x,\n self.c_in,\n self.weight,\n self.bias,\n self.eps,\n )\n\n return out" }, { "identifier": "ipa_point_weights_init_", "path": "frame2seq/openfold/model/primitives.py", "snippet": "def ipa_point_weights_init_(weights):\n with torch.no_grad():\n softplus_inverse_1 = 0.541324854612918\n weights.fill_(softplus_inverse_1)" }, { "identifier": "restype_rigid_group_default_frame", "path": "frame2seq/openfold/np/residue_constants.py", "snippet": "def load_stereo_chemical_props() -> Tuple[\n def make_bond_key(atom1_name, atom2_name):\ndef sequence_to_onehot(\n sequence: str, mapping: Mapping[str, int], map_unknown_to_x: bool = False\n) -> np.ndarray:\ndef _make_standard_atom_mask() -> np.ndarray:\ndef chi_angle_atom(atom_index: int) -> np.ndarray:\ndef _make_rigid_transformation_4x4(ex, ey, translation):\ndef _make_rigid_group_constants():\ndef make_atom14_dists_bounds(\n overlap_tolerance=1.5, 
bond_length_tolerance_factor=15\n):\ndef _make_atom14_ambiguity_feats():\ndef aatype_to_str_sequence(aatype):\nHHBLITS_AA_TO_ID = {\n \"A\": 0,\n \"B\": 2,\n \"C\": 1,\n \"D\": 2,\n \"E\": 3,\n \"F\": 4,\n \"G\": 5,\n \"H\": 6,\n \"I\": 7,\n \"J\": 20,\n \"K\": 8,\n \"L\": 9,\n \"M\": 10,\n \"N\": 11,\n \"O\": 20,\n \"P\": 12,\n \"Q\": 13,\n \"R\": 14,\n \"S\": 15,\n \"T\": 16,\n \"U\": 1,\n \"V\": 17,\n \"W\": 18,\n \"X\": 20,\n \"Y\": 19,\n \"Z\": 3,\n \"-\": 21,\n}\nID_TO_HHBLITS_AA = {\n 0: \"A\",\n 1: \"C\", # Also U.\n 2: \"D\", # Also B.\n 3: \"E\", # Also Z.\n 4: \"F\",\n 5: \"G\",\n 6: \"H\",\n 7: \"I\",\n 8: \"K\",\n 9: \"L\",\n 10: \"M\",\n 11: \"N\",\n 12: \"P\",\n 13: \"Q\",\n 14: \"R\",\n 15: \"S\",\n 16: \"T\",\n 17: \"V\",\n 18: \"W\",\n 19: \"Y\",\n 20: \"X\", # Includes J and O.\n 21: \"-\",\n}\nMAP_HHBLITS_AATYPE_TO_OUR_AATYPE = tuple(\n restypes_with_x_and_gap.index(ID_TO_HHBLITS_AA[i])\n for i in range(len(restypes_with_x_and_gap))\n)\nSTANDARD_ATOM_MASK = _make_standard_atom_mask()" }, { "identifier": "frames_and_literature_positions_to_atom14_pos", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def frames_and_literature_positions_to_atom14_pos(\n r: Rigid,\n aatype: torch.Tensor,\n default_frames,\n group_idx,\n atom_mask,\n lit_positions,\n):\n # [*, N, 14, 4, 4]\n default_4x4 = default_frames[aatype, ...]\n\n # [*, N, 14]\n group_mask = group_idx[aatype, ...]\n\n # [*, N, 14, 8]\n group_mask = nn.functional.one_hot(\n group_mask,\n num_classes=default_frames.shape[-3],\n )\n\n # [*, N, 14, 8]\n t_atoms_to_global = r[..., None, :] * group_mask\n\n # [*, N, 14]\n t_atoms_to_global = t_atoms_to_global.map_tensor_fn(\n lambda x: torch.sum(x, dim=-1)\n )\n\n # [*, N, 14, 1]\n atom_mask = atom_mask[aatype, ...].unsqueeze(-1)\n\n # [*, N, 14, 3]\n lit_positions = lit_positions[aatype, ...]\n pred_positions = t_atoms_to_global.apply(lit_positions)\n pred_positions = pred_positions * atom_mask\n\n return pred_positions" }, { "identifier": "torsion_angles_to_frames", "path": "frame2seq/openfold/utils/feats.py", "snippet": "def torsion_angles_to_frames(\n r: Rigid,\n alpha: torch.Tensor,\n aatype: torch.Tensor,\n rrgdf: torch.Tensor,\n):\n # [*, N, 8, 4, 4]\n default_4x4 = rrgdf[aatype, ...]\n\n # [*, N, 8] transformations, i.e.\n # One [*, N, 8, 3, 3] rotation matrix and\n # One [*, N, 8, 3] translation matrix\n default_r = r.from_tensor_4x4(default_4x4)\n\n bb_rot = alpha.new_zeros((*((1,) * len(alpha.shape[:-1])), 2))\n bb_rot[..., 1] = 1\n\n # [*, N, 8, 2]\n alpha = torch.cat(\n [bb_rot.expand(*alpha.shape[:-2], -1, -1), alpha], dim=-2\n )\n\n # [*, N, 8, 3, 3]\n # Produces rotation matrices of the form:\n # [\n # [1, 0 , 0 ],\n # [0, a_2,-a_1],\n # [0, a_1, a_2]\n # ]\n # This follows the original code rather than the supplement, which uses\n # different indices.\n\n all_rots = alpha.new_zeros(default_r.get_rots().get_rot_mats().shape)\n all_rots[..., 0, 0] = 1\n all_rots[..., 1, 1] = alpha[..., 1]\n all_rots[..., 1, 2] = -alpha[..., 0]\n all_rots[..., 2, 1:] = alpha\n\n all_rots = Rigid(Rotation(rot_mats=all_rots), None)\n\n all_frames = default_r.compose(all_rots)\n\n chi2_frame_to_frame = all_frames[..., 5]\n chi3_frame_to_frame = all_frames[..., 6]\n chi4_frame_to_frame = all_frames[..., 7]\n\n chi1_frame_to_bb = all_frames[..., 4]\n chi2_frame_to_bb = chi1_frame_to_bb.compose(chi2_frame_to_frame)\n chi3_frame_to_bb = chi2_frame_to_bb.compose(chi3_frame_to_frame)\n chi4_frame_to_bb = chi3_frame_to_bb.compose(chi4_frame_to_frame)\n\n all_frames_to_bb = 
Rigid.cat(\n [\n all_frames[..., :5],\n chi2_frame_to_bb.unsqueeze(-1),\n chi3_frame_to_bb.unsqueeze(-1),\n chi4_frame_to_bb.unsqueeze(-1),\n ],\n dim=-1,\n )\n\n all_frames_to_global = r[..., None].compose(all_frames_to_bb)\n\n return all_frames_to_global" }, { "identifier": "is_fp16_enabled", "path": "frame2seq/openfold/utils/precision_utils.py", "snippet": "def is_fp16_enabled():\n # Autocast world\n try:\n fp16_enabled = torch.get_autocast_gpu_dtype() == torch.float16\n fp16_enabled = fp16_enabled and torch.is_autocast_enabled()\n except AttributeError:\n fp16_enabled = False\n\n return fp16_enabled" }, { "identifier": "Rotation", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rotation:\n \"\"\"\n A 3D rotation. Depending on how the object is initialized, the\n rotation is represented by either a rotation matrix or a\n quaternion, though both formats are made available by helper functions.\n To simplify gradient computation, the underlying format of the\n rotation cannot be changed in-place. Like Rigid, the class is designed\n to mimic the behavior of a torch Tensor, almost as if each Rotation\n object were a tensor of rotations, in one format or another.\n \"\"\"\n def __init__(self,\n rot_mats: Optional[torch.Tensor] = None,\n quats: Optional[torch.Tensor] = None,\n normalize_quats: bool = True,\n ):\n \"\"\"\n Args:\n rot_mats:\n A [*, 3, 3] rotation matrix tensor. Mutually exclusive with\n quats\n quats:\n A [*, 4] quaternion. Mutually exclusive with rot_mats. If\n normalize_quats is not True, must be a unit quaternion\n normalize_quats:\n If quats is specified, whether to normalize quats\n \"\"\"\n if((rot_mats is None and quats is None) or \n (rot_mats is not None and quats is not None)):\n raise ValueError(\"Exactly one input argument must be specified\")\n\n if((rot_mats is not None and rot_mats.shape[-2:] != (3, 3)) or \n (quats is not None and quats.shape[-1] != 4)):\n raise ValueError(\n \"Incorrectly shaped rotation matrix or quaternion\"\n )\n\n # Force full-precision\n if(quats is not None):\n quats = quats.to(dtype=torch.float32)\n if(rot_mats is not None):\n rot_mats = rot_mats.to(dtype=torch.float32)\n\n if(quats is not None and normalize_quats):\n quats = quats / torch.linalg.norm(quats, dim=-1, keepdim=True)\n\n self._rot_mats = rot_mats\n self._quats = quats\n\n @staticmethod\n def identity(\n shape,\n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None,\n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rotation:\n \"\"\"\n Returns an identity Rotation.\n\n Args:\n shape:\n The \"shape\" of the resulting Rotation object. See documentation\n for the shape property\n dtype:\n The torch dtype for the rotation\n device:\n The torch device for the new rotation\n requires_grad:\n Whether the underlying tensors in the new rotation object\n should require gradient computation\n fmt:\n One of \"quat\" or \"rot_mat\". 
Determines the underlying format\n of the new object's rotation \n Returns:\n A new identity rotation\n \"\"\"\n if(fmt == \"rot_mat\"):\n rot_mats = identity_rot_mats(\n shape, dtype, device, requires_grad,\n )\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(fmt == \"quat\"):\n quats = identity_quats(shape, dtype, device, requires_grad)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(f\"Invalid format: f{fmt}\")\n\n # Magic methods\n\n def __getitem__(self, index: Any) -> Rotation:\n \"\"\"\n Allows torch-style indexing over the virtual shape of the rotation\n object. See documentation for the shape property.\n\n Args:\n index:\n A torch index. E.g. (1, 3, 2), or (slice(None,))\n Returns:\n The indexed rotation\n \"\"\"\n if type(index) != tuple:\n index = (index,)\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats[index + (slice(None), slice(None))]\n return Rotation(rot_mats=rot_mats)\n elif(self._quats is not None):\n quats = self._quats[index + (slice(None),)]\n return Rotation(quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Pointwise left multiplication of the rotation with a tensor. Can be\n used to e.g. mask the Rotation.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats * right[..., None, None]\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats * right[..., None]\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rotation:\n \"\"\"\n Reverse pointwise multiplication of the rotation with a tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n \n # Properties\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the virtual shape of the rotation object. This shape is\n defined as the batch dimensions of the underlying rotation matrix\n or quaternion. 
If the Rotation was initialized with a [10, 3, 3]\n rotation matrix tensor, for example, the resulting shape would be\n [10].\n \n Returns:\n The virtual shape of the rotation object\n \"\"\"\n s = None\n if(self._quats is not None):\n s = self._quats.shape[:-1]\n else:\n s = self._rot_mats.shape[:-2]\n\n return s\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Returns the dtype of the underlying rotation.\n\n Returns:\n The dtype of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.dtype\n elif(self._quats is not None):\n return self._quats.dtype\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n The device of the underlying rotation\n\n Returns:\n The device of the underlying rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.device\n elif(self._quats is not None):\n return self._quats.device\n else:\n raise ValueError(\"Both rotations are None\")\n\n @property\n def requires_grad(self) -> bool:\n \"\"\"\n Returns the requires_grad property of the underlying rotation\n\n Returns:\n The requires_grad property of the underlying tensor\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats.requires_grad\n elif(self._quats is not None):\n return self._quats.requires_grad\n else:\n raise ValueError(\"Both rotations are None\")\n\n def get_rot_mats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a rotation matrix tensor.\n\n Returns:\n The rotation as a rotation matrix tensor\n \"\"\"\n rot_mats = self._rot_mats\n if(rot_mats is None):\n if(self._quats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n rot_mats = quat_to_rot(self._quats)\n\n return rot_mats \n\n def get_quats(self) -> torch.Tensor:\n \"\"\"\n Returns the underlying rotation as a quaternion tensor.\n\n Depending on whether the Rotation was initialized with a\n quaternion, this function may call torch.linalg.eigh.\n\n Returns:\n The rotation as a quaternion tensor.\n \"\"\"\n quats = self._quats\n if(quats is None):\n if(self._rot_mats is None):\n raise ValueError(\"Both rotations are None\")\n else:\n quats = rot_to_quat(self._rot_mats)\n\n return quats\n\n def get_cur_rot(self) -> torch.Tensor:\n \"\"\"\n Return the underlying rotation in its current form\n\n Returns:\n The stored rotation\n \"\"\"\n if(self._rot_mats is not None):\n return self._rot_mats\n elif(self._quats is not None):\n return self._quats\n else:\n raise ValueError(\"Both rotations are None\")\n\n # Rotation functions\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor, \n normalize_quats: bool = True\n ) -> Rotation:\n \"\"\"\n Returns a new quaternion Rotation after updating the current\n object's underlying rotation with a quaternion update, formatted\n as a [*, 3] tensor whose final three columns represent x, y, z such \n that (1, x, y, z) is the desired (not necessarily unit) quaternion\n update.\n\n Args:\n q_update_vec:\n A [*, 3] quaternion update tensor\n normalize_quats:\n Whether to normalize the output quaternion\n Returns:\n An updated Rotation\n \"\"\"\n quats = self.get_quats()\n new_quats = quats + quat_multiply_by_vec(quats, q_update_vec)\n return Rotation(\n rot_mats=None, \n quats=new_quats, \n normalize_quats=normalize_quats,\n )\n\n def compose_r(self, r: Rotation) -> Rotation:\n \"\"\"\n Compose the rotation matrices of the current Rotation object with\n those of another.\n\n Args:\n r:\n An update rotation object\n Returns:\n An 
updated rotation object\n \"\"\"\n r1 = self.get_rot_mats()\n r2 = r.get_rot_mats()\n new_rot_mats = rot_matmul(r1, r2)\n return Rotation(rot_mats=new_rot_mats, quats=None)\n\n def compose_q(self, r: Rotation, normalize_quats: bool = True) -> Rotation:\n \"\"\"\n Compose the quaternions of the current Rotation object with those\n of another.\n\n Depending on whether either Rotation was initialized with\n quaternions, this function may call torch.linalg.eigh.\n\n Args:\n r:\n An update rotation object\n Returns:\n An updated rotation object\n \"\"\"\n q1 = self.get_quats()\n q2 = r.get_quats()\n new_quats = quat_multiply(q1, q2)\n return Rotation(\n rot_mats=None, quats=new_quats, normalize_quats=normalize_quats\n )\n\n def apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n Apply the current Rotation as a rotation matrix to a set of 3D\n coordinates.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n return rot_vec_mul(rot_mats, pts)\n\n def invert_apply(self, pts: torch.Tensor) -> torch.Tensor:\n \"\"\"\n The inverse of the apply() method.\n\n Args:\n pts:\n A [*, 3] set of points\n Returns:\n [*, 3] inverse-rotated points\n \"\"\"\n rot_mats = self.get_rot_mats()\n inv_rot_mats = invert_rot_mat(rot_mats) \n return rot_vec_mul(inv_rot_mats, pts)\n\n def invert(self) -> Rotation:\n \"\"\"\n Returns the inverse of the current Rotation.\n\n Returns:\n The inverse of the current Rotation\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=invert_rot_mat(self._rot_mats), \n quats=None\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None,\n quats=invert_quat(self._quats),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n # \"Tensor\" stuff\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. The dimension is relative to the\n shape of the Rotation object.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed Rotation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n\n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.unsqueeze(dim if dim >= 0 else dim - 2)\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = self._quats.unsqueeze(dim if dim >= 0 else dim - 1)\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n\n @staticmethod\n def cat(\n rs: Sequence[Rotation], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates rotations along one of the batch dimensions. Analogous\n to torch.cat().\n\n Note that the output of this operation is always a rotation matrix,\n regardless of the format of input rotations.\n\n Args:\n rs: \n A list of rotation objects\n dim: \n The dimension along which the rotations should be \n concatenated\n Returns:\n A concatenated Rotation object in rotation matrix format\n \"\"\"\n rot_mats = [r.get_rot_mats() for r in rs]\n rot_mats = torch.cat(rot_mats, dim=dim if dim >= 0 else dim - 2)\n\n return Rotation(rot_mats=rot_mats, quats=None) \n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rotation:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying rotation tensors,\n mapping over the rotation dimension(s). Can be used e.g. 
to sum out\n a one-hot batch dimension.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rotation \n Returns:\n The transformed Rotation object\n \"\"\" \n if(self._rot_mats is not None):\n rot_mats = self._rot_mats.view(self._rot_mats.shape[:-2] + (9,))\n rot_mats = torch.stack(\n list(map(fn, torch.unbind(rot_mats, dim=-1))), dim=-1\n )\n rot_mats = rot_mats.view(rot_mats.shape[:-1] + (3, 3))\n return Rotation(rot_mats=rot_mats, quats=None)\n elif(self._quats is not None):\n quats = torch.stack(\n list(map(fn, torch.unbind(self._quats, dim=-1))), dim=-1\n )\n return Rotation(rot_mats=None, quats=quats, normalize_quats=False)\n else:\n raise ValueError(\"Both rotations are None\")\n \n def cuda(self) -> Rotation:\n \"\"\"\n Analogous to the cuda() method of torch Tensors\n\n Returns:\n A copy of the Rotation in CUDA memory\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.cuda(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.cuda(),\n normalize_quats=False\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def to(self, \n device: Optional[torch.device], \n dtype: Optional[torch.dtype]\n ) -> Rotation:\n \"\"\"\n Analogous to the to() method of torch Tensors\n\n Args:\n device:\n A torch device\n dtype:\n A torch dtype\n Returns:\n A copy of the Rotation using the new device and dtype\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(\n rot_mats=self._rot_mats.to(device=device, dtype=dtype), \n quats=None,\n )\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.to(device=device, dtype=dtype),\n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")\n\n def detach(self) -> Rotation:\n \"\"\"\n Returns a copy of the Rotation whose underlying Tensor has been\n detached from its torch graph.\n\n Returns:\n A copy of the Rotation whose underlying Tensor has been detached\n from its torch graph\n \"\"\"\n if(self._rot_mats is not None):\n return Rotation(rot_mats=self._rot_mats.detach(), quats=None)\n elif(self._quats is not None):\n return Rotation(\n rot_mats=None, \n quats=self._quats.detach(), \n normalize_quats=False,\n )\n else:\n raise ValueError(\"Both rotations are None\")" }, { "identifier": "Rigid", "path": "frame2seq/openfold/utils/rigid_utils.py", "snippet": "class Rigid:\n \"\"\"\n A class representing a rigid transformation. Little more than a wrapper\n around two objects: a Rotation object and a [*, 3] translation\n Designed to behave approximately like a single torch tensor with the \n shape of the shared batch dimensions of its component parts.\n \"\"\"\n def __init__(self, \n rots: Optional[Rotation],\n trans: Optional[torch.Tensor],\n ):\n \"\"\"\n Args:\n rots: A [*, 3, 3] rotation tensor\n trans: A corresponding [*, 3] translation tensor\n \"\"\"\n # (we need device, dtype, etc. 
from at least one input)\n\n batch_dims, dtype, device, requires_grad = None, None, None, None\n if(trans is not None):\n batch_dims = trans.shape[:-1]\n dtype = trans.dtype\n device = trans.device\n requires_grad = trans.requires_grad\n elif(rots is not None):\n batch_dims = rots.shape\n dtype = rots.dtype\n device = rots.device\n requires_grad = rots.requires_grad\n else:\n raise ValueError(\"At least one input argument must be specified\")\n\n if(rots is None):\n rots = Rotation.identity(\n batch_dims, dtype, device, requires_grad,\n )\n elif(trans is None):\n trans = identity_trans(\n batch_dims, dtype, device, requires_grad,\n )\n\n if((rots.shape != trans.shape[:-1]) or\n (rots.device != trans.device)):\n raise ValueError(\"Rots and trans incompatible\")\n\n # Force full precision. Happens to the rotations automatically.\n trans = trans.to(dtype=torch.float32)\n\n self._rots = rots\n self._trans = trans\n\n @staticmethod\n def identity(\n shape: Tuple[int], \n dtype: Optional[torch.dtype] = None,\n device: Optional[torch.device] = None, \n requires_grad: bool = True,\n fmt: str = \"quat\",\n ) -> Rigid:\n \"\"\"\n Constructs an identity transformation.\n\n Args:\n shape: \n The desired shape\n dtype: \n The dtype of both internal tensors\n device: \n The device of both internal tensors\n requires_grad: \n Whether grad should be enabled for the internal tensors\n Returns:\n The identity transformation\n \"\"\"\n return Rigid(\n Rotation.identity(shape, dtype, device, requires_grad, fmt=fmt),\n identity_trans(shape, dtype, device, requires_grad),\n )\n\n def __getitem__(self, \n index: Any,\n ) -> Rigid:\n \"\"\" \n Indexes the affine transformation with PyTorch-style indices.\n The index is applied to the shared dimensions of both the rotation\n and the translation.\n\n E.g.::\n\n r = Rotation(rot_mats=torch.rand(10, 10, 3, 3), quats=None)\n t = Rigid(r, torch.rand(10, 10, 3))\n indexed = t[3, 4:6]\n assert(indexed.shape == (2,))\n assert(indexed.get_rots().shape == (2,))\n assert(indexed.get_trans().shape == (2, 3))\n\n Args:\n index: A standard torch tensor index. E.g. 8, (10, None, 3),\n or (3, slice(0, 1, None))\n Returns:\n The indexed tensor \n \"\"\"\n if type(index) != tuple:\n index = (index,)\n \n return Rigid(\n self._rots[index],\n self._trans[index + (slice(None),)],\n )\n\n def __mul__(self,\n right: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Pointwise left multiplication of the transformation with a tensor.\n Can be used to e.g. 
mask the Rigid.\n\n Args:\n right:\n The tensor multiplicand\n Returns:\n The product\n \"\"\"\n if not(isinstance(right, torch.Tensor)):\n raise TypeError(\"The other multiplicand must be a Tensor\")\n\n new_rots = self._rots * right\n new_trans = self._trans * right[..., None]\n\n return Rigid(new_rots, new_trans)\n\n def __rmul__(self,\n left: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Reverse pointwise multiplication of the transformation with a \n tensor.\n\n Args:\n left:\n The left multiplicand\n Returns:\n The product\n \"\"\"\n return self.__mul__(left)\n\n @property\n def shape(self) -> torch.Size:\n \"\"\"\n Returns the shape of the shared dimensions of the rotation and\n the translation.\n \n Returns:\n The shape of the transformation\n \"\"\"\n s = self._trans.shape[:-1]\n return s\n\n @property\n def device(self) -> torch.device:\n \"\"\"\n Returns the device on which the Rigid's tensors are located.\n\n Returns:\n The device on which the Rigid's tensors are located\n \"\"\"\n return self._trans.device\n\n def get_rots(self) -> Rotation:\n \"\"\"\n Getter for the rotation.\n\n Returns:\n The rotation object\n \"\"\"\n return self._rots\n\n def get_trans(self) -> torch.Tensor:\n \"\"\"\n Getter for the translation.\n\n Returns:\n The stored translation\n \"\"\"\n return self._trans\n\n def compose_q_update_vec(self, \n q_update_vec: torch.Tensor,\n ) -> Rigid:\n \"\"\"\n Composes the transformation with a quaternion update vector of\n shape [*, 6], where the final 6 columns represent the x, y, and\n z values of a quaternion of form (1, x, y, z) followed by a 3D\n translation.\n\n Args:\n q_vec: The quaternion update vector.\n Returns:\n The composed transformation.\n \"\"\"\n q_vec, t_vec = q_update_vec[..., :3], q_update_vec[..., 3:]\n new_rots = self._rots.compose_q_update_vec(q_vec)\n\n trans_update = self._rots.apply(t_vec)\n new_translation = self._trans + trans_update\n\n return Rigid(new_rots, new_translation)\n\n def compose(self,\n r: Rigid,\n ) -> Rigid:\n \"\"\"\n Composes the current rigid object with another.\n\n Args:\n r:\n Another Rigid object\n Returns:\n The composition of the two transformations\n \"\"\"\n new_rot = self._rots.compose_r(r._rots)\n new_trans = self._rots.apply(r._trans) + self._trans\n return Rigid(new_rot, new_trans)\n\n def apply(self, \n pts: torch.Tensor,\n ) -> torch.Tensor:\n \"\"\"\n Applies the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor.\n Returns:\n The transformed points.\n \"\"\"\n rotated = self._rots.apply(pts) \n return rotated + self._trans\n\n def invert_apply(self, \n pts: torch.Tensor\n ) -> torch.Tensor:\n \"\"\"\n Applies the inverse of the transformation to a coordinate tensor.\n\n Args:\n pts: A [*, 3] coordinate tensor\n Returns:\n The transformed points.\n \"\"\"\n pts = pts - self._trans\n return self._rots.invert_apply(pts) \n\n def invert(self) -> Rigid:\n \"\"\"\n Inverts the transformation.\n\n Returns:\n The inverse transformation.\n \"\"\"\n rot_inv = self._rots.invert() \n trn_inv = rot_inv.apply(self._trans)\n\n return Rigid(rot_inv, -1 * trn_inv)\n\n def map_tensor_fn(self, \n fn: Callable[torch.Tensor, torch.Tensor]\n ) -> Rigid:\n \"\"\"\n Apply a Tensor -> Tensor function to underlying translation and\n rotation tensors, mapping over the translation/rotation dimensions\n respectively.\n\n Args:\n fn:\n A Tensor -> Tensor function to be mapped over the Rigid\n Returns:\n The transformed Rigid object\n \"\"\" \n new_rots = self._rots.map_tensor_fn(fn) \n new_trans = 
torch.stack(\n list(map(fn, torch.unbind(self._trans, dim=-1))), \n dim=-1\n )\n\n return Rigid(new_rots, new_trans)\n\n def to_tensor_4x4(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a homogenous transformation tensor.\n\n Returns:\n A [*, 4, 4] homogenous transformation tensor\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 4, 4))\n tensor[..., :3, :3] = self._rots.get_rot_mats()\n tensor[..., :3, 3] = self._trans\n tensor[..., 3, 3] = 1\n return tensor\n\n @staticmethod\n def from_tensor_4x4(\n t: torch.Tensor\n ) -> Rigid:\n \"\"\"\n Constructs a transformation from a homogenous transformation\n tensor.\n\n Args:\n t: [*, 4, 4] homogenous transformation tensor\n Returns:\n T object with shape [*]\n \"\"\"\n if(t.shape[-2:] != (4, 4)):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n rots = Rotation(rot_mats=t[..., :3, :3], quats=None)\n trans = t[..., :3, 3]\n \n return Rigid(rots, trans)\n\n def to_tensor_7(self) -> torch.Tensor:\n \"\"\"\n Converts a transformation to a tensor with 7 final columns, four \n for the quaternion followed by three for the translation.\n\n Returns:\n A [*, 7] tensor representation of the transformation\n \"\"\"\n tensor = self._trans.new_zeros((*self.shape, 7))\n tensor[..., :4] = self._rots.get_quats()\n tensor[..., 4:] = self._trans\n\n return tensor\n\n @staticmethod\n def from_tensor_7(\n t: torch.Tensor,\n normalize_quats: bool = False,\n ) -> Rigid:\n if(t.shape[-1] != 7):\n raise ValueError(\"Incorrectly shaped input tensor\")\n\n quats, trans = t[..., :4], t[..., 4:]\n\n rots = Rotation(\n rot_mats=None, \n quats=quats, \n normalize_quats=normalize_quats\n )\n\n return Rigid(rots, trans)\n\n @staticmethod\n def from_3_points(\n p_neg_x_axis: torch.Tensor, \n origin: torch.Tensor, \n p_xy_plane: torch.Tensor, \n eps: float = 1e-8\n ) -> Rigid:\n \"\"\"\n Implements algorithm 21. Constructs transformations from sets of 3 \n points using the Gram-Schmidt algorithm.\n\n Args:\n p_neg_x_axis: [*, 3] coordinates\n origin: [*, 3] coordinates used as frame origins\n p_xy_plane: [*, 3] coordinates\n eps: Small epsilon value\n Returns:\n A transformation object of shape [*]\n \"\"\"\n p_neg_x_axis = torch.unbind(p_neg_x_axis, dim=-1)\n origin = torch.unbind(origin, dim=-1)\n p_xy_plane = torch.unbind(p_xy_plane, dim=-1)\n\n e0 = [c1 - c2 for c1, c2 in zip(origin, p_neg_x_axis)]\n e1 = [c1 - c2 for c1, c2 in zip(p_xy_plane, origin)]\n\n denom = torch.sqrt(sum((c * c for c in e0)) + eps)\n e0 = [c / denom for c in e0]\n dot = sum((c1 * c2 for c1, c2 in zip(e0, e1)))\n e1 = [c2 - c1 * dot for c1, c2 in zip(e0, e1)]\n denom = torch.sqrt(sum((c * c for c in e1)) + eps)\n e1 = [c / denom for c in e1]\n e2 = [\n e0[1] * e1[2] - e0[2] * e1[1],\n e0[2] * e1[0] - e0[0] * e1[2],\n e0[0] * e1[1] - e0[1] * e1[0],\n ]\n\n rots = torch.stack([c for tup in zip(e0, e1, e2) for c in tup], dim=-1)\n rots = rots.reshape(rots.shape[:-1] + (3, 3))\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, torch.stack(origin, dim=-1))\n\n def unsqueeze(self, \n dim: int,\n ) -> Rigid:\n \"\"\"\n Analogous to torch.unsqueeze. 
The dimension is relative to the\n shared dimensions of the rotation/translation.\n \n Args:\n dim: A positive or negative dimension index.\n Returns:\n The unsqueezed transformation.\n \"\"\"\n if dim >= len(self.shape):\n raise ValueError(\"Invalid dimension\")\n rots = self._rots.unsqueeze(dim)\n trans = self._trans.unsqueeze(dim if dim >= 0 else dim - 1)\n\n return Rigid(rots, trans)\n\n @staticmethod\n def cat(\n ts: Sequence[Rigid], \n dim: int,\n ) -> Rigid:\n \"\"\"\n Concatenates transformations along a new dimension.\n\n Args:\n ts: \n A list of T objects\n dim: \n The dimension along which the transformations should be \n concatenated\n Returns:\n A concatenated transformation object\n \"\"\"\n rots = Rotation.cat([t._rots for t in ts], dim) \n trans = torch.cat(\n [t._trans for t in ts], dim=dim if dim >= 0 else dim - 1\n )\n\n return Rigid(rots, trans)\n\n def apply_rot_fn(self, fn: Callable[Rotation, Rotation]) -> Rigid:\n \"\"\"\n Applies a Rotation -> Rotation function to the stored rotation\n object.\n\n Args:\n fn: A function of type Rotation -> Rotation\n Returns:\n A transformation object with a transformed rotation.\n \"\"\"\n return Rigid(fn(self._rots), self._trans)\n\n def apply_trans_fn(self, fn: Callable[torch.Tensor, torch.Tensor]) -> Rigid:\n \"\"\"\n Applies a Tensor -> Tensor function to the stored translation.\n\n Args:\n fn: \n A function of type Tensor -> Tensor to be applied to the\n translation\n Returns:\n A transformation object with a transformed translation.\n \"\"\"\n return Rigid(self._rots, fn(self._trans))\n\n def scale_translation(self, trans_scale_factor: float) -> Rigid:\n \"\"\"\n Scales the translation by a constant factor.\n\n Args:\n trans_scale_factor:\n The constant factor\n Returns:\n A transformation object with a scaled translation.\n \"\"\"\n fn = lambda t: t * trans_scale_factor\n return self.apply_trans_fn(fn)\n\n def stop_rot_gradient(self) -> Rigid:\n \"\"\"\n Detaches the underlying rotation object\n\n Returns:\n A transformation object with detached rotations\n \"\"\"\n fn = lambda r: r.detach()\n return self.apply_rot_fn(fn)\n\n @staticmethod\n def make_transform_from_reference(n_xyz, ca_xyz, c_xyz, eps=1e-20):\n \"\"\"\n Returns a transformation object from reference coordinates.\n \n Note that this method does not take care of symmetries. If you \n provide the atom positions in the non-standard way, the N atom will \n end up not at [-0.527250, 1.359329, 0.0] but instead at \n [-0.527250, -1.359329, 0.0]. You need to take care of such cases in \n your code.\n \n Args:\n n_xyz: A [*, 3] tensor of nitrogen xyz coordinates.\n ca_xyz: A [*, 3] tensor of carbon alpha xyz coordinates.\n c_xyz: A [*, 3] tensor of carbon xyz coordinates.\n Returns:\n A transformation object. 
After applying the translation and \n rotation to the reference backbone, the coordinates will \n approximately equal to the input coordinates.\n \"\"\" \n translation = -1 * ca_xyz\n n_xyz = n_xyz + translation\n c_xyz = c_xyz + translation\n\n c_x, c_y, c_z = [c_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2)\n sin_c1 = -c_y / norm\n cos_c1 = c_x / norm\n zeros = sin_c1.new_zeros(sin_c1.shape)\n ones = sin_c1.new_ones(sin_c1.shape)\n\n c1_rots = sin_c1.new_zeros((*sin_c1.shape, 3, 3))\n c1_rots[..., 0, 0] = cos_c1\n c1_rots[..., 0, 1] = -1 * sin_c1\n c1_rots[..., 1, 0] = sin_c1\n c1_rots[..., 1, 1] = cos_c1\n c1_rots[..., 2, 2] = 1\n\n norm = torch.sqrt(eps + c_x ** 2 + c_y ** 2 + c_z ** 2)\n sin_c2 = c_z / norm\n cos_c2 = torch.sqrt(c_x ** 2 + c_y ** 2) / norm\n\n c2_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n c2_rots[..., 0, 0] = cos_c2\n c2_rots[..., 0, 2] = sin_c2\n c2_rots[..., 1, 1] = 1\n c2_rots[..., 2, 0] = -1 * sin_c2\n c2_rots[..., 2, 2] = cos_c2\n\n c_rots = rot_matmul(c2_rots, c1_rots)\n n_xyz = rot_vec_mul(c_rots, n_xyz)\n\n _, n_y, n_z = [n_xyz[..., i] for i in range(3)]\n norm = torch.sqrt(eps + n_y ** 2 + n_z ** 2)\n sin_n = -n_z / norm\n cos_n = n_y / norm\n\n n_rots = sin_c2.new_zeros((*sin_c2.shape, 3, 3))\n n_rots[..., 0, 0] = 1\n n_rots[..., 1, 1] = cos_n\n n_rots[..., 1, 2] = -1 * sin_n\n n_rots[..., 2, 1] = sin_n\n n_rots[..., 2, 2] = cos_n\n\n rots = rot_matmul(n_rots, c_rots)\n\n rots = rots.transpose(-1, -2)\n translation = -1 * translation\n\n rot_obj = Rotation(rot_mats=rots, quats=None)\n\n return Rigid(rot_obj, translation)\n\n def cuda(self) -> Rigid:\n \"\"\"\n Moves the transformation object to GPU memory\n \n Returns:\n A version of the transformation on GPU\n \"\"\"\n return Rigid(self._rots.cuda(), self._trans.cuda())" }, { "identifier": "dict_multimap", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def dict_multimap(fn, dicts):\n first = dicts[0]\n new_dict = {}\n for k, v in first.items():\n all_v = [d[k] for d in dicts]\n if type(v) is dict:\n new_dict[k] = dict_multimap(fn, all_v)\n else:\n new_dict[k] = fn(all_v)\n\n return new_dict" }, { "identifier": "permute_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def permute_final_dims(tensor: torch.Tensor, inds: List[int]):\n zero_index = -1 * len(inds)\n first_inds = list(range(len(tensor.shape[:zero_index])))\n return tensor.permute(first_inds + [zero_index + i for i in inds])" }, { "identifier": "flatten_final_dims", "path": "frame2seq/openfold/utils/tensor_utils.py", "snippet": "def flatten_final_dims(t: torch.Tensor, no_dims: int):\n return t.reshape(t.shape[:-no_dims] + (-1,))" } ]
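A minimal usage sketch (not part of the record) for the Rotation/Rigid API documented in the context snippets above, assuming the frame2seq.openfold.utils.rigid_utils module shown there is importable:

import torch

from frame2seq.openfold.utils.rigid_utils import Rigid

# Identity transform over a batch of 5 residues, stored as quaternions.
rigids = Rigid.identity((5,), dtype=torch.float32, device="cpu", fmt="quat")

# Build frames from three reference points per residue (Algorithm 21 /
# Gram-Schmidt, per the from_3_points docstring above) and compose.
frames = Rigid.from_3_points(
    p_neg_x_axis=torch.randn(5, 3),
    origin=torch.randn(5, 3),
    p_xy_plane=torch.randn(5, 3),
)
composed = rigids.compose(frames)

# apply() followed by invert_apply() round-trips coordinates.
pts = torch.randn(5, 3)
assert torch.allclose(composed.invert_apply(composed.apply(pts)), pts, atol=1e-4)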
from functools import reduce from operator import mul from typing import Optional, Tuple, Sequence from frame2seq.openfold.model.primitives import Linear, LayerNorm, ipa_point_weights_init_ from frame2seq.openfold.np.residue_constants import ( restype_rigid_group_default_frame, restype_atom14_to_rigid_group, restype_atom14_mask, restype_atom14_rigid_group_positions, ) from frame2seq.openfold.utils.feats import ( frames_and_literature_positions_to_atom14_pos, torsion_angles_to_frames, ) from frame2seq.openfold.utils.precision_utils import is_fp16_enabled from frame2seq.openfold.utils.rigid_utils import Rotation, Rigid from frame2seq.openfold.utils.tensor_utils import ( dict_multimap, permute_final_dims, flatten_final_dims, ) import importlib import math import sys import torch import torch.nn as nn
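Illustration (not part of the record) of dict_multimap from the imports above; its one definition appears verbatim in the context list. It maps fn over the values collected under each key, recursing into nested dicts, which is how per-iteration structure-module outputs are typically stacked:

import torch

from frame2seq.openfold.utils.tensor_utils import dict_multimap

# Two per-iteration output dicts with identically shaped values.
outputs = [
    {"positions": torch.zeros(2, 3), "unnormalized_angles": torch.zeros(2, 7, 2)},
    {"positions": torch.ones(2, 3), "unnormalized_angles": torch.ones(2, 7, 2)},
]

# fn receives the list of values gathered under each key.
stacked = dict_multimap(torch.stack, outputs)
print(stacked["positions"].shape)  # torch.Size([2, 2, 3])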
14779
# [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. """ def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate)
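The cropped code above leans on two reshape helpers whose one-line definitions sit in the context list; a quick check of their semantics (illustrative only, shapes chosen arbitrarily):

import torch

from frame2seq.openfold.utils.tensor_utils import (
    flatten_final_dims,
    permute_final_dims,
)

x = torch.randn(4, 10, 8, 16)                  # e.g. [*, N_res, H, C_hidden]
print(permute_final_dims(x, (1, 0, 2)).shape)  # [4, 8, 10, 16]: H moved ahead of N_res

o = torch.randn(4, 10, 8, 16)                  # e.g. [*, N_res, H, C_hidden]
print(flatten_final_dims(o, 2).shape)          # [4, 10, 128]: H * C_hidden merged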
# Copyright 2021 AlQuraishi Laboratory # Copyright 2021 DeepMind Technologies Limited # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. attn_core_inplace_cuda = False class AngleResnetBlock(nn.Module): def __init__(self, c_hidden): """ Args: c_hidden: Hidden channel dimension """ super(AngleResnetBlock, self).__init__() self.c_hidden = c_hidden self.linear_1 = Linear(self.c_hidden, self.c_hidden, init="relu") self.linear_2 = Linear(self.c_hidden, self.c_hidden, init="final") self.relu = nn.ReLU() def forward(self, a: torch.Tensor) -> torch.Tensor: s_initial = a a = self.relu(a) a = self.linear_1(a) a = self.relu(a) a = self.linear_2(a) return a + s_initial class AngleResnet(nn.Module): """ Implements Algorithm 20, lines 11-14 """ def __init__(self, c_in, c_hidden, no_blocks, no_angles, epsilon): """ Args: c_in: Input channel dimension c_hidden: Hidden channel dimension no_blocks: Number of resnet blocks no_angles: Number of torsion angles to generate epsilon: Small constant for normalization """ super(AngleResnet, self).__init__() self.c_in = c_in self.c_hidden = c_hidden self.no_blocks = no_blocks self.no_angles = no_angles self.eps = epsilon self.linear_in = Linear(self.c_in, self.c_hidden) self.linear_initial = Linear(self.c_in, self.c_hidden) self.layers = nn.ModuleList() for _ in range(self.no_blocks): layer = AngleResnetBlock(c_hidden=self.c_hidden) self.layers.append(layer) self.linear_out = Linear(self.c_hidden, self.no_angles * 2) self.relu = nn.ReLU() def forward( self, s: torch.Tensor, s_initial: torch.Tensor ) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: s: [*, C_hidden] single embedding s_initial: [*, C_hidden] single embedding as of the start of the StructureModule Returns: [*, no_angles, 2] predicted angles """ # NOTE: The ReLU's applied to the inputs are absent from the supplement # pseudocode but present in the source. For maximal compatibility with # the pretrained weights, I'm going with the source. # [*, C_hidden] s_initial = self.relu(s_initial) s_initial = self.linear_initial(s_initial) s = self.relu(s) s = self.linear_in(s) s = s + s_initial for l in self.layers: s = l(s) s = self.relu(s) # [*, no_angles * 2] s = self.linear_out(s) # [*, no_angles, 2] s = s.view(s.shape[:-1] + (-1, 2)) unnormalized_s = s norm_denom = torch.sqrt( torch.clamp( torch.sum(s ** 2, dim=-1, keepdim=True), min=self.eps, ) ) s = s / norm_denom return unnormalized_s, s class InvariantPointAttention(nn.Module): """ Implements Algorithm 22. 
""" def __init__( self, c_s: int, c_z: int, c_hidden: int, no_heads: int, no_qk_points: int, no_v_points: int, inf: float = 1e5, eps: float = 1e-8, ): """ Args: c_s: Single representation channel dimension c_z: Pair representation channel dimension c_hidden: Hidden channel dimension no_heads: Number of attention heads no_qk_points: Number of query/key points to generate no_v_points: Number of value points to generate """ super(InvariantPointAttention, self).__init__() self.c_s = c_s self.c_z = c_z self.c_hidden = c_hidden self.no_heads = no_heads self.no_qk_points = no_qk_points self.no_v_points = no_v_points self.inf = inf self.eps = eps # These linear layers differ from their specifications in the # supplement. There, they lack bias and use Glorot initialization. # Here as in the official source, they have bias and use the default # Lecun initialization. hc = self.c_hidden * self.no_heads self.linear_q = Linear(self.c_s, hc) self.linear_kv = Linear(self.c_s, 2 * hc) hpq = self.no_heads * self.no_qk_points * 3 self.linear_q_points = Linear(self.c_s, hpq) hpkv = self.no_heads * (self.no_qk_points + self.no_v_points) * 3 self.linear_kv_points = Linear(self.c_s, hpkv) hpv = self.no_heads * self.no_v_points * 3 self.linear_b = Linear(self.c_z, self.no_heads) self.head_weights = nn.Parameter(torch.zeros((no_heads))) ipa_point_weights_init_(self.head_weights) concat_out_dim = self.no_heads * ( self.c_z + self.c_hidden + self.no_v_points * 4 ) self.linear_out = Linear(concat_out_dim, self.c_s, init="final") self.softmax = nn.Softmax(dim=-1) self.softplus = nn.Softplus() def forward( self, s: torch.Tensor, z: Optional[torch.Tensor], r: Rigid, mask: torch.Tensor, inplace_safe: bool = False, _offload_inference: bool = False, _z_reference_list: Optional[Sequence[torch.Tensor]] = None, attn_drop_rate = 0.0, ) -> torch.Tensor: """ Args: s: [*, N_res, C_s] single representation z: [*, N_res, N_res, C_z] pair representation r: [*, N_res] transformation object mask: [*, N_res] mask Returns: [*, N_res, C_s] single representation update """ if(_offload_inference and inplace_safe): z = _z_reference_list else: z = [z] ####################################### # Generate scalar and point activations ####################################### # [*, N_res, H * C_hidden] q = self.linear_q(s) kv = self.linear_kv(s) # [*, N_res, H, C_hidden] q = q.view(q.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, 2 * C_hidden] kv = kv.view(kv.shape[:-1] + (self.no_heads, -1)) # [*, N_res, H, C_hidden] k, v = torch.split(kv, self.c_hidden, dim=-1) # [*, N_res, H * P_q * 3] q_pts = self.linear_q_points(s) # This is kind of clunky, but it's how the original does it # [*, N_res, H * P_q, 3] q_pts = torch.split(q_pts, q_pts.shape[-1] // 3, dim=-1) q_pts = torch.stack(q_pts, dim=-1) q_pts = r[..., None].apply(q_pts) # [*, N_res, H, P_q, 3] q_pts = q_pts.view( q_pts.shape[:-2] + (self.no_heads, self.no_qk_points, 3) ) # [*, N_res, H * (P_q + P_v) * 3] kv_pts = self.linear_kv_points(s) # [*, N_res, H * (P_q + P_v), 3] kv_pts = torch.split(kv_pts, kv_pts.shape[-1] // 3, dim=-1) kv_pts = torch.stack(kv_pts, dim=-1) kv_pts = r[..., None].apply(kv_pts) # [*, N_res, H, (P_q + P_v), 3] kv_pts = kv_pts.view(kv_pts.shape[:-2] + (self.no_heads, -1, 3)) # [*, N_res, H, P_q/P_v, 3] k_pts, v_pts = torch.split( kv_pts, [self.no_qk_points, self.no_v_points], dim=-2 ) ########################## # Compute attention scores ########################## # [*, N_res, N_res, H] b = self.linear_b(z[0]) if(_offload_inference): assert(sys.getrefcount(z[0]) == 2) 
z[0] = z[0].cpu() # [*, H, N_res, N_res] if(is_fp16_enabled()): with torch.cuda.amp.autocast(enabled=False): a = torch.matmul( permute_final_dims(q.float(), (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k.float(), (1, 2, 0)), # [*, H, C_hidden, N_res] ) else: a = torch.matmul( permute_final_dims(q, (1, 0, 2)), # [*, H, N_res, C_hidden] permute_final_dims(k, (1, 2, 0)), # [*, H, C_hidden, N_res] ) a *= math.sqrt(1.0 / (3 * self.c_hidden)) a += (math.sqrt(1.0 / 3) * permute_final_dims(b, (2, 0, 1))) # [*, N_res, N_res, H, P_q, 3] pt_att = q_pts.unsqueeze(-4) - k_pts.unsqueeze(-5) if(inplace_safe): pt_att *= pt_att else: pt_att = pt_att ** 2 # [*, N_res, N_res, H, P_q] pt_att = sum(torch.unbind(pt_att, dim=-1)) head_weights = self.softplus(self.head_weights).view( *((1,) * len(pt_att.shape[:-2]) + (-1, 1)) ) head_weights = head_weights * math.sqrt( 1.0 / (3 * (self.no_qk_points * 9.0 / 2)) ) if(inplace_safe): pt_att *= head_weights else: pt_att = pt_att * head_weights # [*, N_res, N_res, H] pt_att = torch.sum(pt_att, dim=-1) * (-0.5) # [*, N_res, N_res] square_mask = mask.unsqueeze(-1) * mask.unsqueeze(-2) square_mask = self.inf * (square_mask - 1) """ Frame2seq implementation of IPA regularization via attention dropout """ if attn_drop_rate > 0.0: random_square_mask = torch.rand(square_mask.shape, device=square_mask.device) random_square_mask = self.inf * -1 * (random_square_mask < attn_drop_rate) square_mask += random_square_mask # [*, H, N_res, N_res] pt_att = permute_final_dims(pt_att, (2, 0, 1)) if(inplace_safe): a += pt_att del pt_att a += square_mask.unsqueeze(-3) # in-place softmax attn_core_inplace_cuda.forward_( a, reduce(mul, a.shape[:-1]), a.shape[-1], ) else: a = a + pt_att a = a + square_mask.unsqueeze(-3) a = self.softmax(a) ################ # Compute output ################ # [*, N_res, H, C_hidden] o = torch.matmul( a, v.transpose(-2, -3).to(dtype=a.dtype) ).transpose(-2, -3) # [*, N_res, H * C_hidden] o = flatten_final_dims(o, 2) # [*, H, 3, N_res, P_v] if(inplace_safe): v_pts = permute_final_dims(v_pts, (1, 3, 0, 2)) o_pt = [ torch.matmul(a, v.to(a.dtype)) for v in torch.unbind(v_pts, dim=-3) ] o_pt = torch.stack(o_pt, dim=-3) else: o_pt = torch.sum( ( a[..., None, :, :, None] * permute_final_dims(v_pts, (1, 3, 0, 2))[..., None, :, :] ), dim=-2, ) # [*, N_res, H, P_v, 3] o_pt = permute_final_dims(o_pt, (2, 0, 3, 1)) o_pt = r[..., None, None].invert_apply(o_pt) # [*, N_res, H * P_v] o_pt_norm = flatten_final_dims( torch.sqrt(torch.sum(o_pt ** 2, dim=-1) + self.eps), 2 ) # [*, N_res, H * P_v, 3] o_pt = o_pt.reshape(*o_pt.shape[:-3], -1, 3) if(_offload_inference): z[0] = z[0].to(o_pt.device) # [*, N_res, H, C_z] o_pair = torch.matmul(a.transpose(-2, -3), z[0].to(dtype=a.dtype)) # [*, N_res, H * C_z] o_pair = flatten_final_dims(o_pair, 2) # [*, N_res, C_s] s = self.linear_out( torch.cat( (o, *torch.unbind(o_pt, dim=-1), o_pt_norm, o_pair), dim=-1 ).to(dtype=z[0].dtype) ) return s class BackboneUpdate(nn.Module): """ Implements part of Algorithm 23. 
""" def __init__(self, c_s): """ Args: c_s: Single representation channel dimension """ super(BackboneUpdate, self).__init__() self.c_s = c_s self.linear = Linear(self.c_s, 6, init="final") def forward(self, s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Args: [*, N_res, C_s] single representation Returns: [*, N_res, 6] update vector """ # [*, 6] update = self.linear(s) return update class StructureModuleTransitionLayer(nn.Module): def __init__(self, c): super(StructureModuleTransitionLayer, self).__init__() self.c = c self.linear_1 = Linear(self.c, self.c, init="relu") self.linear_2 = Linear(self.c, self.c, init="relu") self.linear_3 = Linear(self.c, self.c, init="final") self.relu = nn.ReLU() def forward(self, s): s_initial = s s = self.linear_1(s) s = self.relu(s) s = self.linear_2(s) s = self.relu(s) s = self.linear_3(s) s = s + s_initial return s class StructureModuleTransition(nn.Module): def __init__(self, c, num_layers, dropout_rate): super(StructureModuleTransition, self).__init__() self.c = c self.num_layers = num_layers self.dropout_rate = dropout_rate self.layers = nn.ModuleList() for _ in range(self.num_layers): l = StructureModuleTransitionLayer(self.c) self.layers.append(l) self.dropout = nn.Dropout(self.dropout_rate)
self.layer_norm = LayerNorm(self.c)
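The next_line field above completes the truncated __init__ with a LayerNorm. For orientation only, a conventional residual-transition forward pass (an assumption about how such a module continues, not the recorded source) might look like:

import torch
import torch.nn as nn

# Hypothetical sketch -- the record truncates the module at its __init__,
# so this continuation is an assumption based on the usual residual
# transition pattern, not the recorded source.
class TransitionSketch(nn.Module):
    def __init__(self, c: int, num_layers: int, dropout_rate: float):
        super().__init__()
        self.layers = nn.ModuleList(
            nn.Sequential(nn.Linear(c, c), nn.ReLU(), nn.Linear(c, c))
            for _ in range(num_layers)
        )
        self.dropout = nn.Dropout(dropout_rate)
        self.layer_norm = nn.LayerNorm(c)  # mirrors the gold next line

    def forward(self, s: torch.Tensor) -> torch.Tensor:
        for layer in self.layers:
            s = s + layer(s)  # residual blocks, as in the recorded layers
        s = self.dropout(s)
        return self.layer_norm(s)

print(TransitionSketch(8, 2, 0.1)(torch.randn(3, 8)).shape)  # torch.Size([3, 8])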
1
2023-12-25 09:29:36+00:00
24k
iKala/ievals
ievals/cli/ieval.py
[ { "identifier": "TGI_Evaluator", "path": "ievals/modules/qa_evaluators/tgi.py", "snippet": "class TGI_Evaluator(Evaluator):\n def __init__(\n self,\n choices,\n k,\n ip_addr,\n model_name,\n systemMessageToken=\"<|im_start|>system\\n\",\n messageEndToken=\"<|im_end|>\",\n assistantMessageToken=\"<|im_start|>assistant\\n\",\n userMessageToken=\"<|im_start|>user\\n\",\n switch_zh_hans=False,\n ):\n super(TGI_Evaluator, self).__init__(choices, model_name, k)\n self.ip_addr = ip_addr\n self.model_name = model_name\n self.userMessageToken = userMessageToken\n self.assistantMessageToken = assistantMessageToken\n self.messageEndToken = messageEndToken\n self.systemMessageToken = systemMessageToken\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. {line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text += (\n self.systemMessageToken\n + prompt[\"content\"]\n + self.messageEndToken\n )\n elif prompt[\"role\"] == \"user\":\n text += (\n self.userMessageToken + prompt[\"content\"] + self.messageEndToken\n )\n elif prompt[\"role\"] == \"assistant\":\n text += (\n self.assistantMessageToken\n + prompt[\"content\"]\n + self.messageEndToken\n )\n text += self.assistantMessageToken\n if self.converter:\n text = self.converter.convert(text)\n\n while response is None and timeout_counter <= 30:\n try:\n response = requests.post(\n f\"http://{self.ip_addr}/generate\",\n data=json.dumps(\n {\n \"inputs\": text,\n \"parameters\": {\n \"max_new_tokens\": 90,\n \"temperature\": 0.001,\n \"stop\": 
[self.messageEndToken],\n },\n }\n ),\n headers={\"Content-Type\": \"application/json\"},\n )\n r = response.json()\n if \"generated_text\" not in r:\n raise ValueError(\"not found: \" + str(r))\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.json()[\"generated_text\"].split(\n self.messageEndToken\n )[0]\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]).\",\n r\"答案:([A-D])\",\n r\"([A-D]). 
\",\n r\"^選([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案應該是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"正確答案是([A-D])\",\n r\"正確答案是 ([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "Gemini_Evaluator", "path": "ievals/modules/qa_evaluators/gemini.py", "snippet": "class Gemini_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Gemini_Evaluator, self).__init__(choices, model_name, k)\n genai.configure(api_key=api_key)\n\n self.model = genai.GenerativeModel(\n model_name,\n safety_settings=[\n {\n \"category\": \"HARM_CATEGORY_HARASSMENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_HATE_SPEECH\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_SEXUALLY_EXPLICIT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n {\n \"category\": \"HARM_CATEGORY_DANGEROUS_CONTENT\",\n \"threshold\": \"BLOCK_ONLY_HIGH\",\n },\n ],\n )\n\n self.model_name = model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI主力,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI主力,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = []\n prev_role = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text.append(prompt[\"content\"] + \"\\n\")\n elif prompt[\"role\"] == \"user\":\n if prev_role == \"system\":\n text[-1] += \"問題: \" + prompt[\"content\"] + \"\\n\"\n else:\n text.append(\"問題: \" + prompt[\"content\"] + \"\\n\")\n elif prompt[\"role\"] == \"assistant\":\n text.append(prompt[\"content\"] + \"\\n\")\n prev_role = prompt[\"role\"]\n if self.converter:\n text = [self.converter.convert(seg) for seg in text]\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.model.generate_content(text)\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n if response == None:\n response_str = \"\"\n else:\n try:\n response_str = response.text\n except (ValueError, IndexError):\n response_str = \"\"\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and 
(ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "Claude_Evaluator", "path": "ievals/modules/qa_evaluators/claude.py", "snippet": "class Claude_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Claude_Evaluator, self).__init__(choices, model_name, k)\n self.client = anthropic.Anthropic(api_key=api_key)\n self.model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請直接選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請直接選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請直接選出正確的答案。\",\n }\n ]\n\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請直接選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n text = \"\"\n for prompt in full_prompt:\n if prompt[\"role\"] == \"system\":\n text += anthropic.HUMAN_PROMPT + \" \" + prompt[\"content\"]\n elif prompt[\"role\"] == \"user\":\n text += anthropic.HUMAN_PROMPT + \" \" + prompt[\"content\"]\n elif prompt[\"role\"] == \"assistant\":\n text += anthropic.AI_PROMPT + \" \" + prompt[\"content\"]\n text += anthropic.AI_PROMPT\n if self.converter:\n text = self.converter.convert(text)\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.completions.create(\n prompt=text,\n stop_sequences=[anthropic.HUMAN_PROMPT],\n model=self.model_name,\n temperature=0.1,\n max_tokens_to_sample=300,\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.completion\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n 
ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"正確的答案應該是:.*?\\b([A-D])\\b\",\n r\"正確的選項應為:.*?\\b([A-D])\\b\",\n r\"所以答案為([A-D])\",\n r\"答案為\\s?([A-D])\",\n r\"所以下列方程式的解是([A-D])\",\n r\"选([A-D])\",\n r\"选项([A-D])\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是\\s?选?项?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str, re.DOTALL)\n else:\n break\n return ans_list" }, { "identifier": "Azure_Evaluator", "path": "ievals/modules/qa_evaluators/azure.py", "snippet": "class Azure_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(Azure_Evaluator, self).__init__(choices, model_name, k)\n self.client = AzureOpenAI(\n api_key=api_key,\n api_version=os.getenv(\"AZURE_OPENAI_VERSION\", \"2023-07-01-preview\"),\n azure_endpoint=os.getenv(\"AZURE_OPENAI_ENDPOINT\"),\n )\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n\n prompt += tmp\n\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n response = None\n timeout_counter = 0\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.chat.completions.create(\n model=self.model_name, messages=full_prompt, temperature=0.0\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n response_str = \"\"\n if response != None:\n response_str = response.choices[0].message.content\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n if response_str is None:\n response_str = \"\"\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = 
self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "GPT_Evaluator", "path": "ievals/modules/qa_evaluators/oai_complete.py", "snippet": "class GPT_Evaluator(Evaluator):\n \"\"\"\n Completion endpoint for instruction based model\n davinci, gpt-3.5-instruct\n \"\"\"\n\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(GPT_Evaluator, self).__init__(choices, model_name, k)\n openai.api_key = api_key\n self.client = openai.OpenAI(api_key=api_key)\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n text = \"\"\n for prompt in full_prompt:\n text += prompt[\"content\"] + \"\\n\"\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.completions.create(\n model=self.model_name, prompt=text, temperature=0.0\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.choices[0].text\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter: # simplified chinese\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct 
= 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "ChatGPT_Evaluator", "path": "ievals/modules/qa_evaluators/chatgpt.py", "snippet": "class ChatGPT_Evaluator(Evaluator):\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(ChatGPT_Evaluator, self).__init__(choices, model_name, k)\n openai.api_key = api_key\n self.client = openai.OpenAI(api_key=api_key)\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter: # convert to simplified chinese\n for idx, prompt in enumerate(full_prompt):\n full_prompt[idx][\"content\"] = self.converter.convert(\n prompt[\"content\"]\n )\n\n while response is None and timeout_counter <= 30:\n try:\n response = self.client.chat.completions.create(\n model=self.model_name,\n messages=full_prompt,\n temperature=0.0,\n max_tokens=200,\n )\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n if response == None:\n response_str = \"\"\n else:\n response_str = response.choices[0].message.content\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if 
save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n # manually found regex which can be used to parse most of the response\n # text\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "DashScope_Evaluator", "path": "ievals/modules/qa_evaluators/ali_dashscope.py", "snippet": "class DashScope_Evaluator(Evaluator):\n \"\"\"\n Completion endpoint for instruction based model\n qwen models\n \"\"\"\n\n def __init__(self, choices, k, api_key, model_name, switch_zh_hans=False):\n super(DashScope_Evaluator, self).__init__(choices, model_name, k)\n dashscope.api_key = api_key\n assert model_name in set(Generation.Models.__dict__.values())\n self.model_name = model_name\n self.converter = None\n if switch_zh_hans:\n self.converter = opencc.OpenCC(\"t2s.json\")\n\n def format_example(self, line, include_answer=True, cot=False):\n example = line[\"question\"]\n for choice in self.choices:\n example += f'\\n{choice}. 
{line[f\"{choice}\"]}'\n\n example += \"\\n答案:\"\n if include_answer:\n if cot:\n ans = line[\"answer\"]\n content = \"讓我們一步一步思考,\\n\" + line[\"explanation\"] + f\"\\n所以答案是{ans}。\"\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": content},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n {\"role\": \"assistant\", \"content\": line[\"answer\"]},\n ]\n else:\n return [\n {\"role\": \"user\", \"content\": example},\n ]\n\n def generate_few_shot_prompt(self, subject, dev_df, cot=False):\n prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject}考試單選題,請選出正確的答案。\",\n }\n ]\n k = self.k\n if self.k == -1:\n k = dev_df.shape[0]\n for i in range(k):\n tmp = self.format_example(dev_df.iloc[i, :], include_answer=True, cot=cot)\n if i == 0:\n tmp[0][\"content\"] = (\n f\"以下是關於{subject}考試單選題,請選出正確的答案。\\n\\n\" + tmp[0][\"content\"]\n )\n if self.converter:\n tmp[0][\"content\"] = self.converter.convert(tmp[0][\"content\"])\n prompt += tmp\n return prompt\n\n def eval_subject(\n self,\n subject_name,\n test_df,\n dev_df=None,\n few_shot=False,\n save_result_dir=None,\n cot=False,\n ):\n correct_num = 0\n if save_result_dir:\n result = []\n score = []\n if few_shot:\n few_shot_prompt = self.generate_few_shot_prompt(\n subject_name, dev_df, cot=cot\n )\n else:\n few_shot_prompt = [\n {\n \"role\": \"system\",\n \"content\": f\"你是一位專業的中文AI助理,以下是關於{subject_name}考試單選題,請選出正確的答案。\",\n }\n ]\n answers = list(test_df[\"answer\"])\n for row_index, row in tqdm(\n test_df.iterrows(), total=len(test_df), dynamic_ncols=True\n ):\n question = self.format_example(row, include_answer=False)\n full_prompt = few_shot_prompt + question\n if not few_shot:\n full_prompt[-1][\"content\"] = (\n f\"以下是關於{subject_name}考試單選題,請選出正確的答案。\\n\\n\"\n + full_prompt[-1][\"content\"]\n )\n response = None\n timeout_counter = 0\n if self.converter:\n converted = []\n for p in full_prompt:\n p[\"content\"] = self.converter.convert(p[\"content\"])\n converted.append(p)\n full_prompt = converted\n\n text = \"\"\n for prompt in full_prompt:\n text += prompt[\"content\"] + \"\\n\"\n\n while response is None and timeout_counter <= 30:\n try:\n response = Generation.call(model=self.model_name, prompt=text)\n except Exception as msg:\n if \"timeout=600\" in str(msg):\n timeout_counter += 1\n logging.error(msg)\n sleep(5)\n continue\n\n if response.status_code == HTTPStatus.OK:\n response_str = response.output.text\n else:\n response_str = \"\"\n\n if cot:\n ans_list = re.findall(r\"答案是(.+?)。\", response_str)\n if self.converter: # simplified chinese\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案为(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"选项(.+?)是正确的。\", response_str)\n else:\n if len(ans_list) == 0:\n ans_list = re.findall(r\"答案為(.+?)。\", response_str)\n if len(ans_list) == 0:\n ans_list = re.findall(r\"選項(.+?)是正確的。\", response_str)\n\n if len(ans_list) == 0:\n correct = 0\n else:\n if self.exact_match(ans_list[-1], row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n response_str = response_str.strip()\n if few_shot:\n if len(response_str) > 0:\n if self.exact_match(response_str, row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n ans_list = self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n else:\n if len(response_str) > 0:\n ans_list = 
self.extract_ans(response_str)\n if len(ans_list) > 0 and (ans_list[-1] == row[\"answer\"]):\n correct_num += 1\n correct = 1\n else:\n correct = 0\n else:\n correct = 0\n if save_result_dir:\n result.append(response_str)\n score.append(correct)\n correct_ratio = 100 * correct_num / len(answers)\n\n if save_result_dir:\n test_df[\"model_output\"] = result\n test_df[\"correctness\"] = score\n test_df.to_csv(\n os.path.join(save_result_dir, f\"{subject_name}_val.csv\"),\n encoding=\"utf-8\",\n index=False,\n )\n return correct_ratio\n\n def extract_ans(self, response_str):\n pattern = [\n r\"([A-D]). \",\n r\"([A-D]).\",\n r\"^選([A-D])\",\n r\"^選項([A-D])\",\n r\"^选([A-D])\",\n r\"^选项([A-D])\",\n r\"答案是\\s?選?項?\\s?([A-D])\",\n r\"答案為\\s?選?項?\\s?([A-D])\",\n r\"答案應為\\s?選?項?\\s?([A-D])\",\n r\"答案为\\s?选?项?\\s?([A-D])\",\n r\"答案应为\\s?选?项?\\s?([A-D])\",\n r\"答案選\\s?選?項?\\s?([A-D])\",\n r\"答案选\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"正確的一項是\\s?([A-D])\",\n r\"正确的一项是\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案是:\\s?選?項?\\s?([A-D])\",\n r\"答案應該是:\\s?選?項?\\s?([A-D])\",\n r\"答案為:\\s?選?項?\\s?([A-D])\",\n r\"答案應為:\\s?選?項?\\s?([A-D])\",\n r\"答案:\\s?選?項?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n r\"答案是:\\s?选?项?\\s?([A-D])\",\n r\"答案应该是:\\s?选?项?\\s?([A-D])\",\n r\"答案为:\\s?选?项?\\s?([A-D])\",\n r\"答案应为:\\s?选?项?\\s?([A-D])\",\n r\"答案:\\s?选?项?\\s?([A-D])\",\n ]\n ans_list = []\n if response_str[0] in [\"A\", \"B\", \"C\", \"D\"]:\n ans_list.append(response_str[0])\n for p in pattern:\n if self.converter:\n p = self.converter.convert(p)\n if len(ans_list) == 0:\n ans_list = re.findall(p, response_str)\n else:\n break\n return ans_list" }, { "identifier": "run_exp", "path": "ievals/exp_executer.py", "snippet": "def run_exp(\n evaluator,\n model_name,\n dataset,\n postfix_name=\"tgi\",\n cache_path=\".cache\",\n split_name=\"test\",\n few_shot=False,\n):\n model_name_path = model_name.replace(\"/\", \"_\")\n save_result_dir = None\n\n if cache_path:\n os.makedirs(f\"{cache_path}\", exist_ok=True)\n os.makedirs(f\"{cache_path}/{model_name_path}\", exist_ok=True)\n save_result_dir = f\"{cache_path}/{model_name_path}\"\n\n task_list, subject2name, subject2category = get_exp_setting(dataset)\n postfix = model_name.split(\"/\")[-1]\n prefix_name = dataset.split(\"/\")[-1]\n result_cache = f\"{prefix_name}_{postfix_name}.tsv\"\n if os.path.exists(result_cache):\n logging.info(f\"Found previous cache {result_cache}, skipping executed subjects\")\n df = pd.read_csv(result_cache, delimiter=\"\\t\", header=None)\n df.columns = [\"model_name\", \"subject\", \"score\"]\n finished_subjects = df[\"subject\"].tolist()\n task_list = [t for t in task_list if t not in finished_subjects]\n\n output_filename = \"\"\n # TODO: absract out the dataset-task logic, as this is likely\n # limited under multi subject task only\n for task in task_list:\n zh_name = subject2name[task]\n test = load_dataset(dataset, task)[split_name]\n test_df = pd.DataFrame([dict(row) for row in test])\n dev = load_dataset(dataset, task)[\"train\"]\n dev_df = pd.DataFrame([dict(row) for row in dev])\n\n accuracy = evaluator.eval_subject(\n zh_name,\n test_df,\n dev_df=dev_df,\n few_shot=few_shot,\n save_result_dir=f\"{cache_path}/{model_name_path}\",\n )\n\n with open(result_cache, \"a\") as fout:\n 
fout.write(\"{}\\t{}\\t{:.5f}\\n\".format(model_name, task, accuracy))\n\n df = pd.read_csv(result_cache, delimiter=\"\\t\", header=None)\n df.columns = [\"model_name\", \"subject\", \"score\"]\n for model_name in df[\"model_name\"].unique():\n print(model_name)" } ]
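The three evaluator snippets above (GPT_Evaluator, ChatGPT_Evaluator, DashScope_Evaluator) duplicate the same extract_ans regex cascade verbatim, and their pattern lists contain repeated entries. Below is a minimal, deduplicated sketch of that cascade; the pattern list is abbreviated (assumption: the five variants shown stand in for the roughly thirty Traditional/Simplified-Chinese variants carried by the snippets).

import re

# Abbreviated pattern list; the real evaluators carry ~30 variants.
PATTERNS = [
    r"([A-D])\. ",
    r"^選項?([A-D])",
    r"答案是\s?選?項?\s?([A-D])",
    r"答案為\s?選?項?\s?([A-D])",
    r"正確的一項是\s?([A-D])",
]


def extract_ans(response_str: str) -> list:
    # A bare leading option letter is accepted immediately.
    if response_str and response_str[0] in "ABCD":
        return [response_str[0]]
    # Otherwise try each regex in order; the first non-empty match wins,
    # exactly like the break-on-non-empty loop in the snippets.
    for p in PATTERNS:
        found = re.findall(p, response_str)
        if found:
            return found
    return []


print(extract_ans("答案是 B,因為選項 B 正確。"))  # -> ['B']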
import os import logging import argparse import pandas as pd from datasets import load_dataset from ievals.modules.qa_evaluators.tgi import TGI_Evaluator from ievals.modules.qa_evaluators.gemini import Gemini_Evaluator from ievals.modules.qa_evaluators.claude import Claude_Evaluator from ievals.modules.qa_evaluators.azure import Azure_Evaluator from ievals.modules.qa_evaluators.oai_complete import GPT_Evaluator from ievals.modules.qa_evaluators.chatgpt import ChatGPT_Evaluator from ievals.modules.qa_evaluators.hf_chat import HF_Chat_Evaluator from ievals.modules.qa_evaluators.hf_base import ( Qwen_Evaluator, ) # we only use this for qwen base model from ievals.modules.qa_evaluators.ali_dashscope import DashScope_Evaluator from ievals.exp_executer import run_exp
19,443
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete":
""" CLI for all models Support mode: if tgi service was used you must pass in IP and hostname if the service was found in model_config.csv you could skip providing the 4 tokens (user, assistant, system, eos) else you need to pass in the four token in args """ try: except ImportError as e: logging.error("huggingface and qwen models are not supported due to " + str(e)) def get_model_config(): current_dir = os.path.dirname(os.path.abspath(__file__)) up_dir = os.path.abspath(os.path.join(current_dir, os.pardir)) df = pd.read_csv(os.path.join(up_dir, "model_config.csv")) df.fillna("", inplace=True) valid_model_names = df["model_name"].tolist() return valid_model_names, df def get_tgi_prompt_config(model_name): valid_model_names, df = get_model_config() if model_name not in valid_model_names: return None, None prompt_config = df[df["model_name"] == model_name].iloc[0] prompt_config.pop("model_name") return prompt_config def get_evaluator(model_name, series=""): if len(series): if series == "azure": return Azure_Evaluator elif series == "openai_chat": return ChatGPT_Evaluator elif series == "openai_complete":
return GPT_Evaluator
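The next_line field records the expected completion of the truncated get_evaluator above: return GPT_Evaluator. As a sketch only, the remaining branches can be read as a table-driven dispatch over the evaluator classes imported earlier; the series names beyond the three visible in the snippet ("azure", "openai_chat", "openai_complete") are assumptions.

# Sketch (assumption): dict dispatch equivalent to the if/elif chain, with
# guessed series names for the evaluators not shown in the cropped snippet.
SERIES_TO_EVALUATOR = {
    "azure": Azure_Evaluator,
    "openai_chat": ChatGPT_Evaluator,
    "openai_complete": GPT_Evaluator,  # matches the recorded next_line
    "gemini": Gemini_Evaluator,
    "claude": Claude_Evaluator,
    "dashscope": DashScope_Evaluator,
    "tgi": TGI_Evaluator,
}


def get_evaluator_from_series(series: str):
    return SERIES_TO_EVALUATOR.get(series)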
4
2023-12-24 08:00:38+00:00
24k
kraina-ai/quackosm
quackosm/functions.py
[ { "identifier": "GroupedOsmTagsFilter", "path": "quackosm/_osm_tags_filters.py", "snippet": "def merge_osm_tags_filter(osm_tags_filter: OsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: GroupedOsmTagsFilter) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[OsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(osm_tags_filter: Iterable[GroupedOsmTagsFilter]) -> OsmTagsFilter: ...\ndef merge_osm_tags_filter(\n osm_tags_filter: Union[\n OsmTagsFilter, GroupedOsmTagsFilter, Iterable[OsmTagsFilter], Iterable[GroupedOsmTagsFilter]\n ]\n) -> OsmTagsFilter:\ndef _merge_grouped_osm_tags_filter(grouped_filter: GroupedOsmTagsFilter) -> OsmTagsFilter:\ndef _merge_multiple_osm_tags_filters(osm_tags_filters: Iterable[OsmTagsFilter]) -> OsmTagsFilter:" }, { "identifier": "OsmWayPolygonConfig", "path": "quackosm/_osm_way_polygon_features.py", "snippet": "class OsmWayPolygonConfig(NamedTuple):\n \"\"\"OSM Way polygon features config object.\"\"\"\n\n all: Iterable[str]\n allowlist: dict[str, Iterable[str]]\n denylist: dict[str, Iterable[str]]" }, { "identifier": "PbfFileReader", "path": "quackosm/pbf_file_reader.py", "snippet": "class PbfFileReader:\n \"\"\"\n PbfFileReader.\n\n PBF(Protocolbuffer Binary Format)[1] file reader is a dedicated `*.osm.pbf` files reader\n class based on DuckDB[2] and its spatial extension[3].\n\n Handler can filter out OSM features based on tags filter and geometry filter\n to limit the result.\n\n References:\n 1. https://wiki.openstreetmap.org/wiki/PBF_Format\n 2. https://duckdb.org/\n 3. https://github.com/duckdb/duckdb_spatial\n \"\"\"\n\n class ConvertedOSMParquetFiles(NamedTuple):\n \"\"\"List of parquet files read from the `*.osm.pbf` file.\"\"\"\n\n nodes_valid_with_tags: \"duckdb.DuckDBPyRelation\"\n nodes_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n ways_all_with_tags: \"duckdb.DuckDBPyRelation\"\n ways_with_unnested_nodes_refs: \"duckdb.DuckDBPyRelation\"\n ways_required_ids: \"duckdb.DuckDBPyRelation\"\n ways_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n relations_all_with_tags: \"duckdb.DuckDBPyRelation\"\n relations_with_unnested_way_refs: \"duckdb.DuckDBPyRelation\"\n relations_filtered_ids: \"duckdb.DuckDBPyRelation\"\n\n class ParsedOSMFeatures(NamedTuple):\n \"\"\"Final list of parsed features from the `*.osm.pbf` file.\"\"\"\n\n nodes: \"duckdb.DuckDBPyRelation\"\n ways: \"duckdb.DuckDBPyRelation\"\n relations: \"duckdb.DuckDBPyRelation\"\n\n def __init__(\n self,\n tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None,\n geometry_filter: Optional[BaseGeometry] = None,\n working_directory: Union[str, Path] = \"files\",\n osm_way_polygon_features_config: Optional[\n Union[OsmWayPolygonConfig, dict[str, Any]]\n ] = None,\n ) -> None:\n \"\"\"\n Initialize PbfFileReader.\n\n Args:\n tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary\n specifying which tags to download.\n The keys should be OSM tags (e.g. `building`, `amenity`).\n The values should either be `True` for retrieving all objects with the tag,\n string for retrieving a single tag-value pair\n or list of strings for retrieving all values specified in the list.\n `tags={'leisure': 'park}` would return parks from the area.\n `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}`\n would return parks, all amenity types, bakeries and bicycle shops.\n If `None`, handler will allow all of the tags to be parsed. 
Defaults to `None`.\n geometry_filter (BaseGeometry, optional): Region which can be used to filter only\n intersecting OSM objects. Defaults to `None`.\n working_directory (Union[str, Path], optional): Directory where to save\n the parsed `*.parquet` files. Defaults to \"files\".\n osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional):\n Config used to determine which closed way features are polygons.\n Modifications to this config left are left for experienced OSM users.\n Defaults to predefined \"osm_way_polygon_features.json\".\n \"\"\"\n self.tags_filter = tags_filter\n self.merged_tags_filter = merge_osm_tags_filter(tags_filter) if tags_filter else None\n self.geometry_filter = geometry_filter\n self.working_directory = Path(working_directory)\n self.working_directory.mkdir(parents=True, exist_ok=True)\n self.connection: duckdb.DuckDBPyConnection = None\n\n self.rows_per_bucket = 1_000_000\n memory = psutil.virtual_memory()\n # If less than 8 / 16 GB total memory, reduce number of rows per group\n if memory.total < (8 * (1024**3)):\n self.rows_per_bucket = 100_000\n elif memory.total < (16 * (1024**3)):\n self.rows_per_bucket = 500_000\n\n if osm_way_polygon_features_config is None:\n # Config based on two sources + manual OSM wiki check\n # 1. https://github.com/tyrasd/osm-polygon-features/blob/v0.9.2/polygon-features.json\n # 2. https://github.com/ideditor/id-area-keys/blob/v5.0.1/areaKeys.json\n osm_way_polygon_features_config = json.loads(\n (Path(__file__).parent / \"osm_way_polygon_features.json\").read_text()\n )\n\n self.osm_way_polygon_features_config: OsmWayPolygonConfig = (\n osm_way_polygon_features_config\n if isinstance(osm_way_polygon_features_config, OsmWayPolygonConfig)\n else parse_dict_to_config_object(osm_way_polygon_features_config)\n )\n\n def get_features_gdf(\n self,\n file_paths: Union[str, Path, Iterable[Union[str, Path]]],\n explode_tags: Optional[bool] = None,\n ignore_cache: bool = False,\n filter_osm_ids: Optional[list[str]] = None,\n ) -> gpd.GeoDataFrame:\n \"\"\"\n Get features GeoDataFrame from a list of PBF files.\n\n Function parses multiple PBF files and returns a single GeoDataFrame with parsed\n OSM objects.\n\n Args:\n file_paths (Union[str, Path, Iterable[Union[str, Path]]]):\n Path or list of paths of `*.osm.pbf` files to be parsed.\n explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys.\n If `None`, will be set based on `tags_filter` parameter.\n If no tags filter is provided, then `explode_tags` will set to `False`,\n if there is tags filter it will set to `True`. 
Defaults to `None`.\n ignore_cache: (bool, optional): Whether to ignore precalculated geoparquet files or not.\n Defaults to False.\n filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file.\n Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'.\n Defaults to an empty list.\n\n Returns:\n gpd.GeoDataFrame: GeoDataFrame with OSM features.\n \"\"\"\n if isinstance(file_paths, (str, Path)):\n file_paths = [file_paths]\n\n if filter_osm_ids is None:\n filter_osm_ids = []\n\n if explode_tags is None:\n explode_tags = self.tags_filter is not None\n\n parsed_geoparquet_files = []\n for file_path in file_paths:\n parsed_geoparquet_file = self.convert_pbf_to_gpq(\n file_path,\n explode_tags=explode_tags,\n ignore_cache=ignore_cache,\n filter_osm_ids=filter_osm_ids,\n )\n parsed_geoparquet_files.append(parsed_geoparquet_file)\n\n parquet_tables = [\n io.read_geoparquet_table(parsed_parquet_file) # type: ignore\n for parsed_parquet_file in parsed_geoparquet_files\n ]\n joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables)\n gdf_parquet = gpd.GeoDataFrame(\n data=joined_parquet_table.drop(GEOMETRY_COLUMN).to_pandas(maps_as_pydicts=\"strict\"),\n geometry=ga.to_geopandas(joined_parquet_table.column(GEOMETRY_COLUMN)),\n ).set_index(FEATURES_INDEX)\n\n return gdf_parquet\n\n def convert_pbf_to_gpq(\n self,\n pbf_path: Union[str, Path],\n result_file_path: Optional[Union[str, Path]] = None,\n explode_tags: Optional[bool] = None,\n ignore_cache: bool = False,\n filter_osm_ids: Optional[list[str]] = None,\n ) -> Path:\n \"\"\"\n Convert PBF file to GeoParquet file.\n\n Args:\n pbf_path (Union[str, Path]): Pbf file to be parsed to GeoParquet.\n result_file_path (Union[str, Path], optional): Where to save\n the geoparquet file. If not provided, will be generated based on hashes\n from provided tags filter and geometry filter. Defaults to `None`.\n explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys.\n If `None`, will be set based on `tags_filter` parameter.\n If no tags filter is provided, then `explode_tags` will set to `False`,\n if there is tags filter it will set to `True`. 
Defaults to `None`.\n ignore_cache (bool, optional): Whether to ignore precalculated geoparquet files or not.\n Defaults to False.\n filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file.\n Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'.\n Defaults to an empty list.\n\n Returns:\n Path: Path to the generated GeoParquet file.\n \"\"\"\n if filter_osm_ids is None:\n filter_osm_ids = []\n\n if explode_tags is None:\n explode_tags = self.tags_filter is not None\n\n with tempfile.TemporaryDirectory(dir=self.working_directory.resolve()) as tmp_dir_name:\n try:\n self._set_up_duckdb_connection(tmp_dir_name)\n result_file_path = result_file_path or self._generate_geoparquet_result_file_path(\n pbf_path,\n filter_osm_ids=filter_osm_ids,\n explode_tags=explode_tags,\n )\n parsed_geoparquet_file = self._parse_pbf_file(\n pbf_path=pbf_path,\n tmp_dir_name=tmp_dir_name,\n result_file_path=Path(result_file_path),\n filter_osm_ids=filter_osm_ids,\n explode_tags=explode_tags,\n ignore_cache=ignore_cache,\n )\n return parsed_geoparquet_file\n finally:\n if self.connection is not None:\n self.connection.close()\n self.connection = None\n\n def _set_up_duckdb_connection(self, tmp_dir_name: str) -> None:\n self.connection = duckdb.connect(\n database=str(Path(tmp_dir_name) / \"db.duckdb\"),\n config=dict(preserve_insertion_order=False),\n )\n for extension_name in (\"parquet\", \"spatial\"):\n self.connection.install_extension(extension_name)\n self.connection.load_extension(extension_name)\n\n self.connection.sql(\"\"\"\n CREATE OR REPLACE MACRO linestring_to_linestring_wkt(ls) AS\n 'LINESTRING (' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || ')';\n \"\"\")\n self.connection.sql(\"\"\"\n CREATE OR REPLACE MACRO linestring_to_polygon_wkt(ls) AS\n 'POLYGON ((' || array_to_string([pt.x || ' ' || pt.y for pt in ls], ', ') || '))';\n \"\"\")\n\n def _parse_pbf_file(\n self,\n pbf_path: Union[str, Path],\n tmp_dir_name: str,\n result_file_path: Path,\n filter_osm_ids: list[str],\n explode_tags: bool = True,\n ignore_cache: bool = False,\n ) -> Path:\n if not result_file_path.exists() or ignore_cache:\n elements = self.connection.sql(f\"SELECT * FROM ST_READOSM('{Path(pbf_path)}');\")\n converted_osm_parquet_files = self._prefilter_elements_ids(\n elements, tmp_dir_name, filter_osm_ids\n )\n\n self._delete_directories(\n tmp_dir_name,\n [\n \"nodes_filtered_non_distinct_ids\",\n \"nodes_prepared_ids\",\n \"ways_valid_ids\",\n \"ways_filtered_non_distinct_ids\",\n \"relations_valid_ids\",\n \"relations_ids\",\n ],\n )\n\n filtered_nodes_with_geometry = self._get_filtered_nodes_with_geometry(\n converted_osm_parquet_files, tmp_dir_name\n )\n self._delete_directories(tmp_dir_name, \"nodes_filtered_ids\")\n\n ways_refs_with_nodes_structs = self._get_ways_refs_with_nodes_structs(\n converted_osm_parquet_files, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"nodes_valid_with_tags\",\n ],\n )\n\n filtered_ways_with_linestrings = self._get_filtered_ways_with_linestrings(\n osm_parquet_files=converted_osm_parquet_files,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n tmp_dir_name=tmp_dir_name,\n )\n required_ways_with_linestrings = self._get_required_ways_with_linestrings(\n osm_parquet_files=converted_osm_parquet_files,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n tmp_dir_name=tmp_dir_name,\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"ways_required_grouped\",\n \"ways_required_ids\",\n 
\"ways_with_unnested_nodes_refs\",\n \"ways_refs_with_nodes_structs\",\n \"required_ways_ids_grouped\",\n \"required_ways_grouped\",\n \"required_ways_tmp\",\n \"filtered_ways_ids_grouped\",\n \"filtered_ways_grouped\",\n \"filtered_ways_tmp\",\n ],\n )\n\n filtered_ways_with_proper_geometry = self._get_filtered_ways_with_proper_geometry(\n converted_osm_parquet_files, filtered_ways_with_linestrings, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"ways_prepared_ids\",\n \"ways_filtered_ids\",\n \"ways_all_with_tags\",\n \"filtered_ways_with_linestrings\",\n ],\n )\n\n filtered_relations_with_geometry = self._get_filtered_relations_with_geometry(\n converted_osm_parquet_files, required_ways_with_linestrings, tmp_dir_name\n )\n self._delete_directories(\n tmp_dir_name,\n [\n \"relations_all_with_tags\",\n \"relations_with_unnested_way_refs\",\n \"relations_filtered_ids\",\n \"required_ways_with_linestrings\",\n \"valid_relation_parts\",\n \"relation_inner_parts\",\n \"relation_outer_parts\",\n \"relation_outer_parts_with_holes\",\n \"relation_outer_parts_without_holes\",\n ],\n )\n\n self._concatenate_results_to_geoparquet(\n PbfFileReader.ParsedOSMFeatures(\n nodes=filtered_nodes_with_geometry,\n ways=filtered_ways_with_proper_geometry,\n relations=filtered_relations_with_geometry,\n ),\n tmp_dir_name=tmp_dir_name,\n save_file_path=result_file_path,\n explode_tags=explode_tags,\n )\n\n return result_file_path\n\n def _generate_geoparquet_result_file_path(\n self,\n pbf_file_path: Union[str, Path],\n explode_tags: bool,\n filter_osm_ids: list[str],\n ) -> Path:\n pbf_file_name = Path(pbf_file_path).name.removesuffix(\".osm.pbf\")\n\n osm_filter_tags_hash_part = \"nofilter\"\n if self.tags_filter is not None:\n h = hashlib.new(\"sha256\")\n h.update(json.dumps(self.tags_filter).encode())\n osm_filter_tags_hash_part = h.hexdigest()\n\n clipping_geometry_hash_part = \"noclip\"\n if self.geometry_filter is not None:\n h = hashlib.new(\"sha256\")\n h.update(wktlib.dumps(self.geometry_filter).encode())\n clipping_geometry_hash_part = h.hexdigest()\n\n exploded_tags_part = \"exploded\" if explode_tags else \"compact\"\n\n filter_osm_ids_hash_part = \"\"\n if filter_osm_ids:\n h = hashlib.new(\"sha256\")\n h.update(json.dumps(sorted(set(filter_osm_ids))).encode())\n filter_osm_ids_hash_part = f\"_{h.hexdigest()}\"\n\n result_file_name = (\n f\"{pbf_file_name}_{osm_filter_tags_hash_part}\"\n f\"_{clipping_geometry_hash_part}_{exploded_tags_part}{filter_osm_ids_hash_part}.geoparquet\"\n )\n return Path(self.working_directory) / result_file_name\n\n def _prefilter_elements_ids(\n self, elements: \"duckdb.DuckDBPyRelation\", tmp_dir_name: str, filter_osm_ids: list[str]\n ) -> ConvertedOSMParquetFiles:\n sql_filter = self._generate_osm_tags_sql_filter()\n filtered_tags_clause = self._generate_filtered_tags_clause()\n\n is_intersecting = self.geometry_filter is not None\n\n with TaskProgressSpinner(\"Reading nodes\", \"1\"):\n # NODES - VALID (NV)\n # - select all with kind = 'node'\n # - select all with lat and lon not empty\n nodes_valid_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT\n id,\n {filtered_tags_clause},\n lon,\n lat\n FROM ({elements.sql_query()})\n WHERE kind = 'node'\n AND lat IS NOT NULL AND lon IS NOT NULL\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_valid_with_tags\",\n )\n # NODES - INTERSECTING (NI)\n # - select all from NV which intersect given geometry filter\n # NODES - FILTERED (NF)\n # - select all from NI with tags filter\n 
filter_osm_node_ids_filter = self._generate_elements_filter(filter_osm_ids, \"node\")\n if is_intersecting:\n wkt = cast(BaseGeometry, self.geometry_filter).wkt\n intersection_filter = f\"ST_Intersects(ST_Point(lon, lat), ST_GeomFromText('{wkt}'))\"\n with TaskProgressSpinner(\"Filtering nodes - intersection\", \"2\"):\n nodes_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT DISTINCT id FROM ({nodes_valid_with_tags.sql_query()}) n\n WHERE {intersection_filter} = true\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_intersecting_ids\",\n )\n with TaskProgressSpinner(\"Filtering nodes - tags\", \"3\"):\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({nodes_valid_with_tags.sql_query()}) n\n SEMI JOIN ({nodes_intersecting_ids.sql_query()}) ni ON n.id = ni.id\n WHERE tags IS NOT NULL AND cardinality(tags) > 0 AND ({sql_filter})\n AND ({filter_osm_node_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n )\n else:\n with TaskProgressSpinner(\"Filtering nodes - intersection\", \"2\"):\n pass\n with TaskProgressSpinner(\"Filtering nodes - tags\", \"3\"):\n nodes_intersecting_ids = nodes_valid_with_tags\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({nodes_valid_with_tags.sql_query()}) n\n WHERE tags IS NOT NULL AND cardinality(tags) > 0 AND ({sql_filter})\n AND ({filter_osm_node_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n )\n with TaskProgressSpinner(\"Calculating distinct filtered nodes ids\", \"4\"):\n nodes_filtered_ids = self._calculate_unique_ids_to_parquet(\n Path(tmp_dir_name) / \"nodes_filtered_non_distinct_ids\",\n Path(tmp_dir_name) / \"nodes_filtered_ids\",\n )\n\n with TaskProgressSpinner(\"Reading ways\", \"5\"):\n # WAYS - VALID (WV)\n # - select all with kind = 'way'\n # - select all with more then one ref\n # - join all NV to refs\n # - select all where all refs has been joined (total_refs == found_refs)\n self.connection.sql(f\"\"\"\n SELECT *\n FROM ({elements.sql_query()}) w\n WHERE kind = 'way' AND len(refs) >= 2\n \"\"\").to_view(\"ways\", replace=True)\n ways_all_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH filtered_tags AS (\n SELECT id, {filtered_tags_clause}, tags as raw_tags\n FROM ways w\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n )\n SELECT id, tags, raw_tags\n FROM filtered_tags\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_all_with_tags\",\n )\n with TaskProgressSpinner(\"Unnesting ways\", \"6\"):\n ways_with_unnested_nodes_refs = self._sql_to_parquet_file(\n sql_query=\"\"\"\n SELECT w.id, UNNEST(refs) as ref, UNNEST(range(length(refs))) as ref_idx\n FROM ways w\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_with_unnested_nodes_refs\",\n )\n with TaskProgressSpinner(\"Filtering ways - valid refs\", \"7\"):\n ways_valid_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH total_ways_with_nodes_refs AS (\n SELECT id, ref\n FROM ({ways_with_unnested_nodes_refs.sql_query()})\n ),\n unmatched_ways_with_nodes_refs AS (\n SELECT id, ref\n FROM ({ways_with_unnested_nodes_refs.sql_query()}) w\n ANTI JOIN ({nodes_valid_with_tags.sql_query()}) nv ON nv.id = w.ref\n )\n SELECT DISTINCT id\n FROM total_ways_with_nodes_refs\n EXCEPT\n SELECT DISTINCT id\n FROM unmatched_ways_with_nodes_refs\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_valid_ids\",\n )\n\n with TaskProgressSpinner(\"Filtering ways - intersection\", \"8\"):\n # 
WAYS - INTERSECTING (WI)\n # - select all from WV with joining any from NV on ref\n if is_intersecting:\n ways_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT DISTINCT uwr.id\n FROM ({ways_with_unnested_nodes_refs.sql_query()}) uwr\n SEMI JOIN ({ways_valid_ids.sql_query()}) wv ON uwr.id = wv.id\n SEMI JOIN ({nodes_intersecting_ids.sql_query()}) n ON n.id = uwr.ref\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_intersecting_ids\",\n )\n else:\n ways_intersecting_ids = ways_valid_ids\n with TaskProgressSpinner(\"Filtering ways - tags\", \"9\"):\n # WAYS - FILTERED (WF)\n # - select all from WI with tags filter\n filter_osm_way_ids_filter = self._generate_elements_filter(filter_osm_ids, \"way\")\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({ways_all_with_tags.sql_query()}) w\n SEMI JOIN ({ways_intersecting_ids.sql_query()}) wi ON w.id = wi.id\n WHERE ({sql_filter}) AND ({filter_osm_way_ids_filter})\n \"\"\",\n file_path=Path(tmp_dir_name) / \"ways_filtered_non_distinct_ids\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct filtered ways ids\", \"10\"):\n ways_filtered_ids = self._calculate_unique_ids_to_parquet(\n Path(tmp_dir_name) / \"ways_filtered_non_distinct_ids\",\n Path(tmp_dir_name) / \"ways_filtered_ids\",\n )\n\n with TaskProgressSpinner(\"Reading relations\", \"11\"):\n # RELATIONS - VALID (RV)\n # - select all with kind = 'relation'\n # - select all with more then one ref\n # - select all with type in ['boundary', 'multipolygon']\n # - join all WV to refs\n # - select all where all refs has been joined (total_refs == found_refs)\n self.connection.sql(f\"\"\"\n SELECT *\n FROM ({elements.sql_query()})\n WHERE kind = 'relation' AND len(refs) > 0\n AND list_contains(map_keys(tags), 'type')\n AND list_has_any(map_extract(tags, 'type'), ['boundary', 'multipolygon'])\n \"\"\").to_view(\"relations\", replace=True)\n relations_all_with_tags = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH filtered_tags AS (\n SELECT id, {filtered_tags_clause}\n FROM relations r\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n )\n SELECT id, tags\n FROM filtered_tags\n WHERE tags IS NOT NULL AND cardinality(tags) > 0\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_all_with_tags\",\n )\n\n with TaskProgressSpinner(\"Unnesting relations\", \"12\"):\n relations_with_unnested_way_refs = self._sql_to_parquet_file(\n sql_query=\"\"\"\n WITH unnested_relation_refs AS (\n SELECT\n r.id,\n UNNEST(refs) as ref,\n UNNEST(ref_types) as ref_type,\n UNNEST(ref_roles) as ref_role,\n UNNEST(range(length(refs))) as ref_idx\n FROM relations r\n )\n SELECT id, ref, ref_role, ref_idx\n FROM unnested_relation_refs\n WHERE ref_type = 'way'\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_with_unnested_way_refs\",\n )\n\n with TaskProgressSpinner(\"Filtering relations - valid refs\", \"13\"):\n relations_valid_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n WITH total_relation_refs AS (\n SELECT id, ref\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n ),\n unmatched_relation_refs AS (\n SELECT id, ref\n FROM ({relations_with_unnested_way_refs.sql_query()}) r\n ANTI JOIN ({ways_valid_ids.sql_query()}) wv ON wv.id = r.ref\n )\n SELECT DISTINCT id\n FROM total_relation_refs\n EXCEPT\n SELECT DISTINCT id\n FROM unmatched_relation_refs\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_valid_ids\",\n )\n\n with TaskProgressSpinner(\"Filtering relations - intersection\", \"14\"):\n # RELATIONS - INTERSECTING (RI)\n # - select 
all from RW with joining any from RV on ref\n if is_intersecting:\n relations_intersecting_ids = self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT frr.id\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n SEMI JOIN ({relations_valid_ids.sql_query()}) rv ON frr.id = rv.id\n SEMI JOIN ({ways_intersecting_ids.sql_query()}) wi ON wi.id = frr.ref\n \"\"\",\n file_path=Path(tmp_dir_name) / \"relations_intersecting_ids\",\n )\n else:\n relations_intersecting_ids = relations_valid_ids\n\n with TaskProgressSpinner(\"Filtering relations - tags\", \"15\"):\n # RELATIONS - FILTERED (RF)\n # - select all from RI with tags filter\n filter_osm_relation_ids_filter = self._generate_elements_filter(\n filter_osm_ids, \"relation\"\n )\n\n relations_ids_path = Path(tmp_dir_name) / \"relations_ids\"\n relations_ids_path.mkdir(parents=True, exist_ok=True)\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT id FROM ({relations_all_with_tags.sql_query()}) r\n SEMI JOIN ({relations_intersecting_ids.sql_query()}) ri ON r.id = ri.id\n WHERE ({sql_filter}) AND ({filter_osm_relation_ids_filter})\n \"\"\",\n file_path=relations_ids_path / \"filtered\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct filtered relations ids\", \"16\"):\n relations_filtered_ids = self._calculate_unique_ids_to_parquet(\n relations_ids_path / \"filtered\", Path(tmp_dir_name) / \"relations_filtered_ids\"\n )\n\n ways_prepared_ids_path = Path(tmp_dir_name) / \"ways_prepared_ids\"\n ways_prepared_ids_path.mkdir(parents=True, exist_ok=True)\n\n with TaskProgressSpinner(\"Loading required ways - by relations\", \"17\"):\n # WAYS - REQUIRED (WR)\n # - required - all IDs from WF\n # + all needed to construct relations from RF\n self._sql_to_parquet_file(\n sql_query=f\"\"\"\n SELECT ref as id\n FROM ({relations_with_unnested_way_refs.sql_query()}) frr\n SEMI JOIN ({relations_filtered_ids.sql_query()}) fri ON fri.id = frr.id\n \"\"\",\n file_path=ways_prepared_ids_path / \"required_by_relations\",\n )\n\n with TaskProgressSpinner(\"Calculating distinct required ways ids\", \"18\"):\n ways_required_ids = self._calculate_unique_ids_to_parquet(\n ways_prepared_ids_path, Path(tmp_dir_name) / \"ways_required_ids\"\n )\n\n return PbfFileReader.ConvertedOSMParquetFiles(\n nodes_valid_with_tags=nodes_valid_with_tags,\n nodes_filtered_ids=nodes_filtered_ids,\n ways_all_with_tags=ways_all_with_tags,\n ways_with_unnested_nodes_refs=ways_with_unnested_nodes_refs,\n ways_required_ids=ways_required_ids,\n ways_filtered_ids=ways_filtered_ids,\n relations_all_with_tags=relations_all_with_tags,\n relations_with_unnested_way_refs=relations_with_unnested_way_refs,\n relations_filtered_ids=relations_filtered_ids,\n )\n\n def _delete_directories(\n self, tmp_dir_name: Union[Path, str], directories: Union[str, list[str]]\n ) -> None:\n if isinstance(directories, str):\n directories = [directories]\n for directory in directories:\n directory_path = Path(tmp_dir_name) / directory\n if not directory_path.exists():\n continue\n shutil.rmtree(directory_path)\n\n def _generate_osm_tags_sql_filter(self) -> str:\n \"\"\"Prepare features filter clauses based on tags filter.\"\"\"\n filter_clauses = [\"(1=1)\"]\n\n if self.merged_tags_filter:\n filter_clauses.clear()\n\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n filter_clauses.append(f\"(list_contains(map_keys(tags), '{filter_tag_key}'))\")\n elif isinstance(filter_tag_value, str):\n escaped_value = 
self._sql_escape(filter_tag_value)\n filter_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) =\"\n f\" '{escaped_value}'\"\n )\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n filter_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) IN\"\n f\" ({', '.join(values_list)})\"\n )\n\n return \" OR \".join(filter_clauses)\n\n def _generate_filtered_tags_clause(self) -> str:\n \"\"\"Prepare filtered tags clause by removing tags commonly ignored by OGR.\"\"\"\n tags_to_ignore = [\n \"area\",\n \"created_by\",\n \"converted_by\",\n \"source\",\n \"time\",\n \"ele\",\n \"note\",\n \"todo\",\n \"fixme\",\n \"FIXME\",\n \"openGeoDB:\",\n ]\n escaped_tags_to_ignore = [f\"'{tag}'\" for tag in tags_to_ignore]\n\n return f\"\"\"\n map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries(tags)\n if not tag_entry.key in ({','.join(escaped_tags_to_ignore)})\n and not starts_with(tag_entry.key, 'openGeoDB:')\n ]\n ) as tags\n \"\"\"\n\n def _generate_elements_filter(\n self, filter_osm_ids: list[str], element_type: Literal[\"node\", \"way\", \"relation\"]\n ) -> str:\n filter_osm_relation_ids = [\n osm_id.replace(f\"{element_type}/\", \"\")\n for osm_id in filter_osm_ids\n if osm_id.startswith(f\"{element_type}/\")\n ]\n if not filter_osm_ids:\n filter_osm_ids_filter = \"1=1\"\n elif filter_osm_relation_ids:\n filter_osm_ids_filter = f\"id in ({','.join(filter_osm_relation_ids)})\"\n else:\n filter_osm_ids_filter = \"id IS NULL\"\n\n return filter_osm_ids_filter\n\n def _sql_escape(self, value: str) -> str:\n \"\"\"Escape value for SQL query.\"\"\"\n return value.replace(\"'\", \"''\")\n\n def _sql_to_parquet_file(self, sql_query: str, file_path: Path) -> \"duckdb.DuckDBPyRelation\":\n relation = self.connection.sql(sql_query)\n return self._save_parquet_file(relation, file_path)\n\n def _save_parquet_file(\n self, relation: \"duckdb.DuckDBPyRelation\", file_path: Path\n ) -> \"duckdb.DuckDBPyRelation\":\n self.connection.sql(f\"\"\"\n COPY (\n SELECT * FROM ({relation.sql_query()})\n ) TO '{file_path}' (FORMAT 'parquet', PER_THREAD_OUTPUT true, ROW_GROUP_SIZE 25000)\n \"\"\")\n return self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{file_path}/**')\n \"\"\")\n\n def _calculate_unique_ids_to_parquet(\n self, file_path: Path, result_path: Optional[Path] = None\n ) -> \"duckdb.DuckDBPyRelation\":\n if result_path is None:\n result_path = file_path / \"distinct\"\n\n self.connection.sql(f\"\"\"\n COPY (\n SELECT id FROM read_parquet('{file_path}/**') GROUP BY id\n ) TO '{result_path}' (FORMAT 'parquet', PER_THREAD_OUTPUT true, ROW_GROUP_SIZE 25000)\n \"\"\")\n\n return self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{result_path}/**')\n \"\"\")\n\n def _get_filtered_nodes_with_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n nodes_with_geometry = self.connection.sql(f\"\"\"\n SELECT\n n.id,\n n.tags,\n ST_Point(round(n.lon, 7), round(n.lat, 7)) geometry\n FROM ({osm_parquet_files.nodes_valid_with_tags.sql_query()}) n\n SEMI JOIN ({osm_parquet_files.nodes_filtered_ids.sql_query()}) fn ON n.id = fn.id\n \"\"\")\n nodes_parquet = self._save_parquet_file_with_geometry(\n relation=nodes_with_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_nodes_with_geometry\",\n step_name=\"Saving filtered nodes with geometries\",\n step_number=\"19\",\n )\n return 
nodes_parquet\n\n def _get_ways_refs_with_nodes_structs(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n ways_refs_with_nodes_structs = self.connection.sql(f\"\"\"\n SELECT\n w.id,\n w.ref,\n w.ref_idx,\n struct_pack(x := round(n.lon, 7), y := round(n.lat, 7))::POINT_2D point\n FROM ({osm_parquet_files.nodes_valid_with_tags.sql_query()}) n\n JOIN ({osm_parquet_files.ways_with_unnested_nodes_refs.sql_query()}) w ON w.ref = n.id\n \"\"\")\n with TaskProgressSpinner(\"Saving required nodes with structs\", \"20\"):\n ways_refs_parquet = self._save_parquet_file(\n relation=ways_refs_with_nodes_structs,\n file_path=Path(tmp_dir_name) / \"ways_refs_with_nodes_structs\",\n )\n return ways_refs_parquet\n\n def _get_filtered_ways_with_linestrings(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n grouped_ways_path = Path(tmp_dir_name) / \"filtered_ways_grouped\"\n grouped_ways_tmp_path = Path(tmp_dir_name) / \"filtered_ways_tmp\"\n destination_dir_path = Path(tmp_dir_name) / \"filtered_ways_with_linestrings\"\n\n with TaskProgressSpinner(\"Grouping filtered ways\", \"21\"):\n groups = self._group_ways(\n ways_ids=osm_parquet_files.ways_filtered_ids,\n destination_dir_path=destination_dir_path,\n grouped_ways_tmp_path=grouped_ways_tmp_path,\n grouped_ways_path=grouped_ways_path,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n )\n\n with TaskProgressBar(\"Saving filtered ways with linestrings\", \"22\") as bar:\n self._construct_ways_linestrings(\n bar=bar,\n groups=groups,\n destination_dir_path=destination_dir_path,\n grouped_ways_path=grouped_ways_path,\n )\n\n ways_parquet = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{destination_dir_path}/**')\n \"\"\")\n return ways_parquet\n\n def _get_required_ways_with_linestrings(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n grouped_ways_path = Path(tmp_dir_name) / \"required_ways_grouped\"\n grouped_ways_tmp_path = Path(tmp_dir_name) / \"required_ways_tmp\"\n destination_dir_path = Path(tmp_dir_name) / \"required_ways_with_linestrings\"\n\n with TaskProgressSpinner(\"Grouping required ways\", \"23\"):\n groups = self._group_ways(\n ways_ids=osm_parquet_files.ways_required_ids,\n destination_dir_path=destination_dir_path,\n grouped_ways_tmp_path=grouped_ways_tmp_path,\n grouped_ways_path=grouped_ways_path,\n ways_refs_with_nodes_structs=ways_refs_with_nodes_structs,\n )\n\n with TaskProgressBar(\"Saving required ways with linestrings\", \"24\") as bar:\n self._construct_ways_linestrings(\n bar=bar,\n groups=groups,\n destination_dir_path=destination_dir_path,\n grouped_ways_path=grouped_ways_path,\n )\n\n ways_parquet = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{destination_dir_path}/**')\n \"\"\")\n return ways_parquet\n\n def _group_ways(\n self,\n ways_ids: \"duckdb.DuckDBPyRelation\",\n ways_refs_with_nodes_structs: \"duckdb.DuckDBPyRelation\",\n destination_dir_path: Path,\n grouped_ways_tmp_path: Path,\n grouped_ways_path: Path,\n ) -> int:\n total_required_ways = ways_ids.count(\"id\").fetchone()[0]\n\n destination_dir_path.mkdir(parents=True, exist_ok=True)\n grouped_ways_tmp_path.mkdir(parents=True, exist_ok=True)\n\n if total_required_ways == 0:\n empty_file_path = 
str(destination_dir_path / \"empty.parquet\")\n self.connection.sql(\"CREATE OR REPLACE TABLE x(id STRING, linestring LINESTRING_2D);\")\n self.connection.table(\"x\").to_parquet(empty_file_path)\n return -1\n\n groups = int(floor(total_required_ways / self.rows_per_bucket))\n\n ways_ids_grouped_relation = self.connection.sql(f\"\"\"\n SELECT id,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({ways_ids.sql_query()})\n \"\"\")\n grouped_ways_ids_with_group_path = grouped_ways_tmp_path / \"ids_with_group\"\n ways_ids_grouped_relation_parquet = self._save_parquet_file(\n relation=ways_ids_grouped_relation, file_path=grouped_ways_ids_with_group_path\n )\n\n ways_with_nodes_points_relation = self.connection.sql(f\"\"\"\n SELECT\n w.id, w.point, w.ref_idx, rw.\"group\"\n FROM ({ways_ids_grouped_relation_parquet.sql_query()}) rw\n JOIN ({ways_refs_with_nodes_structs.sql_query()}) w\n ON rw.id = w.id\n \"\"\")\n\n grouped_ways_ids_with_points_path = grouped_ways_tmp_path / \"ids_with_points\"\n ways_with_nodes_points_relation_parquet = self._save_parquet_file(\n relation=ways_with_nodes_points_relation, file_path=grouped_ways_ids_with_points_path\n )\n\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n id, point, ref_idx, \"group\"\n FROM ({ways_with_nodes_points_relation_parquet.sql_query()}) w\n ) TO '{grouped_ways_path}'\n (FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000)\n \"\"\")\n\n return groups\n\n def _construct_ways_linestrings(\n self,\n bar: TaskProgressBar,\n groups: int,\n destination_dir_path: Path,\n grouped_ways_path: Path,\n ) -> None:\n grouped_ways_path.mkdir(parents=True, exist_ok=True)\n\n for group in bar.track(range(groups + 1)):\n current_ways_group_path = grouped_ways_path / f\"group={group}\"\n current_ways_group_relation = self.connection.sql(f\"\"\"\n SELECT * FROM read_parquet('{current_ways_group_path}/**')\n \"\"\")\n\n ways_with_linestrings = self.connection.sql(f\"\"\"\n SELECT id, list(point ORDER BY ref_idx ASC)::LINESTRING_2D linestring\n FROM ({current_ways_group_relation.sql_query()})\n GROUP BY id\n \"\"\")\n self._save_parquet_file(\n relation=ways_with_linestrings,\n file_path=destination_dir_path / f\"group={group}\",\n )\n\n def _get_filtered_ways_with_proper_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n required_ways_with_linestrings: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n osm_way_polygon_features_filter_clauses = [\n \"list_contains(map_keys(raw_tags), 'area') AND \"\n \"list_extract(map_extract(raw_tags, 'area'), 1) = 'yes'\"\n ]\n\n for osm_tag_key in self.osm_way_polygon_features_config.all:\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}')\"\n )\n\n for osm_tag_key, osm_tag_values in self.osm_way_polygon_features_config.allowlist.items():\n escaped_values = \",\".join(\n [f\"'{self._sql_escape(osm_tag_value)}'\" for osm_tag_value in osm_tag_values]\n )\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}') AND\"\n f\" list_has_any(map_extract(raw_tags, '{osm_tag_key}'), [{escaped_values}])\"\n )\n\n for osm_tag_key, osm_tag_values in self.osm_way_polygon_features_config.denylist.items():\n escaped_values = \",\".join(\n [f\"'{self._sql_escape(osm_tag_value)}'\" for osm_tag_value in osm_tag_values]\n )\n osm_way_polygon_features_filter_clauses.append(\n f\"list_contains(map_keys(raw_tags), '{osm_tag_key}') AND 
NOT\"\n f\" list_has_any(map_extract(raw_tags, '{osm_tag_key}'), [{escaped_values}])\"\n )\n\n ways_with_proper_geometry = self.connection.sql(f\"\"\"\n WITH required_ways_with_linestrings AS (\n SELECT\n w.id,\n w.tags,\n w_l.linestring,\n -- Filter below is based on `_is_closed_way_a_polygon` function from OSMnx\n -- Filter values are built dynamically from a config.\n (\n -- if first and last nodes are the same\n ST_Equals(linestring[1]::POINT_2D, linestring[-1]::POINT_2D)\n -- if the element doesn't have any tags leave it as a Linestring\n AND raw_tags IS NOT NULL\n -- if the element is specifically tagged 'area':'no' -> LineString\n AND NOT (\n list_contains(map_keys(raw_tags), 'area')\n AND list_extract(map_extract(raw_tags, 'area'), 1) = 'no'\n )\n AND ({' OR '.join(osm_way_polygon_features_filter_clauses)})\n ) AS is_polygon\n FROM ({required_ways_with_linestrings.sql_query()}) w_l\n SEMI JOIN ({osm_parquet_files.ways_filtered_ids.sql_query()}) fw ON w_l.id = fw.id\n JOIN ({osm_parquet_files.ways_all_with_tags.sql_query()}) w ON w.id = w_l.id\n ),\n proper_geometries AS (\n SELECT\n id,\n tags,\n (CASE\n WHEN is_polygon\n THEN linestring_to_polygon_wkt(linestring)\n ELSE linestring_to_linestring_wkt(linestring)\n END)::GEOMETRY AS geometry\n FROM\n required_ways_with_linestrings w\n )\n SELECT id, tags, geometry FROM proper_geometries\n \"\"\")\n ways_parquet = self._save_parquet_file_with_geometry(\n relation=ways_with_proper_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_ways_with_geometry\",\n step_name=\"Saving filtered ways with geometries\",\n step_number=\"25\",\n )\n return ways_parquet\n\n def _get_filtered_relations_with_geometry(\n self,\n osm_parquet_files: ConvertedOSMParquetFiles,\n required_ways_with_linestrings: \"duckdb.DuckDBPyRelation\",\n tmp_dir_name: str,\n ) -> \"duckdb.DuckDBPyRelation\":\n valid_relation_parts = self.connection.sql(f\"\"\"\n WITH unnested_relations AS (\n SELECT\n r.id,\n COALESCE(r.ref_role, 'outer') as ref_role,\n r.ref,\n linestring_to_linestring_wkt(w.linestring)::GEOMETRY as geometry\n FROM ({osm_parquet_files.relations_with_unnested_way_refs.sql_query()}) r\n SEMI JOIN ({osm_parquet_files.relations_filtered_ids.sql_query()}) fr\n ON r.id = fr.id\n JOIN ({required_ways_with_linestrings.sql_query()}) w\n ON w.id = r.ref\n ORDER BY r.id, r.ref_idx\n ),\n any_outer_refs AS (\n SELECT id, bool_or(ref_role == 'outer') any_outer_refs\n FROM unnested_relations\n GROUP BY id\n ),\n relations_with_geometries AS (\n SELECT\n x.id,\n CASE WHEN aor.any_outer_refs\n THEN x.ref_role ELSE 'outer'\n END as ref_role,\n x.geom geometry,\n row_number() OVER (PARTITION BY x.id) as geometry_id\n FROM (\n SELECT\n id,\n ref_role,\n UNNEST(\n ST_Dump(ST_LineMerge(ST_Collect(list(geometry)))), recursive := true\n ),\n FROM unnested_relations\n GROUP BY id, ref_role\n ) x\n JOIN any_outer_refs aor ON aor.id = x.id\n WHERE ST_NPoints(geom) >= 4\n ),\n valid_relations AS (\n SELECT id, is_valid\n FROM (\n SELECT\n id,\n bool_and(\n ST_Equals(ST_StartPoint(geometry), ST_EndPoint(geometry))\n ) is_valid\n FROM relations_with_geometries\n GROUP BY id\n )\n WHERE is_valid = true\n )\n SELECT * FROM relations_with_geometries\n SEMI JOIN valid_relations ON relations_with_geometries.id = valid_relations.id\n \"\"\")\n valid_relation_parts_parquet = self._save_parquet_file_with_geometry(\n relation=valid_relation_parts,\n file_path=Path(tmp_dir_name) / \"valid_relation_parts\",\n step_name=\"Saving valid relations parts\",\n step_number=\"26\",\n )\n 
relation_inner_parts = self.connection.sql(f\"\"\"\n SELECT id, geometry_id, ST_MakePolygon(geometry) geometry\n FROM ({valid_relation_parts_parquet.sql_query()})\n WHERE ref_role = 'inner'\n \"\"\")\n relation_inner_parts_parquet = self._save_parquet_file_with_geometry(\n relation=relation_inner_parts,\n file_path=Path(tmp_dir_name) / \"relation_inner_parts\",\n fix_geometries=True,\n step_name=\"Saving relations inner parts\",\n step_number=\"27\",\n )\n relation_outer_parts = self.connection.sql(f\"\"\"\n SELECT id, geometry_id, ST_MakePolygon(geometry) geometry\n FROM ({valid_relation_parts_parquet.sql_query()})\n WHERE ref_role = 'outer'\n \"\"\")\n relation_outer_parts_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts\",\n fix_geometries=True,\n step_name=\"Saving relations outer parts\",\n step_number=\"28\",\n )\n relation_outer_parts_with_holes = self.connection.sql(f\"\"\"\n SELECT\n og.id,\n og.geometry_id,\n ST_Difference(any_value(og.geometry), ST_Union_Agg(ig.geometry)) geometry\n FROM ({relation_outer_parts_parquet.sql_query()}) og\n JOIN ({relation_inner_parts_parquet.sql_query()}) ig\n ON og.id = ig.id AND ST_WITHIN(ig.geometry, og.geometry)\n GROUP BY og.id, og.geometry_id\n \"\"\")\n relation_outer_parts_with_holes_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts_with_holes,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts_with_holes\",\n step_name=\"Saving relations outer parts with holes\",\n step_number=\"29\",\n )\n relation_outer_parts_without_holes = self.connection.sql(f\"\"\"\n SELECT\n og.id,\n og.geometry_id,\n og.geometry\n FROM ({relation_outer_parts_parquet.sql_query()}) og\n ANTI JOIN ({relation_outer_parts_with_holes_parquet.sql_query()}) ogwh\n ON og.id = ogwh.id AND og.geometry_id = ogwh.geometry_id\n \"\"\")\n relation_outer_parts_without_holes_parquet = self._save_parquet_file_with_geometry(\n relation=relation_outer_parts_without_holes,\n file_path=Path(tmp_dir_name) / \"relation_outer_parts_without_holes\",\n step_name=\"Saving relations outer parts without holes\",\n step_number=\"30\",\n )\n relations_with_geometry = self.connection.sql(f\"\"\"\n WITH unioned_outer_geometries AS (\n SELECT id, geometry\n FROM ({relation_outer_parts_with_holes_parquet.sql_query()})\n UNION ALL\n SELECT id, geometry\n FROM ({relation_outer_parts_without_holes_parquet.sql_query()})\n ),\n final_geometries AS (\n SELECT id, ST_Union_Agg(geometry) geometry\n FROM unioned_outer_geometries\n GROUP BY id\n )\n SELECT r_g.id, r.tags, r_g.geometry\n FROM final_geometries r_g\n JOIN ({osm_parquet_files.relations_all_with_tags.sql_query()}) r\n ON r.id = r_g.id\n \"\"\")\n relations_parquet = self._save_parquet_file_with_geometry(\n relation=relations_with_geometry,\n file_path=Path(tmp_dir_name) / \"filtered_relations_with_geometry\",\n step_name=\"Saving filtered relations with geometries\",\n step_number=\"31\",\n )\n return relations_parquet\n\n def _save_parquet_file_with_geometry(\n self,\n relation: \"duckdb.DuckDBPyRelation\",\n file_path: Path,\n step_name: str,\n step_number: str,\n fix_geometries: bool = False,\n ) -> \"duckdb.DuckDBPyRelation\":\n if not fix_geometries:\n with TaskProgressSpinner(step_name, step_number):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb\n FROM ({relation.sql_query()})\n ) TO '{file_path}' (\n FORMAT 'parquet',\n PER_THREAD_OUTPUT true,\n ROW_GROUP_SIZE 
25000\n )\n \"\"\")\n else:\n valid_path = file_path / \"valid\"\n invalid_path = file_path / \"invalid\"\n fixed_path = file_path / \"fixed\"\n\n valid_path.mkdir(parents=True, exist_ok=True)\n invalid_path.mkdir(parents=True, exist_ok=True)\n fixed_path.mkdir(parents=True, exist_ok=True)\n\n # Save valid features\n with TaskProgressSpinner(f\"{step_name} - valid geometries\", f\"{step_number}.1\"):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb\n FROM ({relation.sql_query()})\n WHERE ST_IsValid(geometry)\n ) TO '{valid_path}' (\n FORMAT 'parquet',\n PER_THREAD_OUTPUT true,\n ROW_GROUP_SIZE 25000\n )\n \"\"\")\n\n # Save invalid features\n with TaskProgressSpinner(f\"{step_name} - invalid geometries\", f\"{step_number}.2\"):\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({relation.sql_query()})\n WHERE NOT ST_IsValid(geometry)\n ) TO '{invalid_path}' (\n FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000\n )\n \"\"\")\n\n # Fix invalid features\n total_groups = 0\n while (invalid_path / f\"group={total_groups}\").exists():\n total_groups += 1\n\n if total_groups > 0:\n with TaskProgressBar(\n f\"{step_name} - fixing invalid geometries\", f\"{step_number}.3\"\n ) as bar:\n for group_id in bar.track(range(total_groups)):\n current_invalid_features_group_path = invalid_path / f\"group={group_id}\"\n current_invalid_features_group_table = pq.read_table(\n current_invalid_features_group_path\n ).drop(\"group\")\n valid_geometry_column = ga.as_wkb(\n ga.to_geopandas(\n ga.with_crs(\n current_invalid_features_group_table.column(\"geometry_wkb\"),\n WGS84_CRS,\n )\n ).make_valid(),\n )\n current_invalid_features_group_table = (\n current_invalid_features_group_table.drop(\"geometry_wkb\")\n )\n\n current_invalid_features_group_table = (\n current_invalid_features_group_table.append_column(\n \"geometry_wkb\", valid_geometry_column\n )\n )\n pq.write_table(\n current_invalid_features_group_table,\n fixed_path / f\"data_{group_id}.parquet\",\n )\n\n self._delete_directories(invalid_path.parent, [\"invalid\"])\n\n return self.connection.sql(f\"\"\"\n SELECT * EXCLUDE (geometry_wkb), ST_GeomFromWKB(geometry_wkb) geometry\n FROM read_parquet('{file_path}/**')\n \"\"\")\n\n def _concatenate_results_to_geoparquet(\n self,\n parsed_data: ParsedOSMFeatures,\n tmp_dir_name: str,\n save_file_path: Path,\n explode_tags: bool,\n ) -> None:\n select_clauses = [\n *self._generate_osm_tags_sql_select(parsed_data, explode_tags),\n \"geometry\",\n ]\n\n node_select_clauses = [\"'node/' || id as feature_id\", *select_clauses]\n way_select_clauses = [\"'way/' || id as feature_id\", *select_clauses]\n relation_select_clauses = [\"'relation/' || id as feature_id\", *select_clauses]\n\n unioned_features = self.connection.sql(f\"\"\"\n SELECT {', '.join(node_select_clauses)}\n FROM ({parsed_data.nodes.sql_query()}) n\n UNION ALL\n SELECT {', '.join(way_select_clauses)}\n FROM ({parsed_data.ways.sql_query()}) w\n UNION ALL\n SELECT {', '.join(relation_select_clauses)}\n FROM ({parsed_data.relations.sql_query()}) r\n \"\"\")\n\n grouped_features = self._parse_features_relation_to_groups(unioned_features, explode_tags)\n\n valid_features_full_relation = self.connection.sql(f\"\"\"\n SELECT * FROM ({grouped_features.sql_query()})\n WHERE ST_IsValid(geometry)\n \"\"\")\n\n valid_features_parquet_path = 
Path(tmp_dir_name) / \"osm_valid_elements\"\n valid_features_parquet_relation = self._save_parquet_file_with_geometry(\n valid_features_full_relation,\n valid_features_parquet_path,\n step_name=\"Saving valid features\",\n step_number=\"32.1\",\n )\n\n valid_features_parquet_table = pq.read_table(valid_features_parquet_path)\n\n is_empty = valid_features_parquet_table.num_rows == 0\n\n if not is_empty:\n geometry_column = ga.as_wkb(\n ga.with_crs(valid_features_parquet_table.column(\"geometry_wkb\"), WGS84_CRS)\n )\n else:\n geometry_column = ga.as_wkb(gpd.GeoSeries([], crs=WGS84_CRS))\n\n valid_features_parquet_table = valid_features_parquet_table.append_column(\n GEOMETRY_COLUMN, geometry_column\n ).drop(\"geometry_wkb\")\n\n parquet_tables = [valid_features_parquet_table]\n\n invalid_features_full_relation = self.connection.sql(f\"\"\"\n SELECT * FROM ({grouped_features.sql_query()}) a\n ANTI JOIN ({valid_features_parquet_relation.sql_query()}) b\n ON a.feature_id = b.feature_id\n \"\"\")\n\n total_nodes = parsed_data.nodes.count(\"id\").fetchone()[0]\n total_ways = parsed_data.ways.count(\"id\").fetchone()[0]\n total_relations = parsed_data.relations.count(\"id\").fetchone()[0]\n total_features = total_nodes + total_ways + total_relations\n\n valid_features = valid_features_parquet_relation.count(\"feature_id\").fetchone()[0]\n\n invalid_features = total_features - valid_features\n\n if invalid_features > 0:\n with TaskProgressSpinner(\"Grouping invalid features\", \"32.2\"):\n groups = floor(invalid_features / self.rows_per_bucket)\n grouped_invalid_features_result_parquet = (\n Path(tmp_dir_name) / \"osm_invalid_elements_grouped\"\n )\n self.connection.sql(f\"\"\"\n COPY (\n SELECT\n * EXCLUDE (geometry), ST_AsWKB(geometry) geometry_wkb,\n floor(\n row_number() OVER () / {self.rows_per_bucket}\n )::INTEGER as \"group\",\n FROM ({invalid_features_full_relation.sql_query()})\n ) TO '{grouped_invalid_features_result_parquet}'\n (FORMAT 'parquet', PARTITION_BY (\"group\"), ROW_GROUP_SIZE 25000)\n \"\"\")\n\n with TaskProgressBar(\"Fixing invalid features\", \"32.3\") as bar:\n for group in bar.track(range(groups + 1)):\n current_invalid_features_group_path = (\n grouped_invalid_features_result_parquet / f\"group={group}\"\n )\n current_invalid_features_group_table = pq.read_table(\n current_invalid_features_group_path\n ).drop(\"group\")\n valid_geometry_column = ga.as_wkb(\n ga.to_geopandas(\n ga.with_crs(\n current_invalid_features_group_table.column(\"geometry_wkb\"),\n WGS84_CRS,\n )\n ).make_valid()\n )\n\n current_invalid_features_group_table = (\n current_invalid_features_group_table.append_column(\n GEOMETRY_COLUMN, valid_geometry_column\n )\n )\n current_invalid_features_group_table = (\n current_invalid_features_group_table.drop(\"geometry_wkb\")\n )\n parquet_tables.append(current_invalid_features_group_table)\n\n joined_parquet_table: pa.Table = pa.concat_tables(parquet_tables)\n\n is_empty = joined_parquet_table.num_rows == 0\n\n empty_columns = []\n for column_name in joined_parquet_table.column_names:\n if column_name in (FEATURES_INDEX, GEOMETRY_COLUMN):\n continue\n if (\n is_empty\n or pa.compute.all(\n pa.compute.is_null(joined_parquet_table.column(column_name))\n ).as_py()\n ):\n empty_columns.append(column_name)\n\n if empty_columns:\n joined_parquet_table = joined_parquet_table.drop(empty_columns)\n\n with TaskProgressSpinner(\"Saving final geoparquet file\", \"33\"):\n io.write_geoparquet_table( # type: ignore\n joined_parquet_table, save_file_path, 
primary_geometry_column=GEOMETRY_COLUMN\n )\n\n def _generate_osm_tags_sql_select(\n self, parsed_data: ParsedOSMFeatures, explode_tags: bool\n ) -> list[str]:\n \"\"\"Prepare features filter clauses based on tags filter.\"\"\"\n osm_tag_keys_select_clauses = []\n\n # TODO: elif keep other tags\n if not self.merged_tags_filter and not explode_tags:\n osm_tag_keys_select_clauses = [\"tags\"]\n elif not self.merged_tags_filter and explode_tags:\n osm_tag_keys = set()\n for elements in (\n parsed_data.nodes,\n parsed_data.ways,\n parsed_data.relations,\n ):\n found_tag_keys = [row[0] for row in self.connection.sql(f\"\"\"\n SELECT DISTINCT UNNEST(map_keys(tags)) tag_key\n FROM ({elements.sql_query()})\n \"\"\").fetchall()]\n osm_tag_keys.update(found_tag_keys)\n osm_tag_keys_select_clauses = [\n f\"list_extract(map_extract(tags, '{osm_tag_key}'), 1) as \\\"{osm_tag_key}\\\"\"\n for osm_tag_key in sorted(list(osm_tag_keys))\n ]\n elif self.merged_tags_filter and not explode_tags:\n filter_tag_clauses = []\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n filter_tag_clauses.append(f\"tag_entry.key = '{filter_tag_key}'\")\n elif isinstance(filter_tag_value, str):\n escaped_value = self._sql_escape(filter_tag_value)\n filter_tag_clauses.append(\n f\"(tag_entry.key = '{filter_tag_key}' AND tag_entry.value =\"\n f\" '{escaped_value}')\"\n )\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n filter_tag_clauses.append(\n f\"(tag_entry.key = '{filter_tag_key}' AND tag_entry.value IN\"\n f\" ({', '.join(values_list)}))\"\n )\n osm_tag_keys_select_clauses = [f\"\"\"\n map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries(tags)\n if {\" OR \".join(filter_tag_clauses)}\n ]\n ) as tags\n \"\"\"]\n elif self.merged_tags_filter and explode_tags:\n for filter_tag_key, filter_tag_value in self.merged_tags_filter.items():\n if isinstance(filter_tag_value, bool) and filter_tag_value:\n osm_tag_keys_select_clauses.append(\n f\"list_extract(map_extract(tags, '{filter_tag_key}'), 1) as\"\n f' \"{filter_tag_key}\"'\n )\n elif isinstance(filter_tag_value, str):\n escaped_value = self._sql_escape(filter_tag_value)\n osm_tag_keys_select_clauses.append(f\"\"\"\n CASE WHEN list_extract(\n map_extract(tags, '{filter_tag_key}'), 1\n ) = '{escaped_value}'\n THEN '{escaped_value}'\n ELSE NULL\n END as \"{filter_tag_key}\"\n \"\"\")\n elif isinstance(filter_tag_value, list) and filter_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in filter_tag_value]\n osm_tag_keys_select_clauses.append(f\"\"\"\n CASE WHEN list_extract(\n map_extract(tags, '{filter_tag_key}'), 1\n ) IN ({', '.join(values_list)})\n THEN list_extract(map_extract(tags, '{filter_tag_key}'), 1)\n ELSE NULL\n END as \"{filter_tag_key}\"\n \"\"\")\n\n if len(osm_tag_keys_select_clauses) > 100:\n warnings.warn(\n \"Select clause contains more than 100 columns\"\n f\" (found {len(osm_tag_keys_select_clauses)} columns).\"\n \" Query might fail with insufficient memory resources.\"\n \" Consider applying more restrictive OsmTagsFilter for parsing.\",\n stacklevel=1,\n )\n\n return osm_tag_keys_select_clauses\n\n def _parse_features_relation_to_groups(\n self,\n features_relation: \"duckdb.DuckDBPyRelation\",\n explode_tags: bool,\n ) -> \"duckdb.DuckDBPyRelation\":\n \"\"\"\n Optionally group raw OSM features into groups defined in 
`GroupedOsmTagsFilter`.\n\n Creates new features based on definition from `GroupedOsmTagsFilter`.\n Returns transformed DuckDB relation with columns based on group names from the filter.\n Values are built by concatenation of matching tag key and value with\n an equal sign (eg. amenity=parking). Since many tags can match a definition\n of a single group, a first match is used as a feature value.\n\n Args:\n features_relation (duckdb.DuckDBPyRelation): Generated features from the loader.\n explode_tags (bool): Whether to split tags into columns based on OSM tag keys.\n\n Returns:\n duckdb.DuckDBPyRelation: Parsed features_relation.\n \"\"\"\n if not self.tags_filter or not is_expected_type(self.tags_filter, GroupedOsmTagsFilter):\n return features_relation\n\n grouped_features_relation: \"duckdb.DuckDBPyRelation\"\n grouped_tags_filter = cast(GroupedOsmTagsFilter, self.tags_filter)\n\n if explode_tags:\n case_clauses = []\n for group_name in sorted(grouped_tags_filter.keys()):\n osm_filter = grouped_tags_filter[group_name]\n case_when_clauses = []\n for osm_tag_key, osm_tag_value in osm_filter.items():\n if isinstance(osm_tag_value, bool) and osm_tag_value:\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" IS NOT NULL THEN '{osm_tag_key}=' ||\"\n f' \"{osm_tag_key}\"'\n )\n elif isinstance(osm_tag_value, str):\n escaped_value = self._sql_escape(osm_tag_value)\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" = '{escaped_value}' THEN '{osm_tag_key}=' ||\"\n f' \"{osm_tag_key}\"'\n )\n elif isinstance(osm_tag_value, list) and osm_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in osm_tag_value]\n case_when_clauses.append(\n f\"WHEN \\\"{osm_tag_key}\\\" IN ({', '.join(values_list)}) THEN\"\n f\" '{osm_tag_key}=' || \\\"{osm_tag_key}\\\"\"\n )\n case_clause = f'CASE {\" \".join(case_when_clauses)} END AS \"{group_name}\"'\n case_clauses.append(case_clause)\n\n joined_case_clauses = \", \".join(case_clauses)\n grouped_features_relation = self.connection.sql(f\"\"\"\n SELECT feature_id, {joined_case_clauses}, geometry\n FROM ({features_relation.sql_query()})\n \"\"\")\n else:\n case_clauses = []\n group_names = sorted(grouped_tags_filter.keys())\n for group_name in group_names:\n osm_filter = grouped_tags_filter[group_name]\n case_when_clauses = []\n for osm_tag_key, osm_tag_value in osm_filter.items():\n element_clause = f\"element_at(tags, '{osm_tag_key}')[1]\"\n if isinstance(osm_tag_value, bool) and osm_tag_value:\n case_when_clauses.append(\n f\"WHEN {element_clause} IS NOT NULL THEN '{osm_tag_key}=' ||\"\n f\" {element_clause}\"\n )\n elif isinstance(osm_tag_value, str):\n escaped_value = self._sql_escape(osm_tag_value)\n case_when_clauses.append(\n f\"WHEN {element_clause} = '{escaped_value}' THEN '{osm_tag_key}=' ||\"\n f\" {element_clause}\"\n )\n elif isinstance(osm_tag_value, list) and osm_tag_value:\n values_list = [f\"'{self._sql_escape(value)}'\" for value in osm_tag_value]\n case_when_clauses.append(\n f\"WHEN {element_clause} IN ({', '.join(values_list)}) THEN\"\n f\" '{osm_tag_key}=' || {element_clause}\"\n )\n case_clause = f'CASE {\" \".join(case_when_clauses)} END'\n case_clauses.append(case_clause)\n\n group_names_as_sql_strings = [f\"'{group_name}'\" for group_name in group_names]\n groups_map = (\n f\"map([{', '.join(group_names_as_sql_strings)}], [{', '.join(case_clauses)}])\"\n )\n non_null_groups_map = f\"\"\"map_from_entries(\n [\n tag_entry\n for tag_entry in map_entries({groups_map})\n if tag_entry.value IS NOT NULL\n ]\n ) 
as tags\"\"\"\n\n grouped_features_relation = self.connection.sql(f\"\"\"\n SELECT feature_id, {non_null_groups_map}, geometry\n FROM ({features_relation.sql_query()})\n \"\"\")\n\n return grouped_features_relation" } ]
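The `_parse_features_relation_to_groups` docstring in the snippet above describes the grouped-tags transformation only in prose. As a minimal pure-Python sketch of the same rule (the helper name and sample data below are hypothetical, not part of QuackOSM):

from typing import Union

GroupedFilter = dict[str, dict[str, Union[bool, str, list[str]]]]

def group_tags(tags: dict[str, str], grouped_filter: GroupedFilter) -> dict[str, str]:
    # Map raw OSM tags to group values, mirroring the docstring's rule:
    # a matching tag contributes "key=value", and the first match per group wins.
    result: dict[str, str] = {}
    for group_name, osm_filter in grouped_filter.items():
        for key, allowed in osm_filter.items():
            value = tags.get(key)
            if value is None:
                continue
            if (
                allowed is True
                or (isinstance(allowed, str) and value == allowed)
                or (isinstance(allowed, list) and value in allowed)
            ):
                result[group_name] = f"{key}={value}"
                break
    return result

# e.g. amenity=parking, the concatenated form given in the docstring
assert group_tags(
    {"amenity": "parking", "surface": "asphalt"},
    {"parking": {"amenity": ["parking"], "building": "parking"}},
) == {"parking": "amenity=parking"}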
from collections.abc import Iterable from pathlib import Path from typing import Any, Optional, Union from shapely.geometry.base import BaseGeometry from quackosm._osm_tags_filters import GroupedOsmTagsFilter, OsmTagsFilter from quackosm._osm_way_polygon_features import OsmWayPolygonConfig from quackosm.pbf_file_reader import PbfFileReader import geopandas as gpd
21,439
│ node/10025656392 │ {name=Direction de… │ POINT (7.4270392 43.7365262) │ │ node/10025656393 │ {name=IQOS, openin… │ POINT (7.4275175 43.7373195) │ │ node/10025656394 │ {artist_name=Anna … │ POINT (7.4293446 43.737448) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/986864693 │ {natural=bare_rock} │ POLYGON ((7.4340482 43.745598, 7.4340263 4… │ │ way/986864694 │ {barrier=wall} │ LINESTRING (7.4327547 43.7445382, 7.432808… │ │ way/986864695 │ {natural=bare_rock} │ POLYGON ((7.4332994 43.7449315, 7.4332912 … │ │ way/986864696 │ {barrier=wall} │ LINESTRING (7.4356006 43.7464325, 7.435574… │ │ way/986864697 │ {natural=bare_rock} │ POLYGON ((7.4362767 43.74697, 7.4362983 43… │ │ way/990669427 │ {amenity=shelter, … │ POLYGON ((7.4146087 43.733883, 7.4146192 4… │ │ way/990669428 │ {highway=secondary… │ LINESTRING (7.4136598 43.7334433, 7.413640… │ │ way/990669429 │ {highway=secondary… │ LINESTRING (7.4137621 43.7334251, 7.413746… │ │ way/990848785 │ {addr:city=Monaco,… │ POLYGON ((7.4142551 43.7339622, 7.4143113 … │ │ way/993121275 │ {building=yes, nam… │ POLYGON ((7.4321416 43.7481309, 7.4321638 … │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 7906 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get only buildings, amenities and highways from a PBF file. Tags will be split into separate columns because of applying the filter. >>> gpq_path = qosm.convert_pbf_to_gpq( ... monaco_pbf_path, ... tags_filter={"building": True, "amenity": True, "highway": True} ... ) >>> gpq_path.as_posix() 'files/monaco_6593ca69098459d039054bc5fe0a87c56681e29a5f59d38ce3485c03cb0e9374_noclip_exploded.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────┬────────────┬─────────────┬───────────────────────────────┐ │ feature_id │ building │ amenity │ highway │ geometry │ │ varchar │ varchar │ varchar │ varchar │ geometry │ ├──────────────────┼──────────┼────────────┼─────────────┼───────────────────────────────┤ │ node/10025656390 │ NULL │ restaurant │ NULL │ POINT (7.4269287 43.7368818) │ │ node/10025843517 │ NULL │ restaurant │ NULL │ POINT (7.4219362 43.7367446) │ │ node/10025852089 │ NULL │ bar │ NULL │ POINT (7.4227543 43.7369926) │ │ node/10025852090 │ NULL │ restaurant │ NULL │ POINT (7.4225093 43.7369627) │ │ node/10068880332 │ NULL │ NULL │ platform │ POINT (7.4380849 43.7493273) │ │ node/10068880335 │ NULL │ bench │ NULL │ POINT (7.4186855 43.7321515) │ │ node/10127713363 │ NULL │ cafe │ NULL │ POINT (7.4266367 43.7420755) │ │ node/10601158089 │ NULL │ restaurant │ NULL │ POINT (7.4213086 43.7336187) │ │ node/10671507005 │ NULL │ bar │ NULL │ POINT (7.4296915 43.7423307) │ │ node/10674256605 │ NULL │ bar │ NULL │ POINT (7.4213558 43.7336317) │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ way/981971425 │ NULL │ NULL │ residential │ LINESTRING (7.4321217 43.74… │ │ way/982061461 │ NULL │ NULL │ secondary │ LINESTRING (7.4246341 43.74… │ │ way/982081599 │ NULL │ NULL │ tertiary │ LINESTRING (7.4225202 43.73… │ │ way/982081600 │ NULL │ NULL │ service │ LINESTRING (7.4225202 43.73… │ │ way/986029035 │ NULL │ NULL │ path │ LINESTRING (7.4189462 43.73… │ │ way/990669427 │ NULL │ shelter │ NULL │ POLYGON ((7.4146087 43.7338… │ │ way/990669428 │ NULL │ NULL │ secondary │ LINESTRING (7.4136598 43.73… │ │ way/990669429 │ NULL │ NULL │ secondary │ LINESTRING (7.4137621 43.73… │ │ way/990848785 │ yes │ NULL │ NULL │ POLYGON ((7.4142551 43.7339… │ │ way/993121275 │ yes │ NULL │ NULL │ POLYGON ((7.4321416 43.7481… │ ├──────────────────┴──────────┴────────────┴─────────────┴───────────────────────────────┤ │ 5772 rows (20 shown) 5 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get features for Malé - the capital city of Maldives Tags will be kept in a single column. >>> from shapely.geometry import box >>> gpq_path = qosm.convert_pbf_to_gpq( ... maldives_pbf_path, ... geometry_filter=box( ... minx=73.4975872, ... miny=4.1663240, ... maxx=73.5215528, ... maxy=4.1818121 ... ) ... ) >>> gpq_path.as_posix() 'files/maldives_nofilter_35532d32333a47a057265be0d7903ce27f6aa6ca3df31fe45f4ce67e4dbb3fb5_compact.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────────────────┬──────────────────────────────────────────────┐ │ feature_id │ tags │ geometry │ │ varchar │ map(varchar, varch… │ geometry │ ├──────────────────┼──────────────────────┼──────────────────────────────────────────────┤ │ node/10010180778 │ {brand=Ooredoo, br… │ POINT (73.5179039 4.1752105) │ │ node/10062500171 │ {contact:facebook=… │ POINT (73.509583 4.1724485) │ │ node/10078084764 │ {addr:city=Male', … │ POINT (73.5047972 4.1726734) │ │ node/10078086040 │ {addr:city=Malé, a… │ POINT (73.5031714 4.1759622) │ │ node/10158825718 │ {addr:postcode=201… │ POINT (73.5083189 4.1730108) │ │ node/10289176711 │ {addr:street=Dhona… │ POINT (73.5133902 4.1725724) │ │ node/10294045310 │ {amenity=restauran… │ POINT (73.5091277 4.1735378) │ │ node/10294045311 │ {amenity=restauran… │ POINT (73.5055534 4.1759515) │ │ node/10294045411 │ {amenity=restauran… │ POINT (73.5037257 4.1717866) │ │ node/10294045412 │ {amenity=restauran… │ POINT (73.5024147 4.1761633) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/91986244 │ {highway=residenti… │ LINESTRING (73.5069785 4.1704686, 73.50759… │ │ way/91986245 │ {highway=residenti… │ LINESTRING (73.5135834 4.1740562, 73.51383… │ │ way/91986249 │ {highway=residenti… │ LINESTRING (73.5153971 4.1735146, 73.51601… │ │ way/91986251 │ {highway=residenti… │ LINESTRING (73.5082522 4.1709887, 73.50823… │ │ way/91986254 │ {highway=residenti… │ LINESTRING (73.508114 4.1693477, 73.508154… │ │ way/91986255 │ {landuse=cemetery,… │ POLYGON ((73.507509 4.1731064, 73.5078884 … │ │ way/91986256 │ {highway=residenti… │ LINESTRING (73.5106692 4.1744828, 73.51082… │ │ way/935784864 │ {layer=-1, locatio… │ LINESTRING (73.4875382 4.1703263, 73.50074… │ │ way/935784867 │ {layer=-1, locatio… │ LINESTRING (73.446172 4.1856738, 73.460937… │ │ way/959150179 │ {amenity=place_of_… │ POLYGON ((73.5184052 4.1755282, 73.5184863… │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 2140 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ """
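The doctests above inspect the generated GeoParquet file with duckdb. An equivalent check with geopandas could look like the sketch below, assuming an installed geopandas version that understands the file's GeoParquet metadata; the path is the output shown in the doctest:

import geopandas as gpd

gdf = gpd.read_parquet(
    "files/maldives_nofilter_35532d32333a47a057265be0d7903ce27f6aa6ca3df31fe45f4ce67e4dbb3fb5_compact.geoparquet"
)
print(gdf.crs)     # WGS84 (EPSG:4326), the CRS the reader writes
print(gdf.head())  # feature_id, tags and geometry, as in the table above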
""" Functions. This module contains helper functions to simplify the usage. """ def convert_pbf_to_gpq( pbf_path: Union[str, Path], tags_filter: Optional[Union[OsmTagsFilter, GroupedOsmTagsFilter]] = None, geometry_filter: Optional[BaseGeometry] = None, result_file_path: Optional[Union[str, Path]] = None, explode_tags: Optional[bool] = None, ignore_cache: bool = False, filter_osm_ids: Optional[list[str]] = None, working_directory: Union[str, Path] = "files", osm_way_polygon_features_config: Optional[Union[OsmWayPolygonConfig, dict[str, Any]]] = None, ) -> Path: """ Convert PBF file to GeoParquet file. Args: pbf_path (Union[str, Path]): Pbf file to be parsed to GeoParquet. tags_filter (Union[OsmTagsFilter, GroupedOsmTagsFilter], optional): A dictionary specifying which tags to download. The keys should be OSM tags (e.g. `building`, `amenity`). The values should either be `True` for retrieving all objects with the tag, string for retrieving a single tag-value pair or list of strings for retrieving all values specified in the list. `tags={'leisure': 'park}` would return parks from the area. `tags={'leisure': 'park, 'amenity': True, 'shop': ['bakery', 'bicycle']}` would return parks, all amenity types, bakeries and bicycle shops. If `None`, handler will allow all of the tags to be parsed. Defaults to `None`. geometry_filter (BaseGeometry, optional): Region which can be used to filter only intersecting OSM objects. Defaults to `None`. result_file_path (Union[str, Path], optional): Where to save the geoparquet file. If not provided, will be generated based on hashes from provided tags filter and geometry filter. Defaults to `None`. explode_tags (bool, optional): Whether to split tags into columns based on OSM tag keys. If `None`, will be set based on `tags_filter` parameter. If no tags filter is provided, then `explode_tags` will set to `False`, if there is tags filter it will set to `True`. Defaults to `None`. ignore_cache (bool, optional): Whether to ignore precalculated geoparquet files or not. Defaults to False. filter_osm_ids: (list[str], optional): List of OSM features ids to read from the file. Have to be in the form of 'node/<id>', 'way/<id>' or 'relation/<id>'. Defaults to an empty list. working_directory (Union[str, Path], optional): Directory where to save the parsed `*.parquet` files. Defaults to "files". osm_way_polygon_features_config (Union[OsmWayPolygonConfig, dict[str, Any]], optional): Config used to determine which closed way features are polygons. Modifications to this config left are left for experienced OSM users. Defaults to predefined "osm_way_polygon_features.json". Returns: Path: Path to the generated GeoParquet file. Examples: Get OSM data from a PBF file. Tags will be kept in a single column. >>> import quackosm as qosm >>> gpq_path = qosm.convert_pbf_to_gpq(monaco_pbf_path) >>> gpq_path.as_posix() 'files/monaco_nofilter_noclip_compact.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────────────────┬──────────────────────────────────────────────┐ │ feature_id │ tags │ geometry │ │ varchar │ map(varchar, varch… │ geometry │ ├──────────────────┼──────────────────────┼──────────────────────────────────────────────┤ │ node/10005045289 │ {shop=bakery} │ POINT (7.4224498 43.7310532) │ │ node/10020887517 │ {leisure=swimming_… │ POINT (7.4131561 43.7338391) │ │ node/10021298117 │ {leisure=swimming_… │ POINT (7.4277743 43.7427669) │ │ node/10021298717 │ {leisure=swimming_… │ POINT (7.4263029 43.7409734) │ │ node/10025656383 │ {ferry=yes, name=Q… │ POINT (7.4254971 43.7369002) │ │ node/10025656390 │ {amenity=restauran… │ POINT (7.4269287 43.7368818) │ │ node/10025656391 │ {name=Capitainerie… │ POINT (7.4272127 43.7359593) │ │ node/10025656392 │ {name=Direction de… │ POINT (7.4270392 43.7365262) │ │ node/10025656393 │ {name=IQOS, openin… │ POINT (7.4275175 43.7373195) │ │ node/10025656394 │ {artist_name=Anna … │ POINT (7.4293446 43.737448) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/986864693 │ {natural=bare_rock} │ POLYGON ((7.4340482 43.745598, 7.4340263 4… │ │ way/986864694 │ {barrier=wall} │ LINESTRING (7.4327547 43.7445382, 7.432808… │ │ way/986864695 │ {natural=bare_rock} │ POLYGON ((7.4332994 43.7449315, 7.4332912 … │ │ way/986864696 │ {barrier=wall} │ LINESTRING (7.4356006 43.7464325, 7.435574… │ │ way/986864697 │ {natural=bare_rock} │ POLYGON ((7.4362767 43.74697, 7.4362983 43… │ │ way/990669427 │ {amenity=shelter, … │ POLYGON ((7.4146087 43.733883, 7.4146192 4… │ │ way/990669428 │ {highway=secondary… │ LINESTRING (7.4136598 43.7334433, 7.413640… │ │ way/990669429 │ {highway=secondary… │ LINESTRING (7.4137621 43.7334251, 7.413746… │ │ way/990848785 │ {addr:city=Monaco,… │ POLYGON ((7.4142551 43.7339622, 7.4143113 … │ │ way/993121275 │ {building=yes, nam… │ POLYGON ((7.4321416 43.7481309, 7.4321638 … │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 7906 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get only buildings, amenities and highways from a PBF file. Tags will be split into separate columns because of applying the filter. >>> gpq_path = qosm.convert_pbf_to_gpq( ... monaco_pbf_path, ... tags_filter={"building": True, "amenity": True, "highway": True} ... ) >>> gpq_path.as_posix() 'files/monaco_6593ca69098459d039054bc5fe0a87c56681e29a5f59d38ce3485c03cb0e9374_noclip_exploded.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────┬────────────┬─────────────┬───────────────────────────────┐ │ feature_id │ building │ amenity │ highway │ geometry │ │ varchar │ varchar │ varchar │ varchar │ geometry │ ├──────────────────┼──────────┼────────────┼─────────────┼───────────────────────────────┤ │ node/10025656390 │ NULL │ restaurant │ NULL │ POINT (7.4269287 43.7368818) │ │ node/10025843517 │ NULL │ restaurant │ NULL │ POINT (7.4219362 43.7367446) │ │ node/10025852089 │ NULL │ bar │ NULL │ POINT (7.4227543 43.7369926) │ │ node/10025852090 │ NULL │ restaurant │ NULL │ POINT (7.4225093 43.7369627) │ │ node/10068880332 │ NULL │ NULL │ platform │ POINT (7.4380849 43.7493273) │ │ node/10068880335 │ NULL │ bench │ NULL │ POINT (7.4186855 43.7321515) │ │ node/10127713363 │ NULL │ cafe │ NULL │ POINT (7.4266367 43.7420755) │ │ node/10601158089 │ NULL │ restaurant │ NULL │ POINT (7.4213086 43.7336187) │ │ node/10671507005 │ NULL │ bar │ NULL │ POINT (7.4296915 43.7423307) │ │ node/10674256605 │ NULL │ bar │ NULL │ POINT (7.4213558 43.7336317) │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ · │ · │ · │ · │ · │ │ way/981971425 │ NULL │ NULL │ residential │ LINESTRING (7.4321217 43.74… │ │ way/982061461 │ NULL │ NULL │ secondary │ LINESTRING (7.4246341 43.74… │ │ way/982081599 │ NULL │ NULL │ tertiary │ LINESTRING (7.4225202 43.73… │ │ way/982081600 │ NULL │ NULL │ service │ LINESTRING (7.4225202 43.73… │ │ way/986029035 │ NULL │ NULL │ path │ LINESTRING (7.4189462 43.73… │ │ way/990669427 │ NULL │ shelter │ NULL │ POLYGON ((7.4146087 43.7338… │ │ way/990669428 │ NULL │ NULL │ secondary │ LINESTRING (7.4136598 43.73… │ │ way/990669429 │ NULL │ NULL │ secondary │ LINESTRING (7.4137621 43.73… │ │ way/990848785 │ yes │ NULL │ NULL │ POLYGON ((7.4142551 43.7339… │ │ way/993121275 │ yes │ NULL │ NULL │ POLYGON ((7.4321416 43.7481… │ ├──────────────────┴──────────┴────────────┴─────────────┴───────────────────────────────┤ │ 5772 rows (20 shown) 5 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ Get features for Malé - the capital city of Maldives Tags will be kept in a single column. >>> from shapely.geometry import box >>> gpq_path = qosm.convert_pbf_to_gpq( ... maldives_pbf_path, ... geometry_filter=box( ... minx=73.4975872, ... miny=4.1663240, ... maxx=73.5215528, ... maxy=4.1818121 ... ) ... ) >>> gpq_path.as_posix() 'files/maldives_nofilter_35532d32333a47a057265be0d7903ce27f6aa6ca3df31fe45f4ce67e4dbb3fb5_compact.geoparquet' Inspect the file with duckdb >>> import duckdb >>> duckdb.load_extension('spatial') >>> duckdb.read_parquet(str(gpq_path)).project( ... "* REPLACE (ST_GeomFromWKB(geometry) AS geometry)" ... 
).order("feature_id") # doctest: +SKIP ┌──────────────────┬──────────────────────┬──────────────────────────────────────────────┐ │ feature_id │ tags │ geometry │ │ varchar │ map(varchar, varch… │ geometry │ ├──────────────────┼──────────────────────┼──────────────────────────────────────────────┤ │ node/10010180778 │ {brand=Ooredoo, br… │ POINT (73.5179039 4.1752105) │ │ node/10062500171 │ {contact:facebook=… │ POINT (73.509583 4.1724485) │ │ node/10078084764 │ {addr:city=Male', … │ POINT (73.5047972 4.1726734) │ │ node/10078086040 │ {addr:city=Malé, a… │ POINT (73.5031714 4.1759622) │ │ node/10158825718 │ {addr:postcode=201… │ POINT (73.5083189 4.1730108) │ │ node/10289176711 │ {addr:street=Dhona… │ POINT (73.5133902 4.1725724) │ │ node/10294045310 │ {amenity=restauran… │ POINT (73.5091277 4.1735378) │ │ node/10294045311 │ {amenity=restauran… │ POINT (73.5055534 4.1759515) │ │ node/10294045411 │ {amenity=restauran… │ POINT (73.5037257 4.1717866) │ │ node/10294045412 │ {amenity=restauran… │ POINT (73.5024147 4.1761633) │ │ · │ · │ · │ │ · │ · │ · │ │ · │ · │ · │ │ way/91986244 │ {highway=residenti… │ LINESTRING (73.5069785 4.1704686, 73.50759… │ │ way/91986245 │ {highway=residenti… │ LINESTRING (73.5135834 4.1740562, 73.51383… │ │ way/91986249 │ {highway=residenti… │ LINESTRING (73.5153971 4.1735146, 73.51601… │ │ way/91986251 │ {highway=residenti… │ LINESTRING (73.5082522 4.1709887, 73.50823… │ │ way/91986254 │ {highway=residenti… │ LINESTRING (73.508114 4.1693477, 73.508154… │ │ way/91986255 │ {landuse=cemetery,… │ POLYGON ((73.507509 4.1731064, 73.5078884 … │ │ way/91986256 │ {highway=residenti… │ LINESTRING (73.5106692 4.1744828, 73.51082… │ │ way/935784864 │ {layer=-1, locatio… │ LINESTRING (73.4875382 4.1703263, 73.50074… │ │ way/935784867 │ {layer=-1, locatio… │ LINESTRING (73.446172 4.1856738, 73.460937… │ │ way/959150179 │ {amenity=place_of_… │ POLYGON ((73.5184052 4.1755282, 73.5184863… │ ├──────────────────┴──────────────────────┴──────────────────────────────────────────────┤ │ 2140 rows (20 shown) 3 columns │ └────────────────────────────────────────────────────────────────────────────────────────┘ """
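As a compact recap of the three `tags_filter` value forms documented in the docstring above, a call mixing all of them might look like this sketch (the input PBF path is hypothetical):

import quackosm as qosm

qosm.convert_pbf_to_gpq(
    "example.osm.pbf",                  # hypothetical input file
    tags_filter={
        "building": True,               # bool: keep every value of this tag
        "leisure": "park",              # str: keep a single tag-value pair
        "shop": ["bakery", "bicycle"],  # list: keep any of the listed values
    },
)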
return PbfFileReader(
2
2023-12-28 11:26:41+00:00
24k
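Each row of this dump pairs a truncated source file with the true next line (here `return PbfFileReader(`). A minimal scoring sketch for such rows, with a hypothetical helper name and a whitespace-insensitive exact match, could be:

def next_line_match(prediction: str, target: str) -> bool:
    # Compare a model's first generated line against the recorded next line,
    # ignoring surrounding whitespace; stricter or fuzzier metrics are possible.
    return prediction.strip() == target.strip()

# The target string is taken verbatim from the row above.
assert next_line_match("    return PbfFileReader(", "return PbfFileReader(")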
KyanChen/TTP
mmdet/models/roi_heads/bbox_heads/multi_instance_bbox_head.py
[ { "identifier": "BBoxHead", "path": "mmdet/models/roi_heads/bbox_heads/bbox_head.py", "snippet": "class BBoxHead(BaseModule):\n \"\"\"Simplest RoI head, with only two fc layers for classification and\n regression respectively.\"\"\"\n\n def __init__(self,\n with_avg_pool: bool = False,\n with_cls: bool = True,\n with_reg: bool = True,\n roi_feat_size: int = 7,\n in_channels: int = 256,\n num_classes: int = 80,\n bbox_coder: ConfigType = dict(\n type='DeltaXYWHBBoxCoder',\n clip_border=True,\n target_means=[0., 0., 0., 0.],\n target_stds=[0.1, 0.1, 0.2, 0.2]),\n predict_box_type: str = 'hbox',\n reg_class_agnostic: bool = False,\n reg_decoded_bbox: bool = False,\n reg_predictor_cfg: ConfigType = dict(type='Linear'),\n cls_predictor_cfg: ConfigType = dict(type='Linear'),\n loss_cls: ConfigType = dict(\n type='CrossEntropyLoss',\n use_sigmoid=False,\n loss_weight=1.0),\n loss_bbox: ConfigType = dict(\n type='SmoothL1Loss', beta=1.0, loss_weight=1.0),\n init_cfg: OptMultiConfig = None) -> None:\n super().__init__(init_cfg=init_cfg)\n assert with_cls or with_reg\n self.with_avg_pool = with_avg_pool\n self.with_cls = with_cls\n self.with_reg = with_reg\n self.roi_feat_size = _pair(roi_feat_size)\n self.roi_feat_area = self.roi_feat_size[0] * self.roi_feat_size[1]\n self.in_channels = in_channels\n self.num_classes = num_classes\n self.predict_box_type = predict_box_type\n self.reg_class_agnostic = reg_class_agnostic\n self.reg_decoded_bbox = reg_decoded_bbox\n self.reg_predictor_cfg = reg_predictor_cfg\n self.cls_predictor_cfg = cls_predictor_cfg\n\n self.bbox_coder = TASK_UTILS.build(bbox_coder)\n self.loss_cls = MODELS.build(loss_cls)\n self.loss_bbox = MODELS.build(loss_bbox)\n\n in_channels = self.in_channels\n if self.with_avg_pool:\n self.avg_pool = nn.AvgPool2d(self.roi_feat_size)\n else:\n in_channels *= self.roi_feat_area\n if self.with_cls:\n # need to add background class\n if self.custom_cls_channels:\n cls_channels = self.loss_cls.get_cls_channels(self.num_classes)\n else:\n cls_channels = num_classes + 1\n cls_predictor_cfg_ = self.cls_predictor_cfg.copy()\n cls_predictor_cfg_.update(\n in_features=in_channels, out_features=cls_channels)\n self.fc_cls = MODELS.build(cls_predictor_cfg_)\n if self.with_reg:\n box_dim = self.bbox_coder.encode_size\n out_dim_reg = box_dim if reg_class_agnostic else \\\n box_dim * num_classes\n reg_predictor_cfg_ = self.reg_predictor_cfg.copy()\n if isinstance(reg_predictor_cfg_, (dict, ConfigDict)):\n reg_predictor_cfg_.update(\n in_features=in_channels, out_features=out_dim_reg)\n self.fc_reg = MODELS.build(reg_predictor_cfg_)\n self.debug_imgs = None\n if init_cfg is None:\n self.init_cfg = []\n if self.with_cls:\n self.init_cfg += [\n dict(\n type='Normal', std=0.01, override=dict(name='fc_cls'))\n ]\n if self.with_reg:\n self.init_cfg += [\n dict(\n type='Normal', std=0.001, override=dict(name='fc_reg'))\n ]\n\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n @property\n def custom_cls_channels(self) -> bool:\n \"\"\"get custom_cls_channels from loss_cls.\"\"\"\n return getattr(self.loss_cls, 'custom_cls_channels', False)\n\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n @property\n def custom_activation(self) -> bool:\n \"\"\"get custom_activation from loss_cls.\"\"\"\n return getattr(self.loss_cls, 'custom_activation', False)\n\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n @property\n def custom_accuracy(self) -> bool:\n \"\"\"get custom_accuracy from loss_cls.\"\"\"\n return 
getattr(self.loss_cls, 'custom_accuracy', False)\n\n def forward(self, x: Tuple[Tensor]) -> tuple:\n \"\"\"Forward features from the upstream network.\n\n Args:\n x (tuple[Tensor]): Features from the upstream network, each is\n a 4D-tensor.\n\n Returns:\n tuple: A tuple of classification scores and bbox prediction.\n\n - cls_score (Tensor): Classification scores for all\n scale levels, each is a 4D-tensor, the channels number\n is num_base_priors * num_classes.\n - bbox_pred (Tensor): Box energies / deltas for all\n scale levels, each is a 4D-tensor, the channels number\n is num_base_priors * 4.\n \"\"\"\n if self.with_avg_pool:\n if x.numel() > 0:\n x = self.avg_pool(x)\n x = x.view(x.size(0), -1)\n else:\n # avg_pool does not support empty tensor,\n # so use torch.mean instead it\n x = torch.mean(x, dim=(-1, -2))\n cls_score = self.fc_cls(x) if self.with_cls else None\n bbox_pred = self.fc_reg(x) if self.with_reg else None\n return cls_score, bbox_pred\n\n def _get_targets_single(self, pos_priors: Tensor, neg_priors: Tensor,\n pos_gt_bboxes: Tensor, pos_gt_labels: Tensor,\n cfg: ConfigDict) -> tuple:\n \"\"\"Calculate the ground truth for proposals in the single image\n according to the sampling results.\n\n Args:\n pos_priors (Tensor): Contains all the positive boxes,\n has shape (num_pos, 4), the last dimension 4\n represents [tl_x, tl_y, br_x, br_y].\n neg_priors (Tensor): Contains all the negative boxes,\n has shape (num_neg, 4), the last dimension 4\n represents [tl_x, tl_y, br_x, br_y].\n pos_gt_bboxes (Tensor): Contains gt_boxes for\n all positive samples, has shape (num_pos, 4),\n the last dimension 4\n represents [tl_x, tl_y, br_x, br_y].\n pos_gt_labels (Tensor): Contains gt_labels for\n all positive samples, has shape (num_pos, ).\n cfg (obj:`ConfigDict`): `train_cfg` of R-CNN.\n\n Returns:\n Tuple[Tensor]: Ground truth for proposals\n in a single image. Containing the following Tensors:\n\n - labels(Tensor): Gt_labels for all proposals, has\n shape (num_proposals,).\n - label_weights(Tensor): Labels_weights for all\n proposals, has shape (num_proposals,).\n - bbox_targets(Tensor):Regression target for all\n proposals, has shape (num_proposals, 4), the\n last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n - bbox_weights(Tensor):Regression weights for all\n proposals, has shape (num_proposals, 4).\n \"\"\"\n num_pos = pos_priors.size(0)\n num_neg = neg_priors.size(0)\n num_samples = num_pos + num_neg\n\n # original implementation uses new_zeros since BG are set to be 0\n # now use empty & fill because BG cat_id = num_classes,\n # FG cat_id = [0, num_classes-1]\n labels = pos_priors.new_full((num_samples, ),\n self.num_classes,\n dtype=torch.long)\n reg_dim = pos_gt_bboxes.size(-1) if self.reg_decoded_bbox \\\n else self.bbox_coder.encode_size\n label_weights = pos_priors.new_zeros(num_samples)\n bbox_targets = pos_priors.new_zeros(num_samples, reg_dim)\n bbox_weights = pos_priors.new_zeros(num_samples, reg_dim)\n if num_pos > 0:\n labels[:num_pos] = pos_gt_labels\n pos_weight = 1.0 if cfg.pos_weight <= 0 else cfg.pos_weight\n label_weights[:num_pos] = pos_weight\n if not self.reg_decoded_bbox:\n pos_bbox_targets = self.bbox_coder.encode(\n pos_priors, pos_gt_bboxes)\n else:\n # When the regression loss (e.g. 
`IouLoss`, `GIouLoss`)\n # is applied directly on the decoded bounding boxes, both\n # the predicted boxes and regression targets should be with\n # absolute coordinate format.\n pos_bbox_targets = get_box_tensor(pos_gt_bboxes)\n bbox_targets[:num_pos, :] = pos_bbox_targets\n bbox_weights[:num_pos, :] = 1\n if num_neg > 0:\n label_weights[-num_neg:] = 1.0\n\n return labels, label_weights, bbox_targets, bbox_weights\n\n def get_targets(self,\n sampling_results: List[SamplingResult],\n rcnn_train_cfg: ConfigDict,\n concat: bool = True) -> tuple:\n \"\"\"Calculate the ground truth for all samples in a batch according to\n the sampling_results.\n\n Almost the same as the implementation in bbox_head, we passed\n additional parameters pos_inds_list and neg_inds_list to\n `_get_targets_single` function.\n\n Args:\n sampling_results (List[obj:SamplingResult]): Assign results of\n all images in a batch after sampling.\n rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n concat (bool): Whether to concatenate the results of all\n the images in a single batch.\n\n Returns:\n Tuple[Tensor]: Ground truth for proposals in a single image.\n Containing the following list of Tensors:\n\n - labels (list[Tensor],Tensor): Gt_labels for all\n proposals in a batch, each tensor in list has\n shape (num_proposals,) when `concat=False`, otherwise\n just a single tensor has shape (num_all_proposals,).\n - label_weights (list[Tensor]): Labels_weights for\n all proposals in a batch, each tensor in list has\n shape (num_proposals,) when `concat=False`, otherwise\n just a single tensor has shape (num_all_proposals,).\n - bbox_targets (list[Tensor],Tensor): Regression target\n for all proposals in a batch, each tensor in list\n has shape (num_proposals, 4) when `concat=False`,\n otherwise just a single tensor has shape\n (num_all_proposals, 4), the last dimension 4 represents\n [tl_x, tl_y, br_x, br_y].\n - bbox_weights (list[tensor],Tensor): Regression weights for\n all proposals in a batch, each tensor in list has shape\n (num_proposals, 4) when `concat=False`, otherwise just a\n single tensor has shape (num_all_proposals, 4).\n \"\"\"\n pos_priors_list = [res.pos_priors for res in sampling_results]\n neg_priors_list = [res.neg_priors for res in sampling_results]\n pos_gt_bboxes_list = [res.pos_gt_bboxes for res in sampling_results]\n pos_gt_labels_list = [res.pos_gt_labels for res in sampling_results]\n labels, label_weights, bbox_targets, bbox_weights = multi_apply(\n self._get_targets_single,\n pos_priors_list,\n neg_priors_list,\n pos_gt_bboxes_list,\n pos_gt_labels_list,\n cfg=rcnn_train_cfg)\n\n if concat:\n labels = torch.cat(labels, 0)\n label_weights = torch.cat(label_weights, 0)\n bbox_targets = torch.cat(bbox_targets, 0)\n bbox_weights = torch.cat(bbox_weights, 0)\n return labels, label_weights, bbox_targets, bbox_weights\n\n def loss_and_target(self,\n cls_score: Tensor,\n bbox_pred: Tensor,\n rois: Tensor,\n sampling_results: List[SamplingResult],\n rcnn_train_cfg: ConfigDict,\n concat: bool = True,\n reduction_override: Optional[str] = None) -> dict:\n \"\"\"Calculate the loss based on the features extracted by the bbox head.\n\n Args:\n cls_score (Tensor): Classification prediction\n results of all class, has shape\n (batch_size * num_proposals_single_image, num_classes)\n bbox_pred (Tensor): Regression prediction results,\n has shape\n (batch_size * num_proposals_single_image, 4), the last\n dimension 4 represents [tl_x, tl_y, br_x, br_y].\n rois (Tensor): RoIs with the shape\n (batch_size * 
num_proposals_single_image, 5) where the first\n column indicates batch id of each RoI.\n sampling_results (List[obj:SamplingResult]): Assign results of\n all images in a batch after sampling.\n rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN.\n concat (bool): Whether to concatenate the results of all\n the images in a single batch. Defaults to True.\n reduction_override (str, optional): The reduction\n method used to override the original reduction\n method of the loss. Options are \"none\",\n \"mean\" and \"sum\". Defaults to None,\n\n Returns:\n dict: A dictionary of loss and targets components.\n The targets are only used for cascade rcnn.\n \"\"\"\n\n cls_reg_targets = self.get_targets(\n sampling_results, rcnn_train_cfg, concat=concat)\n losses = self.loss(\n cls_score,\n bbox_pred,\n rois,\n *cls_reg_targets,\n reduction_override=reduction_override)\n\n # cls_reg_targets is only for cascade rcnn\n return dict(loss_bbox=losses, bbox_targets=cls_reg_targets)\n\n def loss(self,\n cls_score: Tensor,\n bbox_pred: Tensor,\n rois: Tensor,\n labels: Tensor,\n label_weights: Tensor,\n bbox_targets: Tensor,\n bbox_weights: Tensor,\n reduction_override: Optional[str] = None) -> dict:\n \"\"\"Calculate the loss based on the network predictions and targets.\n\n Args:\n cls_score (Tensor): Classification prediction\n results of all class, has shape\n (batch_size * num_proposals_single_image, num_classes)\n bbox_pred (Tensor): Regression prediction results,\n has shape\n (batch_size * num_proposals_single_image, 4), the last\n dimension 4 represents [tl_x, tl_y, br_x, br_y].\n rois (Tensor): RoIs with the shape\n (batch_size * num_proposals_single_image, 5) where the first\n column indicates batch id of each RoI.\n labels (Tensor): Gt_labels for all proposals in a batch, has\n shape (batch_size * num_proposals_single_image, ).\n label_weights (Tensor): Labels_weights for all proposals in a\n batch, has shape (batch_size * num_proposals_single_image, ).\n bbox_targets (Tensor): Regression target for all proposals in a\n batch, has shape (batch_size * num_proposals_single_image, 4),\n the last dimension 4 represents [tl_x, tl_y, br_x, br_y].\n bbox_weights (Tensor): Regression weights for all proposals in a\n batch, has shape (batch_size * num_proposals_single_image, 4).\n reduction_override (str, optional): The reduction\n method used to override the original reduction\n method of the loss. Options are \"none\",\n \"mean\" and \"sum\". Defaults to None,\n\n Returns:\n dict: A dictionary of loss.\n \"\"\"\n\n losses = dict()\n\n if cls_score is not None:\n avg_factor = max(torch.sum(label_weights > 0).float().item(), 1.)\n if cls_score.numel() > 0:\n loss_cls_ = self.loss_cls(\n cls_score,\n labels,\n label_weights,\n avg_factor=avg_factor,\n reduction_override=reduction_override)\n if isinstance(loss_cls_, dict):\n losses.update(loss_cls_)\n else:\n losses['loss_cls'] = loss_cls_\n if self.custom_activation:\n acc_ = self.loss_cls.get_accuracy(cls_score, labels)\n losses.update(acc_)\n else:\n losses['acc'] = accuracy(cls_score, labels)\n if bbox_pred is not None:\n bg_class_ind = self.num_classes\n # 0~self.num_classes-1 are FG, self.num_classes is BG\n pos_inds = (labels >= 0) & (labels < bg_class_ind)\n # do not perform bounding box regression for BG anymore.\n if pos_inds.any():\n if self.reg_decoded_bbox:\n # When the regression loss (e.g. 
`IouLoss`,\n # `GIouLoss`, `DIouLoss`) is applied directly on\n # the decoded bounding boxes, it decodes the\n # already encoded coordinates to absolute format.\n bbox_pred = self.bbox_coder.decode(rois[:, 1:], bbox_pred)\n bbox_pred = get_box_tensor(bbox_pred)\n if self.reg_class_agnostic:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), -1)[pos_inds.type(torch.bool)]\n else:\n pos_bbox_pred = bbox_pred.view(\n bbox_pred.size(0), self.num_classes,\n -1)[pos_inds.type(torch.bool),\n labels[pos_inds.type(torch.bool)]]\n losses['loss_bbox'] = self.loss_bbox(\n pos_bbox_pred,\n bbox_targets[pos_inds.type(torch.bool)],\n bbox_weights[pos_inds.type(torch.bool)],\n avg_factor=bbox_targets.size(0),\n reduction_override=reduction_override)\n else:\n losses['loss_bbox'] = bbox_pred[pos_inds].sum()\n\n return losses\n\n def predict_by_feat(self,\n rois: Tuple[Tensor],\n cls_scores: Tuple[Tensor],\n bbox_preds: Tuple[Tensor],\n batch_img_metas: List[dict],\n rcnn_test_cfg: Optional[ConfigDict] = None,\n rescale: bool = False) -> InstanceList:\n \"\"\"Transform a batch of output features extracted from the head into\n bbox results.\n\n Args:\n rois (tuple[Tensor]): Tuple of boxes to be transformed.\n Each has shape (num_boxes, 5). last dimension 5 arrange as\n (batch_index, x1, y1, x2, y2).\n cls_scores (tuple[Tensor]): Tuple of box scores, each has shape\n (num_boxes, num_classes + 1).\n bbox_preds (tuple[Tensor]): Tuple of box energies / deltas, each\n has shape (num_boxes, num_classes * 4).\n batch_img_metas (list[dict]): List of image information.\n rcnn_test_cfg (obj:`ConfigDict`, optional): `test_cfg` of R-CNN.\n Defaults to None.\n rescale (bool): If True, return boxes in original image space.\n Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Instance segmentation\n results of each image after the post process.\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n \"\"\"\n assert len(cls_scores) == len(bbox_preds)\n result_list = []\n for img_id in range(len(batch_img_metas)):\n img_meta = batch_img_metas[img_id]\n results = self._predict_by_feat_single(\n roi=rois[img_id],\n cls_score=cls_scores[img_id],\n bbox_pred=bbox_preds[img_id],\n img_meta=img_meta,\n rescale=rescale,\n rcnn_test_cfg=rcnn_test_cfg)\n result_list.append(results)\n\n return result_list\n\n def _predict_by_feat_single(\n self,\n roi: Tensor,\n cls_score: Tensor,\n bbox_pred: Tensor,\n img_meta: dict,\n rescale: bool = False,\n rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData:\n \"\"\"Transform a single image's features extracted from the head into\n bbox results.\n\n Args:\n roi (Tensor): Boxes to be transformed. 
Has shape (num_boxes, 5).\n last dimension 5 arrange as (batch_index, x1, y1, x2, y2).\n cls_score (Tensor): Box scores, has shape\n (num_boxes, num_classes + 1).\n bbox_pred (Tensor): Box energies / deltas.\n has shape (num_boxes, num_classes * 4).\n img_meta (dict): image information.\n rescale (bool): If True, return boxes in original image space.\n Defaults to False.\n rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head.\n Defaults to None\n\n Returns:\n :obj:`InstanceData`: Detection results of each image\\\n Each item usually contains following keys.\n\n - scores (Tensor): Classification scores, has a shape\n (num_instance, )\n - labels (Tensor): Labels of bboxes, has a shape\n (num_instances, ).\n - bboxes (Tensor): Has a shape (num_instances, 4),\n the last dimension 4 arrange as (x1, y1, x2, y2).\n \"\"\"\n results = InstanceData()\n if roi.shape[0] == 0:\n return empty_instances([img_meta],\n roi.device,\n task_type='bbox',\n instance_results=[results],\n box_type=self.predict_box_type,\n use_box_type=False,\n num_classes=self.num_classes,\n score_per_cls=rcnn_test_cfg is None)[0]\n\n # some loss (Seesaw loss..) may have custom activation\n if self.custom_cls_channels:\n scores = self.loss_cls.get_activation(cls_score)\n else:\n scores = F.softmax(\n cls_score, dim=-1) if cls_score is not None else None\n\n img_shape = img_meta['img_shape']\n num_rois = roi.size(0)\n # bbox_pred would be None in some detector when with_reg is False,\n # e.g. Grid R-CNN.\n if bbox_pred is not None:\n num_classes = 1 if self.reg_class_agnostic else self.num_classes\n roi = roi.repeat_interleave(num_classes, dim=0)\n bbox_pred = bbox_pred.view(-1, self.bbox_coder.encode_size)\n bboxes = self.bbox_coder.decode(\n roi[..., 1:], bbox_pred, max_shape=img_shape)\n else:\n bboxes = roi[:, 1:].clone()\n if img_shape is not None and bboxes.size(-1) == 4:\n bboxes[:, [0, 2]].clamp_(min=0, max=img_shape[1])\n bboxes[:, [1, 3]].clamp_(min=0, max=img_shape[0])\n\n if rescale and bboxes.size(0) > 0:\n assert img_meta.get('scale_factor') is not None\n scale_factor = [1 / s for s in img_meta['scale_factor']]\n bboxes = scale_boxes(bboxes, scale_factor)\n\n # Get the inside tensor when `bboxes` is a box type\n bboxes = get_box_tensor(bboxes)\n box_dim = bboxes.size(-1)\n bboxes = bboxes.view(num_rois, -1)\n\n if rcnn_test_cfg is None:\n # This means that it is aug test.\n # It needs to return the raw results without nms.\n results.bboxes = bboxes\n results.scores = scores\n else:\n det_bboxes, det_labels = multiclass_nms(\n bboxes,\n scores,\n rcnn_test_cfg.score_thr,\n rcnn_test_cfg.nms,\n rcnn_test_cfg.max_per_img,\n box_dim=box_dim)\n results.bboxes = det_bboxes[:, :-1]\n results.scores = det_bboxes[:, -1]\n results.labels = det_labels\n return results\n\n def refine_bboxes(self, sampling_results: Union[List[SamplingResult],\n InstanceList],\n bbox_results: dict,\n batch_img_metas: List[dict]) -> InstanceList:\n \"\"\"Refine bboxes during training.\n\n Args:\n sampling_results (List[:obj:`SamplingResult`] or\n List[:obj:`InstanceData`]): Sampling results.\n :obj:`SamplingResult` is the real sampling results\n calculate from bbox_head, while :obj:`InstanceData` is\n fake sampling results, e.g., in Sparse R-CNN or QueryInst, etc.\n bbox_results (dict): Usually is a dictionary with keys:\n\n - `cls_score` (Tensor): Classification scores.\n - `bbox_pred` (Tensor): Box energies / deltas.\n - `rois` (Tensor): RoIs with the shape (n, 5) where the first\n column indicates batch id of each RoI.\n - `bbox_targets` 
(tuple): Ground truth for proposals in a\n single image. Containing the following list of Tensors:\n (labels, label_weights, bbox_targets, bbox_weights)\n batch_img_metas (List[dict]): List of image information.\n\n Returns:\n list[:obj:`InstanceData`]: Refined bboxes of each image.\n\n Example:\n >>> # xdoctest: +REQUIRES(module:kwarray)\n >>> import numpy as np\n >>> from mmdet.models.task_modules.samplers.\n ... sampling_result import random_boxes\n >>> from mmdet.models.task_modules.samplers import SamplingResult\n >>> self = BBoxHead(reg_class_agnostic=True)\n >>> n_roi = 2\n >>> n_img = 4\n >>> scale = 512\n >>> rng = np.random.RandomState(0)\n ... batch_img_metas = [{'img_shape': (scale, scale)}\n >>> for _ in range(n_img)]\n >>> sampling_results = [SamplingResult.random(rng=10)\n ... for _ in range(n_img)]\n >>> # Create rois in the expected format\n >>> roi_boxes = random_boxes(n_roi, scale=scale, rng=rng)\n >>> img_ids = torch.randint(0, n_img, (n_roi,))\n >>> img_ids = img_ids.float()\n >>> rois = torch.cat([img_ids[:, None], roi_boxes], dim=1)\n >>> # Create other args\n >>> labels = torch.randint(0, 81, (scale,)).long()\n >>> bbox_preds = random_boxes(n_roi, scale=scale, rng=rng)\n >>> cls_score = torch.randn((scale, 81))\n ... # For each image, pretend random positive boxes are gts\n >>> bbox_targets = (labels, None, None, None)\n ... bbox_results = dict(rois=rois, bbox_pred=bbox_preds,\n ... cls_score=cls_score,\n ... bbox_targets=bbox_targets)\n >>> bboxes_list = self.refine_bboxes(sampling_results,\n ... bbox_results,\n ... batch_img_metas)\n >>> print(bboxes_list)\n \"\"\"\n pos_is_gts = [res.pos_is_gt for res in sampling_results]\n # bbox_targets is a tuple\n labels = bbox_results['bbox_targets'][0]\n cls_scores = bbox_results['cls_score']\n rois = bbox_results['rois']\n bbox_preds = bbox_results['bbox_pred']\n if self.custom_activation:\n # TODO: Create a SeasawBBoxHead to simplified logic in BBoxHead\n cls_scores = self.loss_cls.get_activation(cls_scores)\n if cls_scores.numel() == 0:\n return None\n if cls_scores.shape[-1] == self.num_classes + 1:\n # remove background class\n cls_scores = cls_scores[:, :-1]\n elif cls_scores.shape[-1] != self.num_classes:\n raise ValueError('The last dim of `cls_scores` should equal to '\n '`num_classes` or `num_classes + 1`,'\n f'but got {cls_scores.shape[-1]}.')\n labels = torch.where(labels == self.num_classes, cls_scores.argmax(1),\n labels)\n\n img_ids = rois[:, 0].long().unique(sorted=True)\n assert img_ids.numel() <= len(batch_img_metas)\n\n results_list = []\n for i in range(len(batch_img_metas)):\n inds = torch.nonzero(\n rois[:, 0] == i, as_tuple=False).squeeze(dim=1)\n num_rois = inds.numel()\n\n bboxes_ = rois[inds, 1:]\n label_ = labels[inds]\n bbox_pred_ = bbox_preds[inds]\n img_meta_ = batch_img_metas[i]\n pos_is_gts_ = pos_is_gts[i]\n\n bboxes = self.regress_by_class(bboxes_, label_, bbox_pred_,\n img_meta_)\n # filter gt bboxes\n pos_keep = 1 - pos_is_gts_\n keep_inds = pos_is_gts_.new_ones(num_rois)\n keep_inds[:len(pos_is_gts_)] = pos_keep\n results = InstanceData(bboxes=bboxes[keep_inds.type(torch.bool)])\n results_list.append(results)\n\n return results_list\n\n def regress_by_class(self, priors: Tensor, label: Tensor,\n bbox_pred: Tensor, img_meta: dict) -> Tensor:\n \"\"\"Regress the bbox for the predicted class. 
Used in Cascade R-CNN.\n\n Args:\n priors (Tensor): Priors from `rpn_head` or last stage\n `bbox_head`, has shape (num_proposals, 4).\n label (Tensor): Only used when `self.reg_class_agnostic`\n is False, has shape (num_proposals, ).\n bbox_pred (Tensor): Regression prediction of\n current stage `bbox_head`. When `self.reg_class_agnostic`\n is False, it has shape (n, num_classes * 4), otherwise\n it has shape (n, 4).\n img_meta (dict): Image meta info.\n\n Returns:\n Tensor: Regressed bboxes, the same shape as input rois.\n \"\"\"\n reg_dim = self.bbox_coder.encode_size\n if not self.reg_class_agnostic:\n label = label * reg_dim\n inds = torch.stack([label + i for i in range(reg_dim)], 1)\n bbox_pred = torch.gather(bbox_pred, 1, inds)\n assert bbox_pred.size()[1] == reg_dim\n\n max_shape = img_meta['img_shape']\n regressed_bboxes = self.bbox_coder.decode(\n priors, bbox_pred, max_shape=max_shape)\n return regressed_bboxes" }, { "identifier": "SamplingResult", "path": "mmdet/models/task_modules/samplers/sampling_result.py", "snippet": "class SamplingResult(util_mixins.NiceRepr):\n \"\"\"Bbox sampling result.\n\n Args:\n pos_inds (Tensor): Indices of positive samples.\n neg_inds (Tensor): Indices of negative samples.\n priors (Tensor): The priors can be anchors or points,\n or the bboxes predicted by the previous stage.\n gt_bboxes (Tensor): Ground truth of bboxes.\n assign_result (:obj:`AssignResult`): Assigning results.\n gt_flags (Tensor): The Ground truth flags.\n avg_factor_with_neg (bool): If True, ``avg_factor`` equal to\n the number of total priors; Otherwise, it is the number of\n positive priors. Defaults to True.\n\n Example:\n >>> # xdoctest: +IGNORE_WANT\n >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random(rng=10)\n >>> print(f'self = {self}')\n self = <SamplingResult({\n 'neg_inds': tensor([1, 2, 3, 5, 6, 7, 8,\n 9, 10, 11, 12, 13]),\n 'neg_priors': torch.Size([12, 4]),\n 'num_gts': 1,\n 'num_neg': 12,\n 'num_pos': 1,\n 'avg_factor': 13,\n 'pos_assigned_gt_inds': tensor([0]),\n 'pos_inds': tensor([0]),\n 'pos_is_gt': tensor([1], dtype=torch.uint8),\n 'pos_priors': torch.Size([1, 4])\n })>\n \"\"\"\n\n def __init__(self,\n pos_inds: Tensor,\n neg_inds: Tensor,\n priors: Tensor,\n gt_bboxes: Tensor,\n assign_result: AssignResult,\n gt_flags: Tensor,\n avg_factor_with_neg: bool = True) -> None:\n self.pos_inds = pos_inds\n self.neg_inds = neg_inds\n self.num_pos = max(pos_inds.numel(), 1)\n self.num_neg = max(neg_inds.numel(), 1)\n self.avg_factor_with_neg = avg_factor_with_neg\n self.avg_factor = self.num_pos + self.num_neg \\\n if avg_factor_with_neg else self.num_pos\n self.pos_priors = priors[pos_inds]\n self.neg_priors = priors[neg_inds]\n self.pos_is_gt = gt_flags[pos_inds]\n\n self.num_gts = gt_bboxes.shape[0]\n self.pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\n self.pos_gt_labels = assign_result.labels[pos_inds]\n box_dim = gt_bboxes.box_dim if isinstance(gt_bboxes, BaseBoxes) else 4\n if gt_bboxes.numel() == 0:\n # hack for index error case\n assert self.pos_assigned_gt_inds.numel() == 0\n self.pos_gt_bboxes = gt_bboxes.view(-1, box_dim)\n else:\n if len(gt_bboxes.shape) < 2:\n gt_bboxes = gt_bboxes.view(-1, box_dim)\n self.pos_gt_bboxes = gt_bboxes[self.pos_assigned_gt_inds.long()]\n\n @property\n def priors(self):\n \"\"\"torch.Tensor: concatenated positive and negative priors\"\"\"\n return cat_boxes([self.pos_priors, self.neg_priors])\n\n @property\n def bboxes(self):\n \"\"\"torch.Tensor: 
concatenated positive and negative boxes\"\"\"\n warnings.warn('DeprecationWarning: bboxes is deprecated, '\n 'please use \"priors\" instead')\n return self.priors\n\n @property\n def pos_bboxes(self):\n warnings.warn('DeprecationWarning: pos_bboxes is deprecated, '\n 'please use \"pos_priors\" instead')\n return self.pos_priors\n\n @property\n def neg_bboxes(self):\n warnings.warn('DeprecationWarning: neg_bboxes is deprecated, '\n 'please use \"neg_priors\" instead')\n return self.neg_priors\n\n def to(self, device):\n \"\"\"Change the device of the data inplace.\n\n Example:\n >>> self = SamplingResult.random()\n >>> print(f'self = {self.to(None)}')\n >>> # xdoctest: +REQUIRES(--gpu)\n >>> print(f'self = {self.to(0)}')\n \"\"\"\n _dict = self.__dict__\n for key, value in _dict.items():\n if isinstance(value, (torch.Tensor, BaseBoxes)):\n _dict[key] = value.to(device)\n return self\n\n def __nice__(self):\n data = self.info.copy()\n data['pos_priors'] = data.pop('pos_priors').shape\n data['neg_priors'] = data.pop('neg_priors').shape\n parts = [f\"'{k}': {v!r}\" for k, v in sorted(data.items())]\n body = ' ' + ',\\n '.join(parts)\n return '{\\n' + body + '\\n}'\n\n @property\n def info(self):\n \"\"\"Returns a dictionary of info about the object.\"\"\"\n return {\n 'pos_inds': self.pos_inds,\n 'neg_inds': self.neg_inds,\n 'pos_priors': self.pos_priors,\n 'neg_priors': self.neg_priors,\n 'pos_is_gt': self.pos_is_gt,\n 'num_gts': self.num_gts,\n 'pos_assigned_gt_inds': self.pos_assigned_gt_inds,\n 'num_pos': self.num_pos,\n 'num_neg': self.num_neg,\n 'avg_factor': self.avg_factor\n }\n\n @classmethod\n def random(cls, rng=None, **kwargs):\n \"\"\"\n Args:\n rng (None | int | numpy.random.RandomState): seed or state.\n kwargs (keyword arguments):\n - num_preds: Number of predicted boxes.\n - num_gts: Number of true boxes.\n - p_ignore (float): Probability of a predicted box assigned to\n an ignored truth.\n - p_assigned (float): probability of a predicted box not being\n assigned.\n\n Returns:\n :obj:`SamplingResult`: Randomly generated sampling result.\n\n Example:\n >>> from mmdet.models.task_modules.samplers.sampling_result import * # NOQA\n >>> self = SamplingResult.random()\n >>> print(self.__dict__)\n \"\"\"\n from mmengine.structures import InstanceData\n\n from mmdet.models.task_modules.assigners import AssignResult\n from mmdet.models.task_modules.samplers import RandomSampler\n rng = ensure_rng(rng)\n\n # make probabilistic?\n num = 32\n pos_fraction = 0.5\n neg_pos_ub = -1\n\n assign_result = AssignResult.random(rng=rng, **kwargs)\n\n # Note we could just compute an assignment\n priors = random_boxes(assign_result.num_preds, rng=rng)\n gt_bboxes = random_boxes(assign_result.num_gts, rng=rng)\n gt_labels = torch.randint(\n 0, 5, (assign_result.num_gts, ), dtype=torch.long)\n\n pred_instances = InstanceData()\n pred_instances.priors = priors\n\n gt_instances = InstanceData()\n gt_instances.bboxes = gt_bboxes\n gt_instances.labels = gt_labels\n\n add_gt_as_proposals = True\n\n sampler = RandomSampler(\n num,\n pos_fraction,\n neg_pos_ub=neg_pos_ub,\n add_gt_as_proposals=add_gt_as_proposals,\n rng=rng)\n self = sampler.sample(\n assign_result=assign_result,\n pred_instances=pred_instances,\n gt_instances=gt_instances)\n return self" }, { "identifier": "empty_instances", "path": "mmdet/models/utils/misc.py", "snippet": "def empty_instances(batch_img_metas: List[dict],\n device: torch.device,\n task_type: str,\n instance_results: OptInstanceList = None,\n mask_thr_binary: Union[int, 
float] = 0,\n box_type: Union[str, type] = 'hbox',\n use_box_type: bool = False,\n num_classes: int = 80,\n score_per_cls: bool = False) -> List[InstanceData]:\n \"\"\"Handle predicted instances when RoI is empty.\n\n Note: If ``instance_results`` is not None, it will be modified\n in place internally, and then return ``instance_results``\n\n Args:\n batch_img_metas (list[dict]): List of image information.\n device (torch.device): Device of tensor.\n task_type (str): Expected returned task type. it currently\n supports bbox and mask.\n instance_results (list[:obj:`InstanceData`]): List of instance\n results.\n mask_thr_binary (int, float): mask binarization threshold.\n Defaults to 0.\n box_type (str or type): The empty box type. Defaults to `hbox`.\n use_box_type (bool): Whether to warp boxes with the box type.\n Defaults to False.\n num_classes (int): num_classes of bbox_head. Defaults to 80.\n score_per_cls (bool): Whether to generate classwise score for\n the empty instance. ``score_per_cls`` will be True when the model\n needs to produce raw results without nms. Defaults to False.\n\n Returns:\n list[:obj:`InstanceData`]: Detection results of each image\n \"\"\"\n assert task_type in ('bbox', 'mask'), 'Only support bbox and mask,' \\\n f' but got {task_type}'\n\n if instance_results is not None:\n assert len(instance_results) == len(batch_img_metas)\n\n results_list = []\n for img_id in range(len(batch_img_metas)):\n if instance_results is not None:\n results = instance_results[img_id]\n assert isinstance(results, InstanceData)\n else:\n results = InstanceData()\n\n if task_type == 'bbox':\n _, box_type = get_box_type(box_type)\n bboxes = torch.zeros(0, box_type.box_dim, device=device)\n if use_box_type:\n bboxes = box_type(bboxes, clone=False)\n results.bboxes = bboxes\n score_shape = (0, num_classes + 1) if score_per_cls else (0, )\n results.scores = torch.zeros(score_shape, device=device)\n results.labels = torch.zeros((0, ),\n device=device,\n dtype=torch.long)\n else:\n # TODO: Handle the case where rescale is false\n img_h, img_w = batch_img_metas[img_id]['ori_shape'][:2]\n # the type of `im_mask` will be torch.bool or torch.uint8,\n # where uint8 if for visualization and debugging.\n im_mask = torch.zeros(\n 0,\n img_h,\n img_w,\n device=device,\n dtype=torch.bool if mask_thr_binary >= 0 else torch.uint8)\n results.masks = im_mask\n results_list.append(results)\n return results_list" }, { "identifier": "MODELS", "path": "mmdet/registry.py", "snippet": "MODELS = Registry('model', parent=MMENGINE_MODELS, locations=['mmdet.models'])" }, { "identifier": "bbox_overlaps", "path": "mmdet/structures/bbox/bbox_overlaps.py", "snippet": "def bbox_overlaps(bboxes1, bboxes2, mode='iou', is_aligned=False, eps=1e-6):\n \"\"\"Calculate overlap between two set of bboxes.\n\n FP16 Contributed by https://github.com/open-mmlab/mmdetection/pull/4889\n Note:\n Assume bboxes1 is M x 4, bboxes2 is N x 4, when mode is 'iou',\n there are some new generated variable when calculating IOU\n using bbox_overlaps function:\n\n 1) is_aligned is False\n area1: M x 1\n area2: N x 1\n lt: M x N x 2\n rb: M x N x 2\n wh: M x N x 2\n overlap: M x N x 1\n union: M x N x 1\n ious: M x N x 1\n\n Total memory:\n S = (9 x N x M + N + M) * 4 Byte,\n\n When using FP16, we can reduce:\n R = (9 x N x M + N + M) * 4 / 2 Byte\n R large than (N + M) * 4 * 2 is always true when N and M >= 1.\n Obviously, N + M <= N * M < 3 * N * M, when N >=2 and M >=2,\n N + 1 < 3 * N, when N or M is 1.\n\n Given M = 40 (ground truth), N = 
400000 (three anchor boxes\n in per grid, FPN, R-CNNs),\n R = 275 MB (one times)\n\n A special case (dense detection), M = 512 (ground truth),\n R = 3516 MB = 3.43 GB\n\n When the batch size is B, reduce:\n B x R\n\n Therefore, CUDA memory runs out frequently.\n\n Experiments on GeForce RTX 2080Ti (11019 MiB):\n\n | dtype | M | N | Use | Real | Ideal |\n |:----:|:----:|:----:|:----:|:----:|:----:|\n | FP32 | 512 | 400000 | 8020 MiB | -- | -- |\n | FP16 | 512 | 400000 | 4504 MiB | 3516 MiB | 3516 MiB |\n | FP32 | 40 | 400000 | 1540 MiB | -- | -- |\n | FP16 | 40 | 400000 | 1264 MiB | 276MiB | 275 MiB |\n\n 2) is_aligned is True\n area1: N x 1\n area2: N x 1\n lt: N x 2\n rb: N x 2\n wh: N x 2\n overlap: N x 1\n union: N x 1\n ious: N x 1\n\n Total memory:\n S = 11 x N * 4 Byte\n\n When using FP16, we can reduce:\n R = 11 x N * 4 / 2 Byte\n\n So do the 'giou' (large than 'iou').\n\n Time-wise, FP16 is generally faster than FP32.\n\n When gpu_assign_thr is not -1, it takes more time on cpu\n but not reduce memory.\n There, we can reduce half the memory and keep the speed.\n\n If ``is_aligned`` is ``False``, then calculate the overlaps between each\n bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned\n pair of bboxes1 and bboxes2.\n\n Args:\n bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.\n bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.\n B indicates the batch dim, in shape (B1, B2, ..., Bn).\n If ``is_aligned`` is ``True``, then m and n must be equal.\n mode (str): \"iou\" (intersection over union), \"iof\" (intersection over\n foreground) or \"giou\" (generalized intersection over union).\n Default \"iou\".\n is_aligned (bool, optional): If True, then m and n must be equal.\n Default False.\n eps (float, optional): A value added to the denominator for numerical\n stability. Default 1e-6.\n\n Returns:\n Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)\n\n Example:\n >>> bboxes1 = torch.FloatTensor([\n >>> [0, 0, 10, 10],\n >>> [10, 10, 20, 20],\n >>> [32, 32, 38, 42],\n >>> ])\n >>> bboxes2 = torch.FloatTensor([\n >>> [0, 0, 10, 20],\n >>> [0, 10, 10, 19],\n >>> [10, 10, 20, 20],\n >>> ])\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2)\n >>> assert overlaps.shape == (3, 3)\n >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True)\n >>> assert overlaps.shape == (3, )\n\n Example:\n >>> empty = torch.empty(0, 4)\n >>> nonempty = torch.FloatTensor([[0, 0, 10, 9]])\n >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)\n >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)\n >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)\n \"\"\"\n\n assert mode in ['iou', 'iof', 'giou'], f'Unsupported mode {mode}'\n # Either the boxes are empty or the length of boxes' last dimension is 4\n assert (bboxes1.size(-1) == 4 or bboxes1.size(0) == 0)\n assert (bboxes2.size(-1) == 4 or bboxes2.size(0) == 0)\n\n # Batch dim must be the same\n # Batch dim: (B1, B2, ... 
Bn)\n assert bboxes1.shape[:-2] == bboxes2.shape[:-2]\n batch_shape = bboxes1.shape[:-2]\n\n rows = bboxes1.size(-2)\n cols = bboxes2.size(-2)\n if is_aligned:\n assert rows == cols\n\n if rows * cols == 0:\n if is_aligned:\n return bboxes1.new(batch_shape + (rows, ))\n else:\n return bboxes1.new(batch_shape + (rows, cols))\n\n area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (\n bboxes1[..., 3] - bboxes1[..., 1])\n area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (\n bboxes2[..., 3] - bboxes2[..., 1])\n\n if is_aligned:\n lt = torch.max(bboxes1[..., :2], bboxes2[..., :2]) # [B, rows, 2]\n rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:]) # [B, rows, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1 + area2 - overlap\n else:\n union = area1\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])\n enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])\n else:\n lt = torch.max(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2]) # [B, rows, cols, 2]\n rb = torch.min(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:]) # [B, rows, cols, 2]\n\n wh = fp16_clamp(rb - lt, min=0)\n overlap = wh[..., 0] * wh[..., 1]\n\n if mode in ['iou', 'giou']:\n union = area1[..., None] + area2[..., None, :] - overlap\n else:\n union = area1[..., None]\n if mode == 'giou':\n enclosed_lt = torch.min(bboxes1[..., :, None, :2],\n bboxes2[..., None, :, :2])\n enclosed_rb = torch.max(bboxes1[..., :, None, 2:],\n bboxes2[..., None, :, 2:])\n\n eps = union.new_tensor([eps])\n union = torch.max(union, eps)\n ious = overlap / union\n if mode in ['iou', 'iof']:\n return ious\n # calculate gious\n enclose_wh = fp16_clamp(enclosed_rb - enclosed_lt, min=0)\n enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]\n enclose_area = torch.max(enclose_area, eps)\n gious = ious - (enclose_area - union) / enclose_area\n return gious" } ]
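The `bbox_overlaps` snippet above broadcasts an (M, 4) box set against an (N, 4) set. As a companion to its docstring examples, here is a minimal sketch of the pairwise IoU computation it performs in the `is_aligned=False`, `mode='iou'` case — written from scratch for illustration, not copied from mmdet:

import torch

def pairwise_iou(b1: torch.Tensor, b2: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # b1: (M, 4), b2: (N, 4), boxes in <x1, y1, x2, y2> format
    area1 = (b1[:, 2] - b1[:, 0]) * (b1[:, 3] - b1[:, 1])   # (M,)
    area2 = (b2[:, 2] - b2[:, 0]) * (b2[:, 3] - b2[:, 1])   # (N,)
    lt = torch.max(b1[:, None, :2], b2[None, :, :2])        # (M, N, 2) intersection top-left
    rb = torch.min(b1[:, None, 2:], b2[None, :, 2:])        # (M, N, 2) intersection bottom-right
    wh = (rb - lt).clamp(min=0)                             # width/height are 0 when boxes are disjoint
    inter = wh[..., 0] * wh[..., 1]                         # (M, N)
    union = area1[:, None] + area2[None, :] - inter
    return inter / union.clamp(min=eps)

boxes1 = torch.tensor([[0., 0., 10., 10.], [10., 10., 20., 20.]])
boxes2 = torch.tensor([[0., 0., 10., 20.]])
print(pairwise_iou(boxes1, boxes2))  # tensor([[0.5000], [0.0000]])

The broadcasting over `b1[:, None]` and `b2[None, :]` is the same trick the library function uses; its extra machinery (batch dims, fp16 clamping, 'iof'/'giou' modes) layers on top of this core.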
from typing import List, Optional, Tuple, Union from mmcv.cnn import ConvModule from mmengine.config import ConfigDict from mmengine.structures import InstanceData from torch import Tensor, nn from mmdet.models.roi_heads.bbox_heads.bbox_head import BBoxHead from mmdet.models.task_modules.samplers import SamplingResult from mmdet.models.utils import empty_instances from mmdet.registry import MODELS from mmdet.structures.bbox import bbox_overlaps import numpy as np import torch import torch.nn.functional as F
15,729
bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas. has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. # It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). 
""" bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
# Copyright (c) OpenMMLab. All rights reserved. @MODELS.register_module() class MultiInstanceBBoxHead(BBoxHead): r"""Bbox head used in CrowdDet. .. code-block:: none /-> cls convs_1 -> cls fcs_1 -> cls_1 |-- | \-> reg convs_1 -> reg fcs_1 -> reg_1 | | /-> cls convs_2 -> cls fcs_2 -> cls_2 shared convs -> shared fcs |-- | \-> reg convs_2 -> reg fcs_2 -> reg_2 | | ... | | /-> cls convs_k -> cls fcs_k -> cls_k |-- \-> reg convs_k -> reg fcs_k -> reg_k Args: num_instance (int): The number of branches after shared fcs. Defaults to 2. with_refine (bool): Whether to use refine module. Defaults to False. num_shared_convs (int): The number of shared convs. Defaults to 0. num_shared_fcs (int): The number of shared fcs. Defaults to 2. num_cls_convs (int): The number of cls convs. Defaults to 0. num_cls_fcs (int): The number of cls fcs. Defaults to 0. num_reg_convs (int): The number of reg convs. Defaults to 0. num_reg_fcs (int): The number of reg fcs. Defaults to 0. conv_out_channels (int): The number of conv out channels. Defaults to 256. fc_out_channels (int): The number of fc out channels. Defaults to 1024. init_cfg (dict or list[dict], optional): Initialization config dict. Defaults to None. """ # noqa: W605 def __init__(self, num_instance: int = 2, with_refine: bool = False, num_shared_convs: int = 0, num_shared_fcs: int = 2, num_cls_convs: int = 0, num_cls_fcs: int = 0, num_reg_convs: int = 0, num_reg_fcs: int = 0, conv_out_channels: int = 256, fc_out_channels: int = 1024, init_cfg: Optional[Union[dict, ConfigDict]] = None, *args, **kwargs) -> None: super().__init__(*args, init_cfg=init_cfg, **kwargs) assert (num_shared_convs + num_shared_fcs + num_cls_convs + num_cls_fcs + num_reg_convs + num_reg_fcs > 0) assert num_instance == 2, 'Currently only 2 instances are supported' if num_cls_convs > 0 or num_reg_convs > 0: assert num_shared_fcs == 0 if not self.with_cls: assert num_cls_convs == 0 and num_cls_fcs == 0 if not self.with_reg: assert num_reg_convs == 0 and num_reg_fcs == 0 self.num_instance = num_instance self.num_shared_convs = num_shared_convs self.num_shared_fcs = num_shared_fcs self.num_cls_convs = num_cls_convs self.num_cls_fcs = num_cls_fcs self.num_reg_convs = num_reg_convs self.num_reg_fcs = num_reg_fcs self.conv_out_channels = conv_out_channels self.fc_out_channels = fc_out_channels self.with_refine = with_refine # add shared convs and fcs self.shared_convs, self.shared_fcs, last_layer_dim = \ self._add_conv_fc_branch( self.num_shared_convs, self.num_shared_fcs, self.in_channels, True) self.shared_out_channels = last_layer_dim self.relu = nn.ReLU(inplace=True) if self.with_refine: refine_model_cfg = { 'type': 'Linear', 'in_features': self.shared_out_channels + 20, 'out_features': self.shared_out_channels } self.shared_fcs_ref = MODELS.build(refine_model_cfg) self.fc_cls_ref = nn.ModuleList() self.fc_reg_ref = nn.ModuleList() self.cls_convs = nn.ModuleList() self.cls_fcs = nn.ModuleList() self.reg_convs = nn.ModuleList() self.reg_fcs = nn.ModuleList() self.cls_last_dim = list() self.reg_last_dim = list() self.fc_cls = nn.ModuleList() self.fc_reg = nn.ModuleList() for k in range(self.num_instance): # add cls specific branch cls_convs, cls_fcs, cls_last_dim = self._add_conv_fc_branch( self.num_cls_convs, self.num_cls_fcs, self.shared_out_channels) self.cls_convs.append(cls_convs) self.cls_fcs.append(cls_fcs) self.cls_last_dim.append(cls_last_dim) # add reg specific branch reg_convs, reg_fcs, reg_last_dim = self._add_conv_fc_branch( self.num_reg_convs, self.num_reg_fcs, 
self.shared_out_channels) self.reg_convs.append(reg_convs) self.reg_fcs.append(reg_fcs) self.reg_last_dim.append(reg_last_dim) if self.num_shared_fcs == 0 and not self.with_avg_pool: if self.num_cls_fcs == 0: self.cls_last_dim *= self.roi_feat_area if self.num_reg_fcs == 0: self.reg_last_dim *= self.roi_feat_area if self.with_cls: if self.custom_cls_channels: cls_channels = self.loss_cls.get_cls_channels( self.num_classes) else: cls_channels = self.num_classes + 1 cls_predictor_cfg_ = self.cls_predictor_cfg.copy() # deepcopy cls_predictor_cfg_.update( in_features=self.cls_last_dim[k], out_features=cls_channels) self.fc_cls.append(MODELS.build(cls_predictor_cfg_)) if self.with_refine: self.fc_cls_ref.append(MODELS.build(cls_predictor_cfg_)) if self.with_reg: out_dim_reg = (4 if self.reg_class_agnostic else 4 * self.num_classes) reg_predictor_cfg_ = self.reg_predictor_cfg.copy() reg_predictor_cfg_.update( in_features=self.reg_last_dim[k], out_features=out_dim_reg) self.fc_reg.append(MODELS.build(reg_predictor_cfg_)) if self.with_refine: self.fc_reg_ref.append(MODELS.build(reg_predictor_cfg_)) if init_cfg is None: # when init_cfg is None, # It has been set to # [[dict(type='Normal', std=0.01, override=dict(name='fc_cls'))], # [dict(type='Normal', std=0.001, override=dict(name='fc_reg'))] # after `super(ConvFCBBoxHead, self).__init__()` # we only need to append additional configuration # for `shared_fcs`, `cls_fcs` and `reg_fcs` self.init_cfg += [ dict( type='Xavier', distribution='uniform', override=[ dict(name='shared_fcs'), dict(name='cls_fcs'), dict(name='reg_fcs') ]) ] def _add_conv_fc_branch(self, num_branch_convs: int, num_branch_fcs: int, in_channels: int, is_shared: bool = False) -> tuple: """Add shared or separable branch. convs -> avg pool (optional) -> fcs """ last_layer_dim = in_channels # add branch specific conv layers branch_convs = nn.ModuleList() if num_branch_convs > 0: for i in range(num_branch_convs): conv_in_channels = ( last_layer_dim if i == 0 else self.conv_out_channels) branch_convs.append( ConvModule( conv_in_channels, self.conv_out_channels, 3, padding=1)) last_layer_dim = self.conv_out_channels # add branch specific fc layers branch_fcs = nn.ModuleList() if num_branch_fcs > 0: # for shared branch, only consider self.with_avg_pool # for separated branches, also consider self.num_shared_fcs if (is_shared or self.num_shared_fcs == 0) and not self.with_avg_pool: last_layer_dim *= self.roi_feat_area for i in range(num_branch_fcs): fc_in_channels = ( last_layer_dim if i == 0 else self.fc_out_channels) branch_fcs.append( nn.Linear(fc_in_channels, self.fc_out_channels)) last_layer_dim = self.fc_out_channels return branch_convs, branch_fcs, last_layer_dim def forward(self, x: Tuple[Tensor]) -> tuple: """Forward features from the upstream network. Args: x (tuple[Tensor]): Features from the upstream network, each is a 4D-tensor. Returns: tuple: A tuple of classification scores and bbox prediction. - cls_score (Tensor): Classification scores for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * num_classes. - bbox_pred (Tensor): Box energies / deltas for all scale levels, each is a 4D-tensor, the channels number is num_base_priors * 4. - cls_score_ref (Tensor): The cls_score after refine model. - bbox_pred_ref (Tensor): The bbox_pred after refine model. 
""" # shared part if self.num_shared_convs > 0: for conv in self.shared_convs: x = conv(x) if self.num_shared_fcs > 0: if self.with_avg_pool: x = self.avg_pool(x) x = x.flatten(1) for fc in self.shared_fcs: x = self.relu(fc(x)) x_cls = x x_reg = x # separate branches cls_score = list() bbox_pred = list() for k in range(self.num_instance): for conv in self.cls_convs[k]: x_cls = conv(x_cls) if x_cls.dim() > 2: if self.with_avg_pool: x_cls = self.avg_pool(x_cls) x_cls = x_cls.flatten(1) for fc in self.cls_fcs[k]: x_cls = self.relu(fc(x_cls)) for conv in self.reg_convs[k]: x_reg = conv(x_reg) if x_reg.dim() > 2: if self.with_avg_pool: x_reg = self.avg_pool(x_reg) x_reg = x_reg.flatten(1) for fc in self.reg_fcs[k]: x_reg = self.relu(fc(x_reg)) cls_score.append(self.fc_cls[k](x_cls) if self.with_cls else None) bbox_pred.append(self.fc_reg[k](x_reg) if self.with_reg else None) if self.with_refine: x_ref = x cls_score_ref = list() bbox_pred_ref = list() for k in range(self.num_instance): feat_ref = cls_score[k].softmax(dim=-1) feat_ref = torch.cat((bbox_pred[k], feat_ref[:, 1][:, None]), dim=1).repeat(1, 4) feat_ref = torch.cat((x_ref, feat_ref), dim=1) feat_ref = F.relu_(self.shared_fcs_ref(feat_ref)) cls_score_ref.append(self.fc_cls_ref[k](feat_ref)) bbox_pred_ref.append(self.fc_reg_ref[k](feat_ref)) cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) cls_score_ref = torch.cat(cls_score_ref, dim=1) bbox_pred_ref = torch.cat(bbox_pred_ref, dim=1) return cls_score, bbox_pred, cls_score_ref, bbox_pred_ref cls_score = torch.cat(cls_score, dim=1) bbox_pred = torch.cat(bbox_pred, dim=1) return cls_score, bbox_pred def get_targets(self, sampling_results: List[SamplingResult], rcnn_train_cfg: ConfigDict, concat: bool = True) -> tuple: """Calculate the ground truth for all samples in a batch according to the sampling_results. Almost the same as the implementation in bbox_head, we passed additional parameters pos_inds_list and neg_inds_list to `_get_targets_single` function. Args: sampling_results (List[obj:SamplingResult]): Assign results of all images in a batch after sampling. rcnn_train_cfg (obj:ConfigDict): `train_cfg` of RCNN. concat (bool): Whether to concatenate the results of all the images in a single batch. Returns: Tuple[Tensor]: Ground truth for proposals in a single image. Containing the following list of Tensors: - labels (list[Tensor],Tensor): Gt_labels for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - label_weights (list[Tensor]): Labels_weights for all proposals in a batch, each tensor in list has shape (num_proposals,) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals,). - bbox_targets (list[Tensor],Tensor): Regression target for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. - bbox_weights (list[tensor],Tensor): Regression weights for all proposals in a batch, each tensor in list has shape (num_proposals, 4) when `concat=False`, otherwise just a single tensor has shape (num_all_proposals, 4). 
""" labels = [] bbox_targets = [] bbox_weights = [] label_weights = [] for i in range(len(sampling_results)): sample_bboxes = torch.cat([ sampling_results[i].pos_gt_bboxes, sampling_results[i].neg_gt_bboxes ]) sample_priors = sampling_results[i].priors sample_priors = sample_priors.repeat(1, self.num_instance).reshape( -1, 4) sample_bboxes = sample_bboxes.reshape(-1, 4) if not self.reg_decoded_bbox: _bbox_targets = self.bbox_coder.encode(sample_priors, sample_bboxes) else: _bbox_targets = sample_priors _bbox_targets = _bbox_targets.reshape(-1, self.num_instance * 4) _bbox_weights = torch.ones(_bbox_targets.shape) _labels = torch.cat([ sampling_results[i].pos_gt_labels, sampling_results[i].neg_gt_labels ]) _labels_weights = torch.ones(_labels.shape) bbox_targets.append(_bbox_targets) bbox_weights.append(_bbox_weights) labels.append(_labels) label_weights.append(_labels_weights) if concat: labels = torch.cat(labels, 0) label_weights = torch.cat(label_weights, 0) bbox_targets = torch.cat(bbox_targets, 0) bbox_weights = torch.cat(bbox_weights, 0) return labels, label_weights, bbox_targets, bbox_weights def loss(self, cls_score: Tensor, bbox_pred: Tensor, rois: Tensor, labels: Tensor, label_weights: Tensor, bbox_targets: Tensor, bbox_weights: Tensor, **kwargs) -> dict: """Calculate the loss based on the network predictions and targets. Args: cls_score (Tensor): Classification prediction results of all class, has shape (batch_size * num_proposals_single_image, (num_classes + 1) * k), k represents the number of prediction boxes generated by each proposal box. bbox_pred (Tensor): Regression prediction results, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. rois (Tensor): RoIs with the shape (batch_size * num_proposals_single_image, 5) where the first column indicates batch id of each RoI. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). label_weights (Tensor): Labels_weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). bbox_targets (Tensor): Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. bbox_weights (Tensor): Regression weights for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k). Returns: dict: A dictionary of loss. """ losses = dict() if bbox_pred.numel(): loss_0 = self.emd_loss(bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_targets, labels) loss_1 = self.emd_loss(bbox_pred[:, 4:8], cls_score[:, 2:4], bbox_pred[:, 0:4], cls_score[:, 0:2], bbox_targets, labels) loss = torch.cat([loss_0, loss_1], dim=1) _, min_indices = loss.min(dim=1) loss_emd = loss[torch.arange(loss.shape[0]), min_indices] loss_emd = loss_emd.mean() else: loss_emd = bbox_pred.sum() losses['loss_rcnn_emd'] = loss_emd return losses def emd_loss(self, bbox_pred_0: Tensor, cls_score_0: Tensor, bbox_pred_1: Tensor, cls_score_1: Tensor, targets: Tensor, labels: Tensor) -> Tensor: """Calculate the emd loss. Note: This implementation is modified from https://github.com/Purkialo/ CrowdDet/blob/master/lib/det_oprs/loss_opr.py Args: bbox_pred_0 (Tensor): Part of regression prediction results, has shape (batch_size * num_proposals_single_image, 4), the last dimension 4 represents [tl_x, tl_y, br_x, br_y]. 
cls_score_0 (Tensor): Part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)), where 1 represents the background. bbox_pred_1 (Tensor): The other part of regression prediction results, has shape (batch_size*num_proposals_single_image, 4). cls_score_1 (Tensor):The other part of classification prediction results, has shape (batch_size * num_proposals_single_image, (num_classes + 1)). targets (Tensor):Regression target for all proposals in a batch, has shape (batch_size * num_proposals_single_image, 4 * k), the last dimension 4 represents [tl_x, tl_y, br_x, br_y], k represents the number of prediction boxes generated by each proposal box. labels (Tensor): Gt_labels for all proposals in a batch, has shape (batch_size * num_proposals_single_image, k). Returns: torch.Tensor: The calculated loss. """ bbox_pred = torch.cat([bbox_pred_0, bbox_pred_1], dim=1).reshape(-1, bbox_pred_0.shape[-1]) cls_score = torch.cat([cls_score_0, cls_score_1], dim=1).reshape(-1, cls_score_0.shape[-1]) targets = targets.reshape(-1, 4) labels = labels.long().flatten() # masks valid_masks = labels >= 0 fg_masks = labels > 0 # multiple class bbox_pred = bbox_pred.reshape(-1, self.num_classes, 4) fg_gt_classes = labels[fg_masks] bbox_pred = bbox_pred[fg_masks, fg_gt_classes - 1, :] # loss for regression loss_bbox = self.loss_bbox(bbox_pred, targets[fg_masks]) loss_bbox = loss_bbox.sum(dim=1) # loss for classification labels = labels * valid_masks loss_cls = self.loss_cls(cls_score, labels) loss_cls[fg_masks] = loss_cls[fg_masks] + loss_bbox loss = loss_cls.reshape(-1, 2).sum(dim=1) return loss.reshape(-1, 1) def _predict_by_feat_single( self, roi: Tensor, cls_score: Tensor, bbox_pred: Tensor, img_meta: dict, rescale: bool = False, rcnn_test_cfg: Optional[ConfigDict] = None) -> InstanceData: """Transform a single image's features extracted from the head into bbox results. Args: roi (Tensor): Boxes to be transformed. Has shape (num_boxes, 5). last dimension 5 arrange as (batch_index, x1, y1, x2, y2). cls_score (Tensor): Box scores, has shape (num_boxes, num_classes + 1). bbox_pred (Tensor): Box energies / deltas. has shape (num_boxes, num_classes * 4). img_meta (dict): image information. rescale (bool): If True, return boxes in original image space. Defaults to False. rcnn_test_cfg (obj:`ConfigDict`): `test_cfg` of Bbox Head. Defaults to None Returns: :obj:`InstanceData`: Detection results of each image. Each item usually contains following keys. - scores (Tensor): Classification scores, has a shape (num_instance, ) - labels (Tensor): Labels of bboxes, has a shape (num_instances, ). - bboxes (Tensor): Has a shape (num_instances, 4), the last dimension 4 arrange as (x1, y1, x2, y2). """ cls_score = cls_score.reshape(-1, self.num_classes + 1) bbox_pred = bbox_pred.reshape(-1, 4) roi = roi.repeat_interleave(self.num_instance, dim=0) results = InstanceData() if roi.shape[0] == 0: return empty_instances([img_meta], roi.device, task_type='bbox', instance_results=[results])[0] scores = cls_score.softmax(dim=-1) if cls_score is not None else None img_shape = img_meta['img_shape'] bboxes = self.bbox_coder.decode( roi[..., 1:], bbox_pred, max_shape=img_shape) if rescale and bboxes.size(0) > 0: assert img_meta.get('scale_factor') is not None scale_factor = bboxes.new_tensor(img_meta['scale_factor']).repeat( (1, 2)) bboxes = (bboxes.view(bboxes.size(0), -1, 4) / scale_factor).view( bboxes.size()[0], -1) if rcnn_test_cfg is None: # This means that it is aug test. 
# It needs to return the raw results without nms. results.bboxes = bboxes results.scores = scores else: roi_idx = np.tile( np.arange(bboxes.shape[0] / self.num_instance)[:, None], (1, self.num_instance)).reshape(-1, 1)[:, 0] roi_idx = torch.from_numpy(roi_idx).to(bboxes.device).reshape( -1, 1) bboxes = torch.cat([bboxes, roi_idx], dim=1) det_bboxes, det_scores = self.set_nms( bboxes, scores[:, 1], rcnn_test_cfg.score_thr, rcnn_test_cfg.nms['iou_threshold'], rcnn_test_cfg.max_per_img) results.bboxes = det_bboxes[:, :-1] results.scores = det_scores results.labels = torch.zeros_like(det_scores) return results @staticmethod def set_nms(bboxes: Tensor, scores: Tensor, score_thr: float, iou_threshold: float, max_num: int = -1) -> Tuple[Tensor, Tensor]: """NMS for multi-instance prediction. Please refer to https://github.com/Purkialo/CrowdDet for more details. Args: bboxes (Tensor): predict bboxes. scores (Tensor): The score of each predict bbox. score_thr (float): bbox threshold, bboxes with scores lower than it will not be considered. iou_threshold (float): IoU threshold to be considered as conflicted. max_num (int, optional): if there are more than max_num bboxes after NMS, only top max_num will be kept. Default to -1. Returns: Tuple[Tensor, Tensor]: (bboxes, scores). """ bboxes = bboxes[scores > score_thr] scores = scores[scores > score_thr] ordered_scores, order = scores.sort(descending=True) ordered_bboxes = bboxes[order] roi_idx = ordered_bboxes[:, -1] keep = torch.ones(len(ordered_bboxes)) == 1 ruler = torch.arange(len(ordered_bboxes)) keep = keep.to(bboxes.device) ruler = ruler.to(bboxes.device) while ruler.shape[0] > 0: basement = ruler[0] ruler = ruler[1:] idx = roi_idx[basement] # calculate the body overlap basement_bbox = ordered_bboxes[:, :4][basement].reshape(-1, 4) ruler_bbox = ordered_bboxes[:, :4][ruler].reshape(-1, 4)
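The min-over-permutations selection in `loss` above is the EMD step from CrowdDet: each proposal's pair of predictions is scored against the target pair in both orders, and only the cheaper assignment contributes. A minimal sketch of just that selection, using hypothetical per-proposal losses:

import torch

def emd_select(loss_0: torch.Tensor, loss_1: torch.Tensor) -> torch.Tensor:
    # loss_0 / loss_1: (N, 1) losses for the two prediction-to-target orderings
    loss = torch.cat([loss_0, loss_1], dim=1)   # (N, 2)
    return loss.min(dim=1).values.mean()        # keep the cheaper ordering per proposal

loss_0 = torch.tensor([[1.0], [5.0]])
loss_1 = torch.tensor([[2.0], [3.0]])
print(emd_select(loss_0, loss_1))  # tensor(2.) -> mean of min(1, 2) and min(5, 3)

This mirrors the `loss.min(dim=1)` / gather / `mean()` sequence in the head itself; the heavy lifting of building `loss_0` and `loss_1` is done by `emd_loss` for the two orderings of the (bbox_pred, cls_score) halves.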
overlap = bbox_overlaps(basement_bbox, ruler_bbox)
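The gold line above measures IoU between the kept `basement` box and the remaining candidates in `ruler`. The rest of the loop is truncated in this record; the following is a hedged sketch of a plausible continuation — the exact statements in mmdet may differ, and the index handling here is an assumption, but the CrowdDet rule it encodes (conflicting boxes are suppressed unless they share the kept box's `roi_idx`) is the one the snippet's own reference describes:

# Plausible continuation (assumed, not copied from mmdet): suppress candidates
# whose IoU with the kept box exceeds iou_threshold, sparing siblings that
# came from the same proposal (same roi_idx), then prune the queue.
indices = torch.where(overlap > iou_threshold)[1]   # conflicting candidates (indices into ruler)
same_proposal = roi_idx[ruler[indices]] == idx      # siblings of the kept box are spared
keep[ruler[indices[~same_proposal]]] = False        # suppress the rest
ruler = ruler[keep[ruler]]                          # drop suppressed ids from the queue
# after the loop, the surviving rows of ordered_bboxes / ordered_scores are
# returned (optionally truncated to max_num)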
4
2023-12-23 08:36:47+00:00
24k
see2023/Bert-VITS2-ext
train_ms.py
[ { "identifier": "config", "path": "config.py", "snippet": "class Resample_config:\nclass Preprocess_text_config:\nclass Bert_gen_config:\nclass Emo_gen_config:\nclass Train_ms_config:\nclass Webui_config:\nclass Server_config:\nclass Translate_config:\nclass Config:\n def __init__(self, in_dir: str, out_dir: str, sampling_rate: int = 44100):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n transcription_path: str,\n cleaned_path: str,\n train_path: str,\n val_path: str,\n config_path: str,\n val_per_lang: int = 5,\n max_val_total: int = 10000,\n clean: bool = True,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n num_processes: int = 2,\n device: str = \"cuda\",\n use_multi_device: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n config_path: str,\n env: Dict[str, any],\n base: Dict[str, any],\n model: str,\n num_workers: int,\n spec_cache: bool,\n keep_ckpts: int,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self,\n device: str,\n model: str,\n v_model: str,\n config_path: str,\n language_identification_library: str,\n port: int = 7860,\n share: bool = False,\n debug: bool = False,\n ):\n def from_dict(cls, dataset_path: str, data: Dict[str, any]):\n def __init__(\n self, models: List[Dict[str, any]], port: int = 5000, device: str = \"cuda\"\n ):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, app_key: str, secret_key: str):\n def from_dict(cls, data: Dict[str, any]):\n def __init__(self, config_path: str):" }, { "identifier": "TextAudioSpeakerLoader", "path": "data_utils.py", "snippet": "class TextAudioSpeakerLoader(torch.utils.data.Dataset):\n \"\"\"\n 1) loads audio, speaker_id, text pairs\n 2) normalizes text and converts them to sequences of integers\n 3) computes spectrograms from audio files.\n \"\"\"\n\n def __init__(self, audiopaths_sid_text, hparams):\n self.audiopaths_sid_text = load_filepaths_and_text(audiopaths_sid_text)\n self.max_wav_value = hparams.max_wav_value\n self.sampling_rate = hparams.sampling_rate\n self.filter_length = hparams.filter_length\n self.hop_length = hparams.hop_length\n self.win_length = hparams.win_length\n self.sampling_rate = hparams.sampling_rate\n self.spk_map = hparams.spk2id\n self.hparams = hparams\n\n self.use_mel_spec_posterior = getattr(\n hparams, \"use_mel_posterior_encoder\", False\n )\n if self.use_mel_spec_posterior:\n self.n_mel_channels = getattr(hparams, \"n_mel_channels\", 80)\n\n self.cleaned_text = getattr(hparams, \"cleaned_text\", False)\n\n self.add_blank = hparams.add_blank\n self.min_text_len = getattr(hparams, \"min_text_len\", 1)\n self.max_text_len = getattr(hparams, \"max_text_len\", 384)\n\n random.seed(1234)\n random.shuffle(self.audiopaths_sid_text)\n self._filter()\n\n def _filter(self):\n \"\"\"\n Filter text & store spec lengths\n \"\"\"\n # Store spectrogram lengths for Bucketing\n # wav_length ~= file_size / (wav_channels * Bytes per dim) = file_size / (1 * 2)\n # spec_length = wav_length // hop_length\n\n audiopaths_sid_text_new = []\n lengths = []\n skipped = 0\n logger.info(\"Init dataset...\")\n for _id, spk, language, text, phones, tone, word2ph in tqdm(\n self.audiopaths_sid_text\n ):\n 
audiopath = f\"{_id}\"\n if self.min_text_len <= len(phones) and len(phones) <= self.max_text_len:\n phones = phones.split(\" \")\n tone = [int(i) for i in tone.split(\" \")]\n word2ph = [int(i) for i in word2ph.split(\" \")]\n audiopaths_sid_text_new.append(\n [audiopath, spk, language, text, phones, tone, word2ph]\n )\n lengths.append(os.path.getsize(audiopath) // (2 * self.hop_length))\n else:\n skipped += 1\n logger.info(\n \"skipped: \"\n + str(skipped)\n + \", total: \"\n + str(len(self.audiopaths_sid_text))\n )\n self.audiopaths_sid_text = audiopaths_sid_text_new\n self.lengths = lengths\n\n def get_audio_text_speaker_pair(self, audiopath_sid_text):\n # separate filename, speaker_id and text\n audiopath, sid, language, text, phones, tone, word2ph = audiopath_sid_text\n\n bert, ja_bert, en_bert, phones, tone, language = self.get_text(\n text, word2ph, phones, tone, language, audiopath\n )\n\n spec, wav = self.get_audio(audiopath)\n sid = torch.LongTensor([int(self.spk_map[sid])])\n\n return (phones, spec, wav, sid, tone, language, bert, ja_bert, en_bert)\n\n def get_audio(self, filename):\n audio_norm, sampling_rate = torchaudio.load(filename, frame_offset=0, num_frames=-1, normalize=True, channels_first=True)\n '''\n # from https://github.com/YYuX-1145/Bert-VITS2-Integration-package\n audio, sampling_rate = load_wav_to_torch(filename)\n if sampling_rate != self.sampling_rate:\n raise ValueError(\n \"{} {} SR doesn't match target {} SR\".format(\n filename, sampling_rate, self.sampling_rate\n )\n )\n audio_norm = audio / self.max_wav_value\n audio_norm = audio_norm.unsqueeze(0)\n '''\n spec_filename = filename.replace(\".wav\", \".spec.pt\")\n if self.use_mel_spec_posterior:\n spec_filename = spec_filename.replace(\".spec.pt\", \".mel.pt\")\n try:\n spec = torch.load(spec_filename)\n except:\n if self.use_mel_spec_posterior:\n spec = mel_spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.n_mel_channels,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n self.hparams.mel_fmin,\n self.hparams.mel_fmax,\n center=False,\n )\n else:\n spec = spectrogram_torch(\n audio_norm,\n self.filter_length,\n self.sampling_rate,\n self.hop_length,\n self.win_length,\n center=False,\n )\n spec = torch.squeeze(spec, 0)\n if config.train_ms_config.spec_cache:\n torch.save(spec, spec_filename)\n return spec, audio_norm\n\n def get_text(self, text, word2ph, phone, tone, language_str, wav_path):\n phone, tone, language = cleaned_text_to_sequence(phone, tone, language_str)\n if self.add_blank:\n phone = commons.intersperse(phone, 0)\n tone = commons.intersperse(tone, 0)\n language = commons.intersperse(language, 0)\n for i in range(len(word2ph)):\n word2ph[i] = word2ph[i] * 2\n word2ph[0] += 1\n bert_path = wav_path.replace(\".wav\", \".bert.pt\")\n try:\n bert_ori = torch.load(bert_path)\n assert bert_ori.shape[-1] == len(phone)\n except Exception as e:\n logger.warning(\"Bert load Failed\")\n logger.warning(e)\n\n if language_str == \"ZH\":\n bert = bert_ori\n ja_bert = torch.randn(1024, len(phone))\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"JP\":\n bert = torch.randn(1024, len(phone))\n ja_bert = bert_ori\n en_bert = torch.randn(1024, len(phone))\n elif language_str == \"EN\":\n bert = torch.randn(1024, len(phone))\n ja_bert = torch.randn(1024, len(phone))\n en_bert = bert_ori\n phone = torch.LongTensor(phone)\n tone = torch.LongTensor(tone)\n language = torch.LongTensor(language)\n return bert, ja_bert, en_bert, phone, tone, language\n\n def get_sid(self, 
sid):\n sid = torch.LongTensor([int(sid)])\n return sid\n\n def __getitem__(self, index):\n return self.get_audio_text_speaker_pair(self.audiopaths_sid_text[index])\n\n def __len__(self):\n return len(self.audiopaths_sid_text)" }, { "identifier": "TextAudioSpeakerCollate", "path": "data_utils.py", "snippet": "class TextAudioSpeakerCollate:\n \"\"\"Zero-pads model inputs and targets\"\"\"\n\n def __init__(self, return_ids=False):\n self.return_ids = return_ids\n\n def __call__(self, batch):\n \"\"\"Collate's training batch from normalized text, audio and speaker identities\n PARAMS\n ------\n batch: [text_normalized, spec_normalized, wav_normalized, sid]\n \"\"\"\n # Right zero-pad all one-hot text sequences to max input length\n _, ids_sorted_decreasing = torch.sort(\n torch.LongTensor([x[1].size(1) for x in batch]), dim=0, descending=True\n )\n\n max_text_len = max([len(x[0]) for x in batch])\n max_spec_len = max([x[1].size(1) for x in batch])\n max_wav_len = max([x[2].size(1) for x in batch])\n\n text_lengths = torch.LongTensor(len(batch))\n spec_lengths = torch.LongTensor(len(batch))\n wav_lengths = torch.LongTensor(len(batch))\n sid = torch.LongTensor(len(batch))\n\n text_padded = torch.LongTensor(len(batch), max_text_len)\n tone_padded = torch.LongTensor(len(batch), max_text_len)\n language_padded = torch.LongTensor(len(batch), max_text_len)\n bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n ja_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n en_bert_padded = torch.FloatTensor(len(batch), 1024, max_text_len)\n\n spec_padded = torch.FloatTensor(len(batch), batch[0][1].size(0), max_spec_len)\n wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len)\n text_padded.zero_()\n tone_padded.zero_()\n language_padded.zero_()\n spec_padded.zero_()\n wav_padded.zero_()\n bert_padded.zero_()\n ja_bert_padded.zero_()\n en_bert_padded.zero_()\n\n for i in range(len(ids_sorted_decreasing)):\n row = batch[ids_sorted_decreasing[i]]\n\n text = row[0]\n text_padded[i, : text.size(0)] = text\n text_lengths[i] = text.size(0)\n\n spec = row[1]\n spec_padded[i, :, : spec.size(1)] = spec\n spec_lengths[i] = spec.size(1)\n\n wav = row[2]\n wav_padded[i, :, : wav.size(1)] = wav\n wav_lengths[i] = wav.size(1)\n\n sid[i] = row[3]\n\n tone = row[4]\n tone_padded[i, : tone.size(0)] = tone\n\n language = row[5]\n language_padded[i, : language.size(0)] = language\n\n bert = row[6]\n bert_padded[i, :, : bert.size(1)] = bert\n\n ja_bert = row[7]\n ja_bert_padded[i, :, : ja_bert.size(1)] = ja_bert\n\n en_bert = row[8]\n en_bert_padded[i, :, : en_bert.size(1)] = en_bert\n\n return (\n text_padded,\n text_lengths,\n spec_padded,\n spec_lengths,\n wav_padded,\n wav_lengths,\n sid,\n tone_padded,\n language_padded,\n bert_padded,\n ja_bert_padded,\n en_bert_padded,\n )" }, { "identifier": "DistributedBucketSampler", "path": "data_utils.py", "snippet": "class DistributedBucketSampler(torch.utils.data.distributed.DistributedSampler):\n \"\"\"\n Maintain similar input lengths in a batch.\n Length groups are specified by boundaries.\n Ex) boundaries = [b1, b2, b3] -> any batch is included either {x | b1 < length(x) <=b2} or {x | b2 < length(x) <= b3}.\n\n It removes samples which are not included in the boundaries.\n Ex) boundaries = [b1, b2, b3] -> any x s.t. 
length(x) <= b1 or length(x) > b3 are discarded.\n \"\"\"\n\n def __init__(\n self,\n dataset,\n batch_size,\n boundaries,\n num_replicas=None,\n rank=None,\n shuffle=True,\n ):\n super().__init__(dataset, num_replicas=num_replicas, rank=rank, shuffle=shuffle)\n self.lengths = dataset.lengths\n self.batch_size = batch_size\n self.boundaries = boundaries\n\n self.buckets, self.num_samples_per_bucket = self._create_buckets()\n self.total_size = sum(self.num_samples_per_bucket)\n self.num_samples = self.total_size // self.num_replicas\n\n def _create_buckets(self):\n buckets = [[] for _ in range(len(self.boundaries) - 1)]\n for i in range(len(self.lengths)):\n length = self.lengths[i]\n idx_bucket = self._bisect(length)\n if idx_bucket != -1:\n buckets[idx_bucket].append(i)\n\n try:\n for i in range(len(buckets) - 1, 0, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n assert all(len(bucket) > 0 for bucket in buckets)\n # When one bucket is not traversed\n except Exception as e:\n print(\"Bucket warning \", e)\n for i in range(len(buckets) - 1, -1, -1):\n if len(buckets[i]) == 0:\n buckets.pop(i)\n self.boundaries.pop(i + 1)\n\n num_samples_per_bucket = []\n for i in range(len(buckets)):\n len_bucket = len(buckets[i])\n total_batch_size = self.num_replicas * self.batch_size\n rem = (\n total_batch_size - (len_bucket % total_batch_size)\n ) % total_batch_size\n num_samples_per_bucket.append(len_bucket + rem)\n return buckets, num_samples_per_bucket\n\n def __iter__(self):\n # deterministically shuffle based on epoch\n g = torch.Generator()\n g.manual_seed(self.epoch)\n\n indices = []\n if self.shuffle:\n for bucket in self.buckets:\n indices.append(torch.randperm(len(bucket), generator=g).tolist())\n else:\n for bucket in self.buckets:\n indices.append(list(range(len(bucket))))\n\n batches = []\n for i in range(len(self.buckets)):\n bucket = self.buckets[i]\n len_bucket = len(bucket)\n if len_bucket == 0:\n continue\n ids_bucket = indices[i]\n num_samples_bucket = self.num_samples_per_bucket[i]\n\n # add extra samples to make it evenly divisible\n rem = num_samples_bucket - len_bucket\n ids_bucket = (\n ids_bucket\n + ids_bucket * (rem // len_bucket)\n + ids_bucket[: (rem % len_bucket)]\n )\n\n # subsample\n ids_bucket = ids_bucket[self.rank :: self.num_replicas]\n\n # batching\n for j in range(len(ids_bucket) // self.batch_size):\n batch = [\n bucket[idx]\n for idx in ids_bucket[\n j * self.batch_size : (j + 1) * self.batch_size\n ]\n ]\n batches.append(batch)\n\n if self.shuffle:\n batch_ids = torch.randperm(len(batches), generator=g).tolist()\n batches = [batches[i] for i in batch_ids]\n self.batches = batches\n\n assert len(self.batches) * self.batch_size == self.num_samples\n return iter(self.batches)\n\n def _bisect(self, x, lo=0, hi=None):\n if hi is None:\n hi = len(self.boundaries) - 1\n\n if hi > lo:\n mid = (hi + lo) // 2\n if self.boundaries[mid] < x and x <= self.boundaries[mid + 1]:\n return mid\n elif x <= self.boundaries[mid]:\n return self._bisect(x, lo, mid)\n else:\n return self._bisect(x, mid + 1, hi)\n else:\n return -1\n\n def __len__(self):\n return self.num_samples // self.batch_size" }, { "identifier": "AudioVisemesLoader", "path": "data_utils.py", "snippet": "class AudioVisemesLoader(torch.utils.data.Dataset):\n \"\"\"\n loads audio, visemes torch variable pairs from visemes list file .\n file is like: \n ./records/date_time.z.npy|./records/date_time.npy\n \"\"\"\n \n def __init__(self, audio_visemes_list_file, hparams):\n 
self.audio_visemes_list_items = load_filepaths_and_text(audio_visemes_list_file)\n print('audio_visemes_list_items: ', len(self.audio_visemes_list_items))\n random.seed(1234)\n random.shuffle(self.audio_visemes_list_items)\n self.max_visemes_len = 1210\n self.min_visemes_len = 1190\n self._filter()\n\n\n def _filter(self):\n # check if the file exists, and can parse as torch tensor\n audio_visemes_list_items_new = []\n for audio_file, visemes_file in self.audio_visemes_list_items:\n if os.path.exists(audio_file) and os.path.exists(visemes_file):\n # check using torch.load\n try:\n audio = torch.load(audio_file)\n visemes = np.load(visemes_file)\n if visemes.shape[0] < self.min_visemes_len:\n print('drop this data: --------- visemes.shape[0] < self.min_visemes_len: ', visemes.shape[0], visemes_file)\n continue\n audio_visemes_list_items_new.append([audio_file, visemes_file])\n except Exception as e:\n print('error: ', audio_file, visemes_file)\n print(e)\n self.audio_visemes_list_items = audio_visemes_list_items_new\n print('audio_visemes_list_items after filter: ', len(self.audio_visemes_list_items))\n\n def __getitem__(self, index):\n # read these two torch.tensor\n audio_file, visemes_file = self.audio_visemes_list_items[index]\n audio_z = torch.load(audio_file).squeeze(0).detach()\n # [192, seq_len(1722)]\n\n visemes = np.load(visemes_file)\n visemes = torch.from_numpy(visemes)\n #[seq_len(1194), 61]\n visemes = visemes.transpose(0, 1)\n #[61, seq_len(1194)]\n if visemes.shape[1] > self.max_visemes_len:\n # cut the extra part\n # print('__getitem__ 1 cut visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n visemes = visemes[:, :self.max_visemes_len]\n elif visemes.shape[1] < self.max_visemes_len:\n # padding to max_visemes_len with last frame\n # print('__getitem__ 2 padding visemes from ', visemes.shape[0], ' to ', self.max_visemes_len, 'file: ', visemes_file)\n # last_frame = visemes[-1]\n # visemes = np.concatenate([visemes, np.tile(last_frame, (self.max_visemes_len - visemes.shape[0], 1))], axis=0)\n # visemes = torch.from_numpy(visemes)\n pass\n\n visemes_offset = 0.08 # 将visemes延迟n s\n visemes_offset_frames = int(visemes_offset * const_map.ARKIT_FPS)\n visemes = visemes[:, visemes_offset_frames:]\n\n audio_z_offset = 0.0\n audio_z_offset_frames = int(audio_z_offset * const_map.Z_FPS)\n audio_z = audio_z[:, audio_z_offset_frames:]\n\n # 获取二者的时长,将过长的一方多的部分丢弃\n visemes_duration = visemes.shape[1] / const_map.ARKIT_FPS\n audio_z_duration = audio_z.shape[1] / const_map.Z_FPS\n if visemes_duration > audio_z_duration:\n visemes = visemes[:, :int(audio_z_duration * const_map.ARKIT_FPS)]\n elif visemes_duration < audio_z_duration:\n audio_z = audio_z[:, :int(visemes_duration * const_map.Z_FPS)]\n\n\n # print('__getitem__ 3 audio.shape: ', audio.shape, 'visemes.shape: ', visemes.shape,'file: ', visemes_file)\n return audio_z, visemes\n\n def __len__(self):\n return len(self.audio_visemes_list_items)" }, { "identifier": "SynthesizerTrn", "path": "models.py", "snippet": "class SynthesizerTrn(nn.Module):\n \"\"\"\n Synthesizer for Training\n \"\"\"\n\n def __init__(\n self,\n n_vocab,\n spec_channels,\n segment_size,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n n_speakers=256,\n gin_channels=256,\n use_sdp=True,\n n_flow_layer=4,\n n_layers_trans_flow=4,\n 
flow_share_parameter=False,\n use_transformer_flow=True,\n **kwargs\n ):\n super().__init__()\n self.n_vocab = n_vocab\n self.spec_channels = spec_channels\n self.inter_channels = inter_channels\n self.hidden_channels = hidden_channels\n self.filter_channels = filter_channels\n self.n_heads = n_heads\n self.n_layers = n_layers\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.resblock = resblock\n self.resblock_kernel_sizes = resblock_kernel_sizes\n self.resblock_dilation_sizes = resblock_dilation_sizes\n self.upsample_rates = upsample_rates\n self.upsample_initial_channel = upsample_initial_channel\n self.upsample_kernel_sizes = upsample_kernel_sizes\n self.segment_size = segment_size\n self.n_speakers = n_speakers\n self.gin_channels = gin_channels\n self.n_layers_trans_flow = n_layers_trans_flow\n self.use_spk_conditioned_encoder = kwargs.get(\n \"use_spk_conditioned_encoder\", True\n )\n self.use_sdp = use_sdp\n self.use_noise_scaled_mas = kwargs.get(\"use_noise_scaled_mas\", False)\n self.mas_noise_scale_initial = kwargs.get(\"mas_noise_scale_initial\", 0.01)\n self.noise_scale_delta = kwargs.get(\"noise_scale_delta\", 2e-6)\n self.current_mas_noise_scale = self.mas_noise_scale_initial\n if self.use_spk_conditioned_encoder and gin_channels > 0:\n self.enc_gin_channels = gin_channels\n self.enc_p = TextEncoder(\n n_vocab,\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers,\n kernel_size,\n p_dropout,\n gin_channels=self.enc_gin_channels,\n )\n self.dec = Generator(\n inter_channels,\n resblock,\n resblock_kernel_sizes,\n resblock_dilation_sizes,\n upsample_rates,\n upsample_initial_channel,\n upsample_kernel_sizes,\n gin_channels=gin_channels,\n )\n self.enc_q = PosteriorEncoder(\n spec_channels,\n inter_channels,\n hidden_channels,\n 5,\n 1,\n 16,\n gin_channels=gin_channels,\n )\n if use_transformer_flow:\n self.flow = TransformerCouplingBlock(\n inter_channels,\n hidden_channels,\n filter_channels,\n n_heads,\n n_layers_trans_flow,\n 5,\n p_dropout,\n n_flow_layer,\n gin_channels=gin_channels,\n share_parameter=flow_share_parameter,\n )\n else:\n self.flow = ResidualCouplingBlock(\n inter_channels,\n hidden_channels,\n 5,\n 1,\n n_flow_layer,\n gin_channels=gin_channels,\n )\n self.sdp = StochasticDurationPredictor(\n hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels\n )\n self.dp = DurationPredictor(\n hidden_channels, 256, 3, 0.5, gin_channels=gin_channels\n )\n\n if n_speakers >= 1:\n self.emb_g = nn.Embedding(n_speakers, gin_channels)\n else:\n self.ref_enc = ReferenceEncoder(spec_channels, gin_channels)\n\n def forward(\n self,\n x,\n x_lengths,\n y,\n y_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n ):\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g)\n z_p = self.flow(z, y_mask, g=g)\n\n with torch.no_grad():\n # negative cross-entropy\n s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t]\n neg_cent1 = torch.sum(\n -0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent2 = torch.matmul(\n -0.5 * (z_p**2).transpose(1, 2), s_p_sq_r\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent3 = torch.matmul(\n z_p.transpose(1, 2), (m_p * s_p_sq_r)\n ) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s]\n neg_cent4 = torch.sum(\n -0.5 * (m_p**2) * 
s_p_sq_r, [1], keepdim=True\n ) # [b, 1, t_s]\n neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4\n if self.use_noise_scaled_mas:\n epsilon = (\n torch.std(neg_cent)\n * torch.randn_like(neg_cent)\n * self.current_mas_noise_scale\n )\n neg_cent = neg_cent + epsilon\n\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = (\n monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1))\n .unsqueeze(1)\n .detach()\n )\n\n w = attn.sum(2)\n\n l_length_sdp = self.sdp(x, x_mask, w, g=g)\n l_length_sdp = l_length_sdp / torch.sum(x_mask)\n\n logw_ = torch.log(w + 1e-6) * x_mask\n logw = self.dp(x, x_mask, g=g)\n logw_sdp = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=1.0)\n l_length_dp = torch.sum((logw - logw_) ** 2, [1, 2]) / torch.sum(\n x_mask\n ) # for averaging\n l_length_sdp += torch.sum((logw_sdp - logw_) ** 2, [1, 2]) / torch.sum(x_mask)\n\n l_length = l_length_dp + l_length_sdp\n\n # expand prior\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2)\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2)\n\n z_slice, ids_slice = commons.rand_slice_segments(\n z, y_lengths, self.segment_size\n )\n o = self.dec(z_slice, g=g)\n return (\n o,\n l_length,\n attn,\n ids_slice,\n x_mask,\n y_mask,\n (z, z_p, m_p, logs_p, m_q, logs_q),\n (x, logw, logw_, logw_sdp),\n g,\n )\n\n def infer(\n self,\n x,\n x_lengths,\n sid,\n tone,\n language,\n bert,\n ja_bert,\n en_bert,\n noise_scale=0.667,\n length_scale=1,\n noise_scale_w=0.8,\n max_len=None,\n sdp_ratio=0,\n y=None,\n ):\n # x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths, tone, language, bert)\n # g = self.gst(y)\n if self.n_speakers > 0:\n g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1]\n else:\n g = self.ref_enc(y.transpose(1, 2)).unsqueeze(-1)\n x, m_p, logs_p, x_mask = self.enc_p(\n x, x_lengths, tone, language, bert, ja_bert, en_bert, g=g\n )\n logw = self.sdp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) * (\n sdp_ratio\n ) + self.dp(x, x_mask, g=g) * (1 - sdp_ratio)\n w = torch.exp(logw) * x_mask * length_scale\n w_ceil = torch.ceil(w)\n y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long()\n y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(\n x_mask.dtype\n )\n attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1)\n attn = commons.generate_path(w_ceil, attn_mask)\n\n m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(\n 1, 2\n ) # [b, t', t], [b, t, d] -> [b, d, t']\n\n z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale\n z = self.flow(z_p, y_mask, g=g, reverse=True)\n o = self.dec((z * y_mask)[:, :, :max_len], g=g)\n return o, attn, y_mask, (z, z_p, m_p, logs_p)\n\n def get_post_enc_dec(self):\n return self.enc_q, self.dec" }, { "identifier": "MultiPeriodDiscriminator", "path": "models.py", "snippet": "class MultiPeriodDiscriminator(torch.nn.Module):\n def __init__(self, use_spectral_norm=False):\n super(MultiPeriodDiscriminator, self).__init__()\n periods = [2, 3, 5, 7, 11]\n\n discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)]\n discs = discs + [\n DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods\n ]\n self.discriminators = nn.ModuleList(discs)\n\n def forward(self, y, y_hat):\n y_d_rs = []\n y_d_gs = []\n fmap_rs = []\n fmap_gs = []\n for i, d in enumerate(self.discriminators):\n y_d_r, fmap_r = 
d(y)\n y_d_g, fmap_g = d(y_hat)\n y_d_rs.append(y_d_r)\n y_d_gs.append(y_d_g)\n fmap_rs.append(fmap_r)\n fmap_gs.append(fmap_g)\n\n return y_d_rs, y_d_gs, fmap_rs, fmap_gs" }, { "identifier": "DurationDiscriminator", "path": "models.py", "snippet": "class DurationDiscriminator(nn.Module): # vits2\n def __init__(\n self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0\n ):\n super().__init__()\n\n self.in_channels = in_channels\n self.filter_channels = filter_channels\n self.kernel_size = kernel_size\n self.p_dropout = p_dropout\n self.gin_channels = gin_channels\n\n self.drop = nn.Dropout(p_dropout)\n self.conv_1 = nn.Conv1d(\n in_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_1 = modules.LayerNorm(filter_channels)\n self.conv_2 = nn.Conv1d(\n filter_channels, filter_channels, kernel_size, padding=kernel_size // 2\n )\n self.norm_2 = modules.LayerNorm(filter_channels)\n self.dur_proj = nn.Conv1d(1, filter_channels, 1)\n\n self.LSTM = nn.LSTM(\n 2 * filter_channels, filter_channels, batch_first=True, bidirectional=True\n )\n\n if gin_channels != 0:\n self.cond = nn.Conv1d(gin_channels, in_channels, 1)\n\n self.output_layer = nn.Sequential(\n nn.Linear(2 * filter_channels, 1), nn.Sigmoid()\n )\n\n def forward_probability(self, x, dur):\n dur = self.dur_proj(dur)\n x = torch.cat([x, dur], dim=1)\n x = x.transpose(1, 2)\n x, _ = self.LSTM(x)\n output_prob = self.output_layer(x)\n return output_prob\n\n def forward(self, x, x_mask, dur_r, dur_hat, g=None):\n x = torch.detach(x)\n if g is not None:\n g = torch.detach(g)\n x = x + self.cond(g)\n x = self.conv_1(x * x_mask)\n x = torch.relu(x)\n x = self.norm_1(x)\n x = self.drop(x)\n x = self.conv_2(x * x_mask)\n x = torch.relu(x)\n x = self.norm_2(x)\n x = self.drop(x)\n\n output_probs = []\n for dur in [dur_r, dur_hat]:\n output_prob = self.forward_probability(x, dur)\n output_probs.append(output_prob)\n\n return output_probs" }, { "identifier": "WavLMDiscriminator", "path": "models.py", "snippet": "class WavLMDiscriminator(nn.Module):\n \"\"\"docstring for Discriminator.\"\"\"\n\n def __init__(\n self, slm_hidden=768, slm_layers=13, initial_channel=64, use_spectral_norm=False\n ):\n super(WavLMDiscriminator, self).__init__()\n norm_f = weight_norm if use_spectral_norm == False else spectral_norm\n self.pre = norm_f(\n Conv1d(slm_hidden * slm_layers, initial_channel, 1, 1, padding=0)\n )\n\n self.convs = nn.ModuleList(\n [\n norm_f(\n nn.Conv1d(\n initial_channel, initial_channel * 2, kernel_size=5, padding=2\n )\n ),\n norm_f(\n nn.Conv1d(\n initial_channel * 2,\n initial_channel * 4,\n kernel_size=5,\n padding=2,\n )\n ),\n norm_f(\n nn.Conv1d(initial_channel * 4, initial_channel * 4, 5, 1, padding=2)\n ),\n ]\n )\n\n self.conv_post = norm_f(Conv1d(initial_channel * 4, 1, 3, 1, padding=1))\n\n def forward(self, x):\n x = self.pre(x)\n\n fmap = []\n for l in self.convs:\n x = l(x)\n x = F.leaky_relu(x, modules.LRELU_SLOPE)\n fmap.append(x)\n x = self.conv_post(x)\n x = torch.flatten(x, 1, -1)\n\n return x" }, { "identifier": "VisemesNet", "path": "models.py", "snippet": "class VisemesNet(nn.Module):\n def active(self, x):\n # active_fun: 0: null, 1: tanh, 2: relu, 3: LeakyReLU\n if self.active_fun == 1:\n return torch.tanh(x)\n elif self.active_fun == 2:\n return torch.relu(x)\n elif self.active_fun == 3:\n return self.leakyReLU(x)\n else:\n return x\n\n def __init__(self, hidden_channels, lstm_bidirectional=True, active_fun = 3, enable_conv=True, \n use_transformer = False, 
enable_dropout=True):\n super(VisemesNet, self).__init__()\n self.lstm_bidirectional = lstm_bidirectional\n self.lstm_directions = 2 if lstm_bidirectional else 1\n self.use_transformer = use_transformer\n self.enable_dropout = enable_dropout\n if active_fun == 3:\n self.leakyReLU = nn.LeakyReLU(negative_slope=0.01)\n if use_transformer:\n num_heads=8\n num_layers=3\n dim_feedforward=512\n dropout=0.1\n activation=\"relu\"\n self.transformer_encoder_layer = nn.TransformerEncoderLayer(\n d_model=hidden_channels, \n nhead=num_heads,\n dim_feedforward=dim_feedforward,\n dropout=dropout,\n activation=activation,\n batch_first=True\n )\n self.transformer_encoder = nn.TransformerEncoder(self.transformer_encoder_layer, num_layers=num_layers)\n else:\n self.lstm = nn.LSTM(input_size=hidden_channels, hidden_size=128, num_layers=3, batch_first=True, bidirectional=lstm_bidirectional)\n if use_transformer:\n self.fc1 = nn.Linear(hidden_channels, 96)\n else:\n self.fc1 = nn.Linear(128 * self.lstm_directions, 96)\n self.fc2 = nn.Linear(96, 61)\n dropout_rate = 0.5\n if self.enable_dropout:\n self.dropout = nn.Dropout(dropout_rate)\n conv_kernel_pre = 15\n conv_kernel_post = 11\n self.conv1d_pre = nn.Conv1d(in_channels=hidden_channels, out_channels=hidden_channels, kernel_size=conv_kernel_pre, stride=1, padding=conv_kernel_pre//2)\n self.conv1d_post = nn.Conv1d(in_channels=61, out_channels=61, kernel_size=conv_kernel_post, stride=1, padding=conv_kernel_post//2)\n self.enable_conv = enable_conv\n self.active_fun = active_fun\n\n def forward(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.use_transformer:\n return self.forward_transformer(x, y)\n else:\n return self.forward_lstm(x, y)\n\n def forward_transformer(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n # batch_first: True (batch, seq, feature); False (seq, batch, feature).\n x = x.transpose(1, 2)\n\n expressions = self.transformer_encoder(x)\n \n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n # expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n\n return expressions \n\n def forward_lstm(self, x, y=None):\n # x [batch_size, hidden_channels, seq_len]\n if self.enable_conv:\n x = self.conv1d_pre(x)\n x = x.transpose(1, 2)\n # x [batch_size, seq_len, hidden_channels]\n expressions = None\n expressions, _ = self.lstm(x)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc1(expressions)\n expressions = self.active(expressions)\n if self.enable_dropout:\n expressions = self.dropout(expressions)\n expressions = self.fc2(expressions)\n\n expressions = expressions.transpose(1, 2)\n if self.enable_conv:\n expressions = self.conv1d_post(expressions)\n return expressions\n \n def init_weights(self):\n # initialize the weights\n for m in self.modules():\n if isinstance(m, nn.Linear):\n nn.init.xavier_uniform_(m.weight.data)\n if m.bias is not None:\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.LSTM):\n for name, param in m.named_parameters():\n if 'weight_ih' in name:\n nn.init.xavier_uniform_(param.data)\n elif 'weight_hh' in name:\n nn.init.orthogonal_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.BatchNorm1d):\n 
nn.init.constant_(m.weight.data, 1)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.Conv1d):\n nn.init.xavier_uniform_(m.weight.data)\n nn.init.constant_(m.bias.data, 0)\n elif isinstance(m, nn.TransformerEncoderLayer):\n for name, param in m.named_parameters():\n if 'weight' in name:\n if param.dim() == 1:\n nn.init.normal_(param.data)\n else:\n nn.init.xavier_uniform_(param.data)\n elif 'bias' in name:\n nn.init.constant_(param.data, 0)\n elif isinstance(m, nn.TransformerEncoder):\n for param in m.parameters():\n if param.dim() > 1:\n nn.init.xavier_uniform_(param.data)\n else:\n nn.init.constant_(param.data, 0)" }, { "identifier": "generator_loss", "path": "losses.py", "snippet": "def generator_loss(disc_outputs):\n loss = 0\n gen_losses = []\n for dg in disc_outputs:\n dg = dg.float()\n l = torch.mean((1 - dg) ** 2)\n gen_losses.append(l)\n loss += l\n\n return loss, gen_losses" }, { "identifier": "discriminator_loss", "path": "losses.py", "snippet": "def discriminator_loss(disc_real_outputs, disc_generated_outputs):\n loss = 0\n r_losses = []\n g_losses = []\n for dr, dg in zip(disc_real_outputs, disc_generated_outputs):\n dr = dr.float()\n dg = dg.float()\n r_loss = torch.mean((1 - dr) ** 2)\n g_loss = torch.mean(dg**2)\n loss += r_loss + g_loss\n r_losses.append(r_loss.item())\n g_losses.append(g_loss.item())\n\n return loss, r_losses, g_losses" }, { "identifier": "feature_loss", "path": "losses.py", "snippet": "def feature_loss(fmap_r, fmap_g):\n loss = 0\n for dr, dg in zip(fmap_r, fmap_g):\n for rl, gl in zip(dr, dg):\n rl = rl.float().detach()\n gl = gl.float()\n loss += torch.mean(torch.abs(rl - gl))\n\n return loss * 2" }, { "identifier": "kl_loss", "path": "losses.py", "snippet": "def kl_loss(z_p, logs_q, m_p, logs_p, z_mask):\n \"\"\"\n z_p, logs_q: [b, h, t_t]\n m_p, logs_p: [b, h, t_t]\n \"\"\"\n z_p = z_p.float()\n logs_q = logs_q.float()\n m_p = m_p.float()\n logs_p = logs_p.float()\n z_mask = z_mask.float()\n\n kl = logs_p - logs_q - 0.5\n kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)\n kl = torch.sum(kl * z_mask)\n l = kl / torch.sum(z_mask)\n return l" }, { "identifier": "WavLMLoss", "path": "losses.py", "snippet": "class WavLMLoss(torch.nn.Module):\n def __init__(self, model, wd, model_sr, slm_sr=16000):\n super(WavLMLoss, self).__init__()\n self.wavlm = AutoModel.from_pretrained(model)\n self.wd = wd\n self.resample = torchaudio.transforms.Resample(model_sr, slm_sr)\n self.wavlm.eval()\n for param in self.wavlm.parameters():\n param.requires_grad = False\n\n def forward(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16.squeeze(), output_hidden_states=True\n ).hidden_states\n\n floss = 0\n for er, eg in zip(wav_embeddings, y_rec_embeddings):\n floss += torch.mean(torch.abs(er - eg))\n\n return floss.mean()\n\n def generator(self, y_rec):\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_df_hat_g = self.wd(y_rec_embeddings)\n loss_gen = torch.mean((1 - y_df_hat_g) ** 2)\n\n return loss_gen\n\n def discriminator(self, wav, y_rec):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n 
input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_rec_16 = self.resample(y_rec)\n y_rec_embeddings = self.wavlm(\n input_values=y_rec_16, output_hidden_states=True\n ).hidden_states\n\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n y_rec_embeddings = (\n torch.stack(y_rec_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n y_d_gs = self.wd(y_rec_embeddings)\n\n y_df_hat_r, y_df_hat_g = y_d_rs, y_d_gs\n\n r_loss = torch.mean((1 - y_df_hat_r) ** 2)\n g_loss = torch.mean((y_df_hat_g) ** 2)\n\n loss_disc_f = r_loss + g_loss\n\n return loss_disc_f.mean()\n\n def discriminator_forward(self, wav):\n with torch.no_grad():\n wav_16 = self.resample(wav)\n wav_embeddings = self.wavlm(\n input_values=wav_16, output_hidden_states=True\n ).hidden_states\n y_embeddings = (\n torch.stack(wav_embeddings, dim=1)\n .transpose(-1, -2)\n .flatten(start_dim=1, end_dim=2)\n )\n\n y_d_rs = self.wd(y_embeddings)\n\n return y_d_rs" }, { "identifier": "mel_spectrogram_torch", "path": "mel_processing.py", "snippet": "def mel_spectrogram_torch(\n y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False\n):\n if torch.min(y) < -1.0:\n print(\"min value is \", torch.min(y))\n if torch.max(y) > 1.0:\n print(\"max value is \", torch.max(y))\n\n global mel_basis, hann_window\n dtype_device = str(y.dtype) + \"_\" + str(y.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n wnsize_dtype_device = str(win_size) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=y.dtype, device=y.device\n )\n if wnsize_dtype_device not in hann_window:\n hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(\n dtype=y.dtype, device=y.device\n )\n\n y = torch.nn.functional.pad(\n y.unsqueeze(1),\n (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)),\n mode=\"reflect\",\n )\n y = y.squeeze(1)\n\n spec = torch.stft(\n y,\n n_fft,\n hop_length=hop_size,\n win_length=win_size,\n window=hann_window[wnsize_dtype_device],\n center=center,\n pad_mode=\"reflect\",\n normalized=False,\n onesided=True,\n return_complex=False,\n )\n\n spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6)\n\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n\n return spec" }, { "identifier": "spec_to_mel_torch", "path": "mel_processing.py", "snippet": "def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax):\n global mel_basis\n dtype_device = str(spec.dtype) + \"_\" + str(spec.device)\n fmax_dtype_device = str(fmax) + \"_\" + dtype_device\n if fmax_dtype_device not in mel_basis:\n mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax)\n mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(\n dtype=spec.dtype, device=spec.device\n )\n spec = torch.matmul(mel_basis[fmax_dtype_device], spec)\n spec = spectral_normalize_torch(spec)\n return spec" }, { "identifier": "symbols", "path": "text/symbols.py", "snippet": "" } ]
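The kl_loss snippet in the context above computes the prior/posterior KL term from a single flow-mapped sample z_p. A minimal standalone check (the test harness is hypothetical, not from the repo) showing that this masked term is a Monte-Carlo estimate of the analytic diagonal-Gaussian KL, summed over channels and averaged over masked frames:

```python
import torch

torch.manual_seed(0)
b, h, t = 2, 4, 8
m_q = torch.randn(b, h, t)
logs_q = 0.1 * torch.randn(b, h, t)
m_p = torch.randn(b, h, t)
logs_p = 0.1 * torch.randn(b, h, t)
z_mask = torch.ones(b, 1, t)  # mask broadcasts over the channel dim

def kl_term(z_p):
    # identical arithmetic to the kl_loss snippet
    kl = logs_p - logs_q - 0.5
    kl += 0.5 * ((z_p - m_p) ** 2) * torch.exp(-2.0 * logs_p)
    return torch.sum(kl * z_mask) / torch.sum(z_mask)

# average the one-sample estimator over many reparameterized draws z_p = m_q + eps * s_q
est = torch.stack(
    [kl_term(m_q + torch.randn_like(m_q) * torch.exp(logs_q)) for _ in range(4000)]
).mean()

q = torch.distributions.Normal(m_q, torch.exp(logs_q))
p = torch.distributions.Normal(m_p, torch.exp(logs_p))
# sum(z_mask) counts frames, not channels, so the reference sums the KL over h
ref = torch.distributions.kl_divergence(q, p).sum(dim=1).mean()

print(f"MC estimate: {est:.4f}  analytic: {ref:.4f}")  # the two agree closely
```

Note the mask normalization: the loss sums over channels but averages only over valid frames, which is why the reference sums the per-channel KL before averaging.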
import platform
import os
import torch
import torch.distributed as dist
import logging
import argparse
import datetime
import gc
import commons
import utils
from torch.nn import functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.cuda.amp import autocast, GradScaler
from tqdm import tqdm
from config import config
from data_utils import (
    TextAudioSpeakerLoader,
    TextAudioSpeakerCollate,
    DistributedBucketSampler,
    AudioVisemesLoader,
)
from models import (
    SynthesizerTrn,
    MultiPeriodDiscriminator,
    DurationDiscriminator,
    WavLMDiscriminator,
    VisemesNet,
)
from losses import (
    generator_loss,
    discriminator_loss,
    feature_loss,
    kl_loss,
    WavLMLoss,
)
from mel_processing import mel_spectrogram_torch, spec_to_mel_torch
from text.symbols import symbols
15,278
        )
        else:
            train_and_evaluate(
                rank,
                local_rank,
                epoch,
                hps,
                [net_g, net_d, net_dur_disc, net_wd, wl],
                [optim_g, optim_d, optim_dur_disc, optim_wd],
                [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd],
                scaler,
                [train_loader, None],
                None,
                None,
            )
        scheduler_g.step()
        scheduler_d.step()
        scheduler_wd.step()
        if net_dur_disc is not None:
            scheduler_dur_disc.step()


def train_and_evaluate(
    rank,
    local_rank,
    epoch,
    hps,
    nets,
    optims,
    schedulers,
    scaler,
    loaders,
    logger,
    writers,
):
    net_g, net_d, net_dur_disc, net_wd, wl = nets
    optim_g, optim_d, optim_dur_disc, optim_wd = optims
    scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers

    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    net_wd.train()
    if net_dur_disc is not None:
        net_dur_disc.train()
    for batch_idx, (
        x,
        x_lengths,
        spec,
        spec_lengths,
        y,
        y_lengths,
        speakers,
        tone,
        language,
        bert,
        ja_bert,
        en_bert,
    ) in enumerate(tqdm(train_loader)):
        if net_g.module.use_noise_scaled_mas:
            current_mas_noise_scale = (
                net_g.module.mas_noise_scale_initial
                - net_g.module.noise_scale_delta * global_step
            )
            net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
        x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda(
            local_rank, non_blocking=True
        )
        spec, spec_lengths = spec.cuda(
            local_rank, non_blocking=True
        ), spec_lengths.cuda(local_rank, non_blocking=True)
        y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda(
            local_rank, non_blocking=True
        )
        speakers = speakers.cuda(local_rank, non_blocking=True)
        tone = tone.cuda(local_rank, non_blocking=True)
        language = language.cuda(local_rank, non_blocking=True)
        bert = bert.cuda(local_rank, non_blocking=True)
        ja_bert = ja_bert.cuda(local_rank, non_blocking=True)
        en_bert = en_bert.cuda(local_rank, non_blocking=True)

        with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16):
            (
                y_hat,
                l_length,
                attn,
                ids_slice,
                x_mask,
                z_mask,
                (z, z_p, m_p, logs_p, m_q, logs_q),
                (hidden_x, logw, logw_, logw_sdp),
                g,
            ) = net_g(
                x,
                x_lengths,
                spec,
                spec_lengths,
                speakers,
                tone,
                language,
                bert,
                ja_bert,
                en_bert,
            )
            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y_mel = commons.slice_segments(
                mel, ids_slice, hps.train.segment_size // hps.data.hop_length
            )
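The first lines of this training step implement the noise-scaled MAS annealing from VITS2: the noise added to the alignment scores decays linearly with the global step and is clamped at zero. A tiny standalone sketch of that schedule (the constants mirror the kwargs defaults in the SynthesizerTrn snippet above):

```python
MAS_NOISE_SCALE_INITIAL = 0.01  # mas_noise_scale_initial default
NOISE_SCALE_DELTA = 2e-6        # noise_scale_delta default

def mas_noise_scale(global_step: int) -> float:
    # linear decay, clamped at zero, as in the training loop
    return max(MAS_NOISE_SCALE_INITIAL - NOISE_SCALE_DELTA * global_step, 0.0)

for step in (0, 2500, 5000, 10000):
    print(step, mas_noise_scale(step))
# 0 -> 0.01, 2500 -> 0.005, and ~0 from step 5000 on (initial / delta = 5000)
```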
# flake8: noqa: E402
logging.getLogger("numba").setLevel(logging.WARNING)
logger = logging.getLogger(__name__)

torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = (
    True  # If you encounter training problems, please try disabling TF32.
)
torch.set_float32_matmul_precision("medium")
torch.backends.cuda.sdp_kernel("flash")
torch.backends.cuda.enable_flash_sdp(True)
torch.backends.cuda.enable_mem_efficient_sdp(
    True
)  # Not available if torch version is lower than 2.0
global_step = 0
global_visemes_step = 0


def run_only_visemes(hps):
    # simplest single-process mode: train only the parameters of VisemesNet,
    # the fully connected network mapping the latent variable z to visemes
    global global_visemes_step
    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(0)
    train_dataset = AudioVisemesLoader(hps.data.training_visemes_files, hps.data)
    train_loader = DataLoader(train_dataset, num_workers=0, shuffle=False, pin_memory=True, batch_size=1, drop_last=True)
    eval_dataset = AudioVisemesLoader(hps.data.validation_visemes_files, hps.data)
    eval_loader = DataLoader(eval_dataset, num_workers=0, shuffle=False, batch_size=1, pin_memory=True, drop_last=False)
    net_v = VisemesNet(hps.model.hidden_channels).cuda()
    latest_model_path = utils.latest_checkpoint_path(hps.model_dir, "V_*.pth")
    if latest_model_path is not None:
        _, _, _, epoch_str = utils.load_checkpoint(latest_model_path, net_v, None, skip_optimizer=False)
    else:
        epoch_str = 1
        global_visemes_step = 0
        net_v.init_weights()  # only initialize weights when starting from scratch
    optim_v = torch.optim.AdamW(
        net_v.parameters(), hps.train.learning_rate, betas=hps.train.betas, eps=hps.train.eps)
    optim_v.param_groups[0]['initial_lr'] = hps.train.learning_rate
    scheduler_v = torch.optim.lr_scheduler.ExponentialLR(optim_v, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2)
    scaler = GradScaler(enabled=hps.train.bf16_run)
    for epoch in range(epoch_str, hps.train.epochs + 1):
        train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler)
        scheduler_v.step()
        if epoch % hps.train.eval_interval == 0:
            eval_visemes_only(epoch, hps, net_v, eval_loader)
            utils.save_checkpoint(net_v, optim_v, hps.train.learning_rate, epoch, os.path.join(hps.model_dir, "V_{}.pth".format(epoch)))


def train_visemes_only(epoch, hps, net_v, train_loader, optim_v, scaler):
    for batch_idx, (spec, visemes) in tqdm(enumerate(train_loader)):
        spec, visemes = spec.cuda(), visemes.cuda()
        with autocast(enabled=hps.train.bf16_run):
            # run VisemesNet on z to get visemes_hat, then compute the MSE
            visemes_hat = net_v(spec)
            visemes_hat_mse = get_visemes_mse(visemes, visemes_hat)
        optim_v.zero_grad()
        scaler.scale(visemes_hat_mse).backward()
        scaler.unscale_(optim_v)
        grad_norm_v = commons.clip_grad_value_(net_v.parameters(), None)
        scaler.step(optim_v)
        global global_visemes_step
        global_visemes_step += 1
        if batch_idx % hps.train.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tvisemes_hat_mse: {:.6f}\tgrad_norm_v: {:.6f}'.format(
                epoch, batch_idx * len(spec), len(train_loader.dataset), 100.
                * batch_idx / len(train_loader), visemes_hat_mse.item(), grad_norm_v))


def get_visemes_mse(visemes, visemes_hat):
    if visemes.shape[-1] != visemes_hat.shape[-1]:  # if the last dims of the two differ
        # linearly interpolate visemes_hat so its shape matches visemes
        visemes_hat = F.interpolate(visemes_hat, size=visemes.shape[-1], mode='linear', align_corners=True)
    visemes_hat_mse = torch.mean(torch.pow(visemes_hat - visemes, 2))
    return visemes_hat_mse


def eval_visemes_only(epoch, hps, net_v, eval_loader):
    net_v.eval()
    with torch.no_grad():
        visemes_hat_mse_sum = 0.0
        for batch_idx, (spec, visemes) in tqdm(enumerate(eval_loader)):
            spec, visemes = spec.cuda(), visemes.cuda()
            # run VisemesNet on z to get visemes_hat, then compute the MSE
            visemes_hat = net_v(spec)
            visemes_hat_mse = get_visemes_mse(visemes, visemes_hat)
            visemes_hat_mse_sum += visemes_hat_mse
            # print('visemes_hat_mse', visemes_hat_mse)
            break
        visemes_hat_mse_avg = visemes_hat_mse_sum / (batch_idx + 1)
        log_str = '------------------ eval epoch: {} visemes_hat_mse_avg: {:.6f}'.format(epoch, visemes_hat_mse_avg)
        print(log_str)
        logger.warning(log_str)
    net_v.train()


def run():
    # parse environment variables
    envs = config.train_ms_config.env
    for env_name, env_value in envs.items():
        if env_name not in os.environ.keys():
            print("Loading configuration value {} from config".format(str(env_value)))
            os.environ[env_name] = str(env_value)
    print(
        "Loaded environment variables \nMASTER_ADDR: {},\nMASTER_PORT: {},\nWORLD_SIZE: {},\nRANK: {},\nLOCAL_RANK: {}".format(
            os.environ["MASTER_ADDR"],
            os.environ["MASTER_PORT"],
            os.environ["WORLD_SIZE"],
            os.environ["RANK"],
            os.environ["LOCAL_RANK"],
        )
    )
    backend = "nccl"
    if platform.system() == "Windows":
        backend = "gloo"  # If on Windows, switch to the gloo backend.
    dist.init_process_group(
        backend=backend,
        init_method="env://",
        timeout=datetime.timedelta(seconds=300),
    )  # Use torchrun instead of mp.spawn
    rank = dist.get_rank()
    local_rank = int(os.environ["LOCAL_RANK"])
    n_gpus = dist.get_world_size()

    # parse command-line / config.yml configuration
    # hps = utils.get_hparams()
    parser = argparse.ArgumentParser()
    # command-line configuration is discouraged unless necessary; prefer the config.yml file
    parser.add_argument(
        "-c",
        "--config",
        type=str,
        default=config.train_ms_config.config_path,
        help="JSON file for configuration",
    )
    parser.add_argument(
        "-m",
        "--model",
        type=str,
        help="Dataset folder path. Note that data is no longer stored under /logs by default; if you configure this on the command line, give the path relative to the repo root",
        default=config.dataset_path,
    )
    parser.add_argument('--visemes', dest='visemes', action="store_true", default=False, help="train visemes only, lock the encoder and decoder")

    args = parser.parse_args()
    model_dir = os.path.join(args.model, config.train_ms_config.model)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    hps = utils.get_hparams_from_file(args.config)
    hps.model_dir = model_dir
    set_logger(hps)
    if args.visemes:
        run_only_visemes(hps)
        return  # visemes-only training is done; skip the full TTS training below

    # check whether the two config paths are identical
    if os.path.realpath(args.config) != os.path.realpath(
        config.train_ms_config.config_path
    ):
        with open(args.config, "r", encoding="utf-8") as f:
            data = f.read()
        with open(config.train_ms_config.config_path, "w", encoding="utf-8") as f:
            f.write(data)

    torch.manual_seed(hps.train.seed)
    torch.cuda.set_device(local_rank)

    global global_step
    if rank == 0:
        logger = utils.get_logger(hps.model_dir)
        logger.info(hps)
        utils.check_git_hash(hps.model_dir)
        writer = SummaryWriter(log_dir=hps.model_dir)
        writer_eval = SummaryWriter(log_dir=os.path.join(hps.model_dir, "eval"))
    train_dataset = TextAudioSpeakerLoader(hps.data.training_files, hps.data)
    train_sampler = DistributedBucketSampler(
        train_dataset,
        hps.train.batch_size,
        [32, 300, 400, 500, 600, 700, 800, 900, 1000],
        num_replicas=n_gpus,
        rank=rank,
        shuffle=True,
    )
    collate_fn = TextAudioSpeakerCollate()
    train_loader = DataLoader(
        train_dataset,
        num_workers=min(config.train_ms_config.num_workers, os.cpu_count() - 1),
        shuffle=False,
        pin_memory=True,
        collate_fn=collate_fn,
        batch_sampler=train_sampler,
        persistent_workers=True,
        prefetch_factor=4,
    )  # DataLoader config could be adjusted.
    if rank == 0:
        eval_dataset = TextAudioSpeakerLoader(hps.data.validation_files, hps.data)
        eval_loader = DataLoader(
            eval_dataset,
            num_workers=0,
            shuffle=False,
            batch_size=1,
            pin_memory=True,
            drop_last=False,
            collate_fn=collate_fn,
        )
    if (
        "use_noise_scaled_mas" in hps.model.keys()
        and hps.model.use_noise_scaled_mas is True
    ):
        print("Using noise scaled MAS for VITS2")
        mas_noise_scale_initial = 0.01
        noise_scale_delta = 2e-6
    else:
        print("Using normal MAS for VITS1")
        mas_noise_scale_initial = 0.0
        noise_scale_delta = 0.0
    if (
        "use_duration_discriminator" in hps.model.keys()
        and hps.model.use_duration_discriminator is True
    ):
        print("Using duration discriminator for VITS2")
        net_dur_disc = DurationDiscriminator(
            hps.model.hidden_channels,
            hps.model.hidden_channels,
            3,
            0.1,
            gin_channels=hps.model.gin_channels if hps.data.n_speakers != 0 else 0,
        ).cuda(local_rank)
    else:
        net_dur_disc = None
    if (
        "use_spk_conditioned_encoder" in hps.model.keys()
        and hps.model.use_spk_conditioned_encoder is True
    ):
        if hps.data.n_speakers == 0:
            raise ValueError(
                "n_speakers must be > 0 when using spk conditioned encoder to train multi-speaker model"
            )
    else:
        print("Using normal encoder for VITS1")

    net_g = SynthesizerTrn(
        len(symbols),
        hps.data.filter_length // 2 + 1,
        hps.train.segment_size // hps.data.hop_length,
        n_speakers=hps.data.n_speakers,
        mas_noise_scale_initial=mas_noise_scale_initial,
        noise_scale_delta=noise_scale_delta,
        **hps.model,
    ).cuda(local_rank)
    if getattr(hps.train, "freeze_ZH_bert", False):
        print("Freezing ZH bert encoder !!!")
        for param in net_g.enc_p.bert_proj.parameters():
            param.requires_grad = False
    if getattr(hps.train, "freeze_EN_bert", False):
        print("Freezing EN bert encoder !!!")
        for param in net_g.enc_p.en_bert_proj.parameters():
            param.requires_grad = False
    if getattr(hps.train, "freeze_JP_bert", False):
        print("Freezing JP bert encoder !!!")
        for param in net_g.enc_p.ja_bert_proj.parameters():
            param.requires_grad = False

    net_d = MultiPeriodDiscriminator(hps.model.use_spectral_norm).cuda(local_rank)
    net_wd = WavLMDiscriminator(
        hps.model.slm.hidden, hps.model.slm.nlayers, hps.model.slm.initial_channel
    ).cuda(local_rank)
    optim_g = torch.optim.AdamW(
        filter(lambda p: p.requires_grad, net_g.parameters()),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    optim_d = torch.optim.AdamW(
        net_d.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    optim_wd = torch.optim.AdamW(
        net_wd.parameters(),
        hps.train.learning_rate,
        betas=hps.train.betas,
        eps=hps.train.eps,
    )
    if net_dur_disc is not None:
        optim_dur_disc = torch.optim.AdamW(
            net_dur_disc.parameters(),
            hps.train.learning_rate,
            betas=hps.train.betas,
            eps=hps.train.eps,
        )
    else:
        optim_dur_disc = None
    net_g = DDP(net_g, device_ids=[local_rank], bucket_cap_mb=512)
    net_d = DDP(net_d, device_ids=[local_rank], bucket_cap_mb=512)
    net_wd = DDP(net_wd, device_ids=[local_rank], bucket_cap_mb=512)
    if net_dur_disc is not None:
        net_dur_disc = DDP(
            net_dur_disc,
            device_ids=[local_rank],
            bucket_cap_mb=512,
        )

    # download the pretrained base model if configured
    if config.train_ms_config.base["use_base_model"]:
        utils.download_checkpoint(
            hps.model_dir,
            config.train_ms_config.base,
            token=config.openi_token,
            mirror=config.mirror,
        )
    dur_resume_lr = hps.train.learning_rate
    wd_resume_lr = hps.train.learning_rate
    if net_dur_disc is not None:
        try:
            _, _, dur_resume_lr, epoch_str = utils.load_checkpoint(
                utils.latest_checkpoint_path(hps.model_dir, "DUR_*.pth"),
                net_dur_disc,
                optim_dur_disc,
                skip_optimizer=hps.train.skip_optimizer
                if "skip_optimizer" in hps.train
                else True,
            )
            if not optim_dur_disc.param_groups[0].get("initial_lr"):
                optim_dur_disc.param_groups[0]["initial_lr"] = dur_resume_lr
        except:
            print("Initialize dur_disc")

    try:
        _, optim_g, g_resume_lr, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"),
            net_g,
            optim_g,
            skip_optimizer=hps.train.skip_optimizer
            if "skip_optimizer" in hps.train
            else True,
        )
        _, optim_d, d_resume_lr, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(hps.model_dir, "D_*.pth"),
            net_d,
            optim_d,
            skip_optimizer=hps.train.skip_optimizer
            if "skip_optimizer" in hps.train
            else True,
        )
        if not optim_g.param_groups[0].get("initial_lr"):
            optim_g.param_groups[0]["initial_lr"] = g_resume_lr
        if not optim_d.param_groups[0].get("initial_lr"):
            optim_d.param_groups[0]["initial_lr"] = d_resume_lr
        epoch_str = max(epoch_str, 1)
        # global_step = (epoch_str - 1) * len(train_loader)
        global_step = int(
            utils.get_steps(utils.latest_checkpoint_path(hps.model_dir, "G_*.pth"))
        )
        print(
            f"****************** Found an existing model: epoch {epoch_str}, global step {global_step} *********************"
        )
    except Exception as e:
        print(e)
        epoch_str = 1
        global_step = 0

    try:
        _, optim_wd, wd_resume_lr, epoch_str = utils.load_checkpoint(
            utils.latest_checkpoint_path(hps.model_dir, "WD_*.pth"),
            net_wd,
            optim_wd,
            skip_optimizer=hps.train.skip_optimizer
            if "skip_optimizer" in hps.train
            else True,
        )
        if not optim_wd.param_groups[0].get("initial_lr"):
            optim_wd.param_groups[0]["initial_lr"] = wd_resume_lr
    except Exception as e:
        print(e)

    scheduler_g = torch.optim.lr_scheduler.ExponentialLR(
        optim_g, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
    )
    scheduler_d = torch.optim.lr_scheduler.ExponentialLR(
        optim_d, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
    )
    scheduler_wd = torch.optim.lr_scheduler.ExponentialLR(
        optim_wd, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
    )
    if net_dur_disc is not None:
        scheduler_dur_disc = torch.optim.lr_scheduler.ExponentialLR(
            optim_dur_disc, gamma=hps.train.lr_decay, last_epoch=epoch_str - 2
        )
    else:
        scheduler_dur_disc = None
    scaler = GradScaler(enabled=hps.train.bf16_run)
    wl = WavLMLoss(
        hps.model.slm.model,
        net_wd,
        hps.data.sampling_rate,
        hps.model.slm.sr,
    ).to(local_rank)

    for epoch in range(epoch_str, hps.train.epochs + 1):
        if rank == 0:
            train_and_evaluate(
                rank,
                local_rank,
                epoch,
                hps,
                [net_g, net_d, net_dur_disc, net_wd, wl],
                [optim_g, optim_d, optim_dur_disc, optim_wd],
                [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd],
                scaler,
                [train_loader, eval_loader],
                logger,
                [writer, writer_eval],
            )
        else:
            train_and_evaluate(
                rank,
                local_rank,
                epoch,
                hps,
                [net_g, net_d, net_dur_disc, net_wd, wl],
                [optim_g, optim_d, optim_dur_disc, optim_wd],
                [scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd],
                scaler,
                [train_loader, None],
                None,
                None,
            )
        scheduler_g.step()
        scheduler_d.step()
        scheduler_wd.step()
        if net_dur_disc is not None:
            scheduler_dur_disc.step()


def train_and_evaluate(
    rank,
    local_rank,
    epoch,
    hps,
    nets,
    optims,
    schedulers,
    scaler,
    loaders,
    logger,
    writers,
):
    net_g, net_d, net_dur_disc, net_wd, wl = nets
    optim_g, optim_d, optim_dur_disc, optim_wd = optims
    scheduler_g, scheduler_d, scheduler_dur_disc, scheduler_wd = schedulers
    train_loader, eval_loader = loaders
    if writers is not None:
        writer, writer_eval = writers
    train_loader.batch_sampler.set_epoch(epoch)
    global global_step

    net_g.train()
    net_d.train()
    net_wd.train()
    if net_dur_disc is not None:
        net_dur_disc.train()
    for batch_idx, (
        x,
        x_lengths,
        spec,
        spec_lengths,
        y,
        y_lengths,
        speakers,
        tone,
        language,
        bert,
        ja_bert,
        en_bert,
    ) in enumerate(tqdm(train_loader)):
        if net_g.module.use_noise_scaled_mas:
            current_mas_noise_scale = (
                net_g.module.mas_noise_scale_initial
                - net_g.module.noise_scale_delta * global_step
            )
            net_g.module.current_mas_noise_scale = max(current_mas_noise_scale, 0.0)
        x, x_lengths = x.cuda(local_rank, non_blocking=True), x_lengths.cuda(
            local_rank, non_blocking=True
        )
        spec, spec_lengths = spec.cuda(
            local_rank, non_blocking=True
        ), spec_lengths.cuda(local_rank, non_blocking=True)
        y, y_lengths = y.cuda(local_rank, non_blocking=True), y_lengths.cuda(
            local_rank, non_blocking=True
        )
        speakers = speakers.cuda(local_rank, non_blocking=True)
        tone = tone.cuda(local_rank, non_blocking=True)
        language = language.cuda(local_rank, non_blocking=True)
        bert = bert.cuda(local_rank, non_blocking=True)
        ja_bert = ja_bert.cuda(local_rank, non_blocking=True)
        en_bert = en_bert.cuda(local_rank, non_blocking=True)

        with autocast(enabled=hps.train.bf16_run, dtype=torch.bfloat16):
            (
                y_hat,
                l_length,
                attn,
                ids_slice,
                x_mask,
                z_mask,
                (z, z_p, m_p, logs_p, m_q, logs_q),
                (hidden_x, logw, logw_, logw_sdp),
                g,
            ) = net_g(
                x,
                x_lengths,
                spec,
                spec_lengths,
                speakers,
                tone,
                language,
                bert,
                ja_bert,
                en_bert,
            )
            mel = spec_to_mel_torch(
                spec,
                hps.data.filter_length,
                hps.data.n_mel_channels,
                hps.data.sampling_rate,
                hps.data.mel_fmin,
                hps.data.mel_fmax,
            )
            y_mel = commons.slice_segments(
                mel, ids_slice, hps.train.segment_size // hps.data.hop_length
            )
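Throughout this loop, latent and mel slices are taken in frames (hps.train.segment_size // hps.data.hop_length), while any waveform-domain slice scales the same start indices by hop_length. A simplified, self-contained re-implementation of the slicing helper (an assumption; the real one lives in commons.py and also draws the random start indices) to show that bookkeeping:

```python
import torch

def slice_segments(x: torch.Tensor, ids: torch.Tensor, segment_size: int) -> torch.Tensor:
    # simplified stand-in for commons.slice_segments: x is [b, c, t],
    # ids holds one start index per batch element
    return torch.stack(
        [x[i, :, s : s + segment_size] for i, s in enumerate(ids.tolist())]
    )

hop_length, segment_size = 256, 16384
seg_frames = segment_size // hop_length           # 64 mel frames per slice
mel = torch.randn(2, 80, 200)                     # [b, n_mels, frames]
wav = torch.randn(2, 1, 200 * hop_length)         # aligned waveform
ids_slice = torch.tensor([10, 50])                # frame-domain start indices

y_mel = slice_segments(mel, ids_slice, seg_frames)                 # [2, 80, 64]
y_wav = slice_segments(wav, ids_slice * hop_length, segment_size)  # [2, 1, 16384]
assert y_mel.shape[-1] * hop_length == y_wav.shape[-1]
```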
y_hat_mel = mel_spectrogram_torch(
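For orientation, the next_line above opens a call whose remaining arguments are not part of this record. A hedged sketch of how it is plausibly completed, matching the mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax) signature from the context snippet; this fragment assumes the surrounding scope of train_and_evaluate (y_hat, hps), and the argument choices, including hps.data.win_length, are educated guesses rather than ground truth:

```python
# hypothetical completion, not taken from the source record
y_hat_mel = mel_spectrogram_torch(
    y_hat.squeeze(1),         # generated waveform, [b, 1, t] -> [b, t]
    hps.data.filter_length,   # n_fft
    hps.data.n_mel_channels,  # num_mels
    hps.data.sampling_rate,   # sampling_rate
    hps.data.hop_length,      # hop_size
    hps.data.win_length,      # win_size
    hps.data.mel_fmin,        # fmin
    hps.data.mel_fmax,        # fmax
)
```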
15
2023-12-27 03:09:11+00:00
24k
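Closing out this record: a standalone check (hypothetical shapes; the harness is not from the repo) that the four-term matmul decomposition of neg_cent in the SynthesizerTrn.forward snippet above equals the diagonal-Gaussian log-likelihood log N(z_p[:, :, i]; m_p[:, :, j], exp(logs_p[:, :, j])) summed over channels, which is the score that monotonic_align.maximum_path then maximizes:

```python
import math
import torch

torch.manual_seed(0)
b, d, t_t, t_s = 1, 3, 5, 4
z_p = torch.randn(b, d, t_t)           # flow-mapped posterior sample
m_p = torch.randn(b, d, t_s)           # text-side prior mean
logs_p = 0.1 * torch.randn(b, d, t_s)  # text-side prior log-std

s_p_sq_r = torch.exp(-2 * logs_p)  # reciprocal prior variance
neg_cent = (
    torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True)
    + torch.matmul(-0.5 * (z_p**2).transpose(1, 2), s_p_sq_r)
    + torch.matmul(z_p.transpose(1, 2), m_p * s_p_sq_r)
    + torch.sum(-0.5 * (m_p**2) * s_p_sq_r, [1], keepdim=True)
)  # [b, t_t, t_s]

# brute-force reference: evaluate the Gaussian log-density pairwise
ref = torch.empty(b, t_t, t_s)
for i in range(t_t):
    for j in range(t_s):
        dist = torch.distributions.Normal(m_p[0, :, j], torch.exp(logs_p[0, :, j]))
        ref[0, i, j] = dist.log_prob(z_p[0, :, i]).sum()

assert torch.allclose(neg_cent, ref, atol=1e-5)
```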
chinhsuanwu/ifusion-threestudio
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n # 4D Gaussian Annealing\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n 
enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n\n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if global_step >= min_step and global_step <= max_step:\n end_val = self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step) * (\n end_val - start_val\n ) / (max_step - min_step)" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n 
self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, 
\"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def 
requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n 
vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), 
\"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os import numpy as np import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import ( BaseExplicitGeometry, BaseGeometry, contract_to_unisphere, ) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
15498
"+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise 
ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not 
other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
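For reference, the "sphere" and "ellipsoid" branches of initialize_shape in the code above amount to these closed-form pseudo-SDFs (negative inside the shape, positive outside); a standalone sketch:

import torch

def sphere_sdf(points, radius):
    # `shape_init == "sphere"` branch of initialize_shape above
    return (points**2).sum(dim=-1, keepdim=True).sqrt() - radius

def ellipsoid_sdf(points, size):
    # `shape_init == "ellipsoid"` branch above (a pseudo signed distance)
    size = torch.as_tensor(size).to(points)
    return ((points / size) ** 2).sum(dim=-1, keepdim=True).sqrt() - 1.0

pts = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]])
print(sphere_sdf(pts, radius=0.5))               # [[-0.5], [0.5]]: inside, outside
print(ellipsoid_sdf(pts, size=(0.5, 0.5, 0.5)))  # [[-1.0], [1.0]]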
elif isinstance(other, ImplicitVolume):
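A hedged sketch of how this record's fields relate: cropped_code appears to be the tail of all_code trimmed to the token budget, and next_line is the line a model should produce after it. The `record` dict and the suffix relation are assumptions inferred from this row, not documented guarantees:

# Assumed layout of one row (inferred, not documented):
#   all_code     - full code preceding the target line
#   cropped_code - tail of all_code, trimmed to roughly token_num tokens
#   next_line    - the single line to generate next
prompt = record["cropped_code"]   # `record` is a hypothetical loaded row
target = record["next_line"]
# sanity-check the inferred suffix relation for this row
assert record["all_code"].rstrip().endswith(prompt[-80:].rstrip())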
4
2023-12-27 20:30:33+00:00
24k
open-mmlab/Amphion
modules/wenet_extractor/transformer/encoder.py
[ { "identifier": "MultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class MultiHeadedAttention(nn.Module):\n \"\"\"Multi-Head Attention layer.\n\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(self, n_head: int, n_feat: int, dropout_rate: float):\n \"\"\"Construct an MultiHeadedAttention object.\"\"\"\n super().__init__()\n assert n_feat % n_head == 0\n # We assume d_v always equals d_k\n self.d_k = n_feat // n_head\n self.h = n_head\n self.linear_q = nn.Linear(n_feat, n_feat)\n self.linear_k = nn.Linear(n_feat, n_feat)\n self.linear_v = nn.Linear(n_feat, n_feat)\n self.linear_out = nn.Linear(n_feat, n_feat)\n self.dropout = nn.Dropout(p=dropout_rate)\n\n def forward_qkv(\n self, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Transform query, key and value.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n\n Returns:\n torch.Tensor: Transformed query tensor, size\n (#batch, n_head, time1, d_k).\n torch.Tensor: Transformed key tensor, size\n (#batch, n_head, time2, d_k).\n torch.Tensor: Transformed value tensor, size\n (#batch, n_head, time2, d_k).\n\n \"\"\"\n n_batch = query.size(0)\n q = self.linear_q(query).view(n_batch, -1, self.h, self.d_k)\n k = self.linear_k(key).view(n_batch, -1, self.h, self.d_k)\n v = self.linear_v(value).view(n_batch, -1, self.h, self.d_k)\n q = q.transpose(1, 2) # (batch, head, time1, d_k)\n k = k.transpose(1, 2) # (batch, head, time2, d_k)\n v = v.transpose(1, 2) # (batch, head, time2, d_k)\n\n return q, k, v\n\n def forward_attention(\n self,\n value: torch.Tensor,\n scores: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n ) -> torch.Tensor:\n \"\"\"Compute attention context vector.\n\n Args:\n value (torch.Tensor): Transformed value, size\n (#batch, n_head, time2, d_k).\n scores (torch.Tensor): Attention score, size\n (#batch, n_head, time1, time2).\n mask (torch.Tensor): Mask, size (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n\n Returns:\n torch.Tensor: Transformed value (#batch, time1, d_model)\n weighted by the attention score (#batch, time1, time2).\n\n \"\"\"\n n_batch = value.size(0)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be True?\n # 1. onnx(16/4) [WHY? Because we feed real cache & real mask for the\n # 1st chunk to ease the onnx export.]\n # 2. pytorch training\n if mask.size(2) > 0: # time2 > 0\n mask = mask.unsqueeze(1).eq(0) # (batch, 1, *, time2)\n # For last chunk, time2 might be larger than scores.size(-1)\n mask = mask[:, :, :, : scores.size(-1)] # (batch, 1, *, time2)\n scores = scores.masked_fill(mask, -float(\"inf\"))\n attn = torch.softmax(scores, dim=-1).masked_fill(\n mask, 0.0\n ) # (batch, head, time1, time2)\n # NOTE(xcsong): When will `if mask.size(2) > 0` be False?\n # 1. onnx(16/-1, -1/-1, 16/0)\n # 2. 
jit (16/-1, -1/-1, 16/0, 16/4)\n else:\n attn = torch.softmax(scores, dim=-1) # (batch, head, time1, time2)\n\n p_attn = self.dropout(attn)\n x = torch.matmul(p_attn, value) # (batch, head, time1, d_k)\n x = (\n x.transpose(1, 2).contiguous().view(n_batch, -1, self.h * self.d_k)\n ) # (batch, time1, d_model)\n\n return self.linear_out(x) # (batch, time1, d_model)\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute scaled dot product attention.\n\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2).\n 1.When applying cross attention between decoder and encoder,\n the batch padding mask for input is in (#batch, 1, T) shape.\n 2.When applying self attention of encoder,\n the mask is in (#batch, T, T) shape.\n 3.When applying self attention of decoder,\n the mask is in (#batch, L, L) shape.\n 4.If the different position in decoder see different block\n of the encoder, such as Mocha, the passed in mask could be\n in (#batch, L, T) shape. But there is no such case in current\n Wenet.\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). 
Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n scores = torch.matmul(q, k.transpose(-2, -1)) / math.sqrt(self.d_k)\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "RelPositionMultiHeadedAttention", "path": "modules/wenet_extractor/transformer/attention.py", "snippet": "class RelPositionMultiHeadedAttention(MultiHeadedAttention):\n \"\"\"Multi-Head Attention layer with relative position encoding.\n Paper: https://arxiv.org/abs/1901.02860\n Args:\n n_head (int): The number of heads.\n n_feat (int): The number of features.\n dropout_rate (float): Dropout rate.\n \"\"\"\n\n def __init__(self, n_head, n_feat, dropout_rate):\n \"\"\"Construct an RelPositionMultiHeadedAttention object.\"\"\"\n super().__init__(n_head, n_feat, dropout_rate)\n # linear transformation for positional encoding\n self.linear_pos = nn.Linear(n_feat, n_feat, bias=False)\n # these two learnable bias are used in matrix c and matrix d\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n self.pos_bias_u = nn.Parameter(torch.Tensor(self.h, self.d_k))\n self.pos_bias_v = nn.Parameter(torch.Tensor(self.h, self.d_k))\n torch.nn.init.xavier_uniform_(self.pos_bias_u)\n torch.nn.init.xavier_uniform_(self.pos_bias_v)\n\n def rel_shift(self, x, zero_triu: bool = False):\n \"\"\"Compute relative positinal encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, size).\n zero_triu (bool): If true, return the lower triangular part of\n the matrix.\n Returns:\n torch.Tensor: Output tensor.\n \"\"\"\n\n zero_pad = torch.zeros(\n (x.size()[0], x.size()[1], x.size()[2], 1), device=x.device, dtype=x.dtype\n )\n x_padded = torch.cat([zero_pad, x], dim=-1)\n\n x_padded = x_padded.view(x.size()[0], x.size()[1], x.size(3) + 1, x.size(2))\n x = x_padded[:, :, 1:].view_as(x)\n\n if zero_triu:\n ones = torch.ones((x.size(2), x.size(3)))\n x = x * torch.tril(ones, x.size(3) - x.size(2))[None, None, :, :]\n\n return x\n\n def forward(\n self,\n query: torch.Tensor,\n key: torch.Tensor,\n value: torch.Tensor,\n mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n pos_emb: torch.Tensor = torch.empty(0),\n cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute 'Scaled Dot Product Attention' with rel. 
positional encoding.\n Args:\n query (torch.Tensor): Query tensor (#batch, time1, size).\n key (torch.Tensor): Key tensor (#batch, time2, size).\n value (torch.Tensor): Value tensor (#batch, time2, size).\n mask (torch.Tensor): Mask tensor (#batch, 1, time2) or\n (#batch, time1, time2), (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): Positional embedding tensor\n (#batch, time2, size).\n cache (torch.Tensor): Cache tensor (1, head, cache_t, d_k * 2),\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n Returns:\n torch.Tensor: Output tensor (#batch, time1, d_model).\n torch.Tensor: Cache tensor (1, head, cache_t + time1, d_k * 2)\n where `cache_t == chunk_size * num_decoding_left_chunks`\n and `head * d_k == size`\n \"\"\"\n q, k, v = self.forward_qkv(query, key, value)\n q = q.transpose(1, 2) # (batch, time1, head, d_k)\n\n # NOTE(xcsong):\n # when export onnx model, for 1st chunk, we feed\n # cache(1, head, 0, d_k * 2) (16/-1, -1/-1, 16/0 mode)\n # or cache(1, head, real_cache_t, d_k * 2) (16/4 mode).\n # In all modes, `if cache.size(0) > 0` will alwayse be `True`\n # and we will always do splitting and\n # concatnation(this will simplify onnx export). Note that\n # it's OK to concat & split zero-shaped tensors(see code below).\n # when export jit model, for 1st chunk, we always feed\n # cache(0, 0, 0, 0) since jit supports dynamic if-branch.\n # >>> a = torch.ones((1, 2, 0, 4))\n # >>> b = torch.ones((1, 2, 3, 4))\n # >>> c = torch.cat((a, b), dim=2)\n # >>> torch.equal(b, c) # True\n # >>> d = torch.split(a, 2, dim=-1)\n # >>> torch.equal(d[0], d[1]) # True\n if cache.size(0) > 0:\n key_cache, value_cache = torch.split(cache, cache.size(-1) // 2, dim=-1)\n k = torch.cat([key_cache, k], dim=2)\n v = torch.cat([value_cache, v], dim=2)\n # NOTE(xcsong): We do cache slicing in encoder.forward_chunk, since it's\n # non-trivial to calculate `next_cache_start` here.\n new_cache = torch.cat((k, v), dim=-1)\n\n n_batch_pos = pos_emb.size(0)\n p = self.linear_pos(pos_emb).view(n_batch_pos, -1, self.h, self.d_k)\n p = p.transpose(1, 2) # (batch, head, time1, d_k)\n\n # (batch, head, time1, d_k)\n q_with_bias_u = (q + self.pos_bias_u).transpose(1, 2)\n # (batch, head, time1, d_k)\n q_with_bias_v = (q + self.pos_bias_v).transpose(1, 2)\n\n # compute attention score\n # first compute matrix a and matrix c\n # as described in https://arxiv.org/abs/1901.02860 Section 3.3\n # (batch, head, time1, time2)\n matrix_ac = torch.matmul(q_with_bias_u, k.transpose(-2, -1))\n\n # compute matrix b and matrix d\n # (batch, head, time1, time2)\n matrix_bd = torch.matmul(q_with_bias_v, p.transpose(-2, -1))\n # Remove rel_shift since it is useless in speech recognition,\n # and it requires special attention for streaming.\n # matrix_bd = self.rel_shift(matrix_bd)\n\n scores = (matrix_ac + matrix_bd) / math.sqrt(\n self.d_k\n ) # (batch, head, time1, time2)\n\n return self.forward_attention(v, scores, mask), new_cache" }, { "identifier": "ConvolutionModule", "path": "modules/wenet_extractor/transformer/convolution.py", "snippet": "class ConvolutionModule(nn.Module):\n \"\"\"ConvolutionModule in Conformer model.\"\"\"\n\n def __init__(\n self,\n channels: int,\n kernel_size: int = 15,\n activation: nn.Module = nn.ReLU(),\n norm: str = \"batch_norm\",\n causal: bool = False,\n bias: bool = True,\n ):\n \"\"\"Construct an ConvolutionModule object.\n Args:\n channels (int): The number of channels of conv layers.\n kernel_size (int): Kernel size of conv layers.\n causal (int): 
Whether use causal convolution or not\n \"\"\"\n super().__init__()\n\n self.pointwise_conv1 = nn.Conv1d(\n channels,\n 2 * channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n # self.lorder is used to distinguish if it's a causal convolution,\n # if self.lorder > 0: it's a causal convolution, the input will be\n # padded with self.lorder frames on the left in forward.\n # else: it's a symmetrical convolution\n if causal:\n padding = 0\n self.lorder = kernel_size - 1\n else:\n # kernel_size should be an odd number for none causal convolution\n assert (kernel_size - 1) % 2 == 0\n padding = (kernel_size - 1) // 2\n self.lorder = 0\n self.depthwise_conv = nn.Conv1d(\n channels,\n channels,\n kernel_size,\n stride=1,\n padding=padding,\n groups=channels,\n bias=bias,\n )\n\n assert norm in [\"batch_norm\", \"layer_norm\"]\n if norm == \"batch_norm\":\n self.use_layer_norm = False\n self.norm = nn.BatchNorm1d(channels)\n else:\n self.use_layer_norm = True\n self.norm = nn.LayerNorm(channels)\n\n self.pointwise_conv2 = nn.Conv1d(\n channels,\n channels,\n kernel_size=1,\n stride=1,\n padding=0,\n bias=bias,\n )\n self.activation = activation\n\n def forward(\n self,\n x: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n cache: torch.Tensor = torch.zeros((0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute convolution module.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, channels).\n mask_pad (torch.Tensor): used for batch padding (#batch, 1, time),\n (0, 0, 0) means fake mask.\n cache (torch.Tensor): left context cache, it is only\n used in causal convolution (#batch, channels, cache_t),\n (0, 0, 0) meas fake cache.\n Returns:\n torch.Tensor: Output tensor (#batch, time, channels).\n \"\"\"\n # exchange the temporal dimension and the feature dimension\n x = x.transpose(1, 2) # (#batch, channels, time)\n\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n if self.lorder > 0:\n if cache.size(2) == 0: # cache_t == 0\n x = nn.functional.pad(x, (self.lorder, 0), \"constant\", 0.0)\n else:\n assert cache.size(0) == x.size(0) # equal batch\n assert cache.size(1) == x.size(1) # equal channel\n x = torch.cat((cache, x), dim=2)\n assert x.size(2) > self.lorder\n new_cache = x[:, :, -self.lorder :]\n else:\n # It's better we just return None if no cache is required,\n # However, for JIT export, here we just fake one tensor instead of\n # None.\n new_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n\n # GLU mechanism\n x = self.pointwise_conv1(x) # (batch, 2*channel, dim)\n x = nn.functional.glu(x, dim=1) # (batch, channel, dim)\n\n # 1D Depthwise Conv\n x = self.depthwise_conv(x)\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.activation(self.norm(x))\n if self.use_layer_norm:\n x = x.transpose(1, 2)\n x = self.pointwise_conv2(x)\n # mask batch padding\n if mask_pad.size(2) > 0: # time > 0\n x.masked_fill_(~mask_pad, 0.0)\n\n return x.transpose(1, 2), new_cache" }, { "identifier": "PositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class PositionalEncoding(torch.nn.Module):\n \"\"\"Positional encoding.\n\n :param int d_model: embedding dim\n :param float dropout_rate: dropout rate\n :param int max_len: maximum input length\n\n PE(pos, 2i) = sin(pos/(10000^(2i/dmodel)))\n PE(pos, 2i+1) = cos(pos/(10000^(2i/dmodel)))\n \"\"\"\n\n def __init__(\n self,\n d_model: int,\n dropout_rate: float,\n max_len: int = 5000,\n 
reverse: bool = False,\n ):\n \"\"\"Construct an PositionalEncoding object.\"\"\"\n super().__init__()\n self.d_model = d_model\n self.xscale = math.sqrt(self.d_model)\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n self.max_len = max_len\n\n self.pe = torch.zeros(self.max_len, self.d_model)\n position = torch.arange(0, self.max_len, dtype=torch.float32).unsqueeze(1)\n div_term = torch.exp(\n torch.arange(0, self.d_model, 2, dtype=torch.float32)\n * -(math.log(10000.0) / self.d_model)\n )\n self.pe[:, 0::2] = torch.sin(position * div_term)\n self.pe[:, 1::2] = torch.cos(position * div_term)\n self.pe = self.pe.unsqueeze(0)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Add positional encoding.\n\n Args:\n x (torch.Tensor): Input. Its shape is (batch, time, ...)\n offset (int, torch.tensor): position offset\n\n Returns:\n torch.Tensor: Encoded tensor. Its shape is (batch, time, ...)\n torch.Tensor: for compatibility to RelPositionalEncoding\n \"\"\"\n\n self.pe = self.pe.to(x.device)\n pos_emb = self.position_encoding(offset, x.size(1), False)\n x = x * self.xscale + pos_emb\n return self.dropout(x), self.dropout(pos_emb)\n\n def position_encoding(\n self, offset: Union[int, torch.Tensor], size: int, apply_dropout: bool = True\n ) -> torch.Tensor:\n \"\"\"For getting encoding in a streaming fashion\n\n Attention!!!!!\n we apply dropout only once at the whole utterance level in a none\n streaming way, but will call this function several times with\n increasing input size in a streaming scenario, so the dropout will\n be applied several times.\n\n Args:\n offset (int or torch.tensor): start offset\n size (int): required size of position encoding\n\n Returns:\n torch.Tensor: Corresponding encoding\n \"\"\"\n # How to subscript a Union type:\n # https://github.com/pytorch/pytorch/issues/69434\n if isinstance(offset, int):\n assert offset + size < self.max_len\n pos_emb = self.pe[:, offset : offset + size]\n elif isinstance(offset, torch.Tensor) and offset.dim() == 0: # scalar\n assert offset + size < self.max_len\n pos_emb = self.pe[:, offset : offset + size]\n else: # for batched streaming decoding on GPU\n assert torch.max(offset) + size < self.max_len\n index = offset.unsqueeze(1) + torch.arange(0, size).to(\n offset.device\n ) # B X T\n flag = index > 0\n # remove negative offset\n index = index * flag\n pos_emb = F.embedding(index, self.pe[0]) # B X T X d_model\n\n if apply_dropout:\n pos_emb = self.dropout(pos_emb)\n return pos_emb" }, { "identifier": "RelPositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class RelPositionalEncoding(PositionalEncoding):\n \"\"\"Relative positional encoding module.\n See : Appendix B in https://arxiv.org/abs/1901.02860\n Args:\n d_model (int): Embedding dimension.\n dropout_rate (float): Dropout rate.\n max_len (int): Maximum input length.\n \"\"\"\n\n def __init__(self, d_model: int, dropout_rate: float, max_len: int = 5000):\n \"\"\"Initialize class.\"\"\"\n super().__init__(d_model, dropout_rate, max_len, reverse=True)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Compute positional encoding.\n Args:\n x (torch.Tensor): Input tensor (batch, time, `*`).\n Returns:\n torch.Tensor: Encoded tensor (batch, time, `*`).\n torch.Tensor: Positional embedding tensor (1, time, `*`).\n \"\"\"\n self.pe = self.pe.to(x.device)\n x = x * self.xscale\n pos_emb = 
self.position_encoding(offset, x.size(1), False)\n return self.dropout(x), self.dropout(pos_emb)" }, { "identifier": "NoPositionalEncoding", "path": "modules/wenet_extractor/transformer/embedding.py", "snippet": "class NoPositionalEncoding(torch.nn.Module):\n \"\"\"No position encoding\"\"\"\n\n def __init__(self, d_model: int, dropout_rate: float):\n super().__init__()\n self.d_model = d_model\n self.dropout = torch.nn.Dropout(p=dropout_rate)\n\n def forward(\n self, x: torch.Tensor, offset: Union[int, torch.Tensor] = 0\n ) -> Tuple[torch.Tensor, torch.Tensor]:\n \"\"\"Just return zero vector for interface compatibility\"\"\"\n pos_emb = torch.zeros(1, x.size(1), self.d_model).to(x.device)\n return self.dropout(x), pos_emb\n\n def position_encoding(\n self, offset: Union[int, torch.Tensor], size: int\n ) -> torch.Tensor:\n return torch.zeros(1, size, self.d_model)" }, { "identifier": "TransformerEncoderLayer", "path": "modules/wenet_extractor/transformer/encoder_layer.py", "snippet": "class TransformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer module.\n\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`\n instance can be used as the argument.\n feed_forward (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward`, instance can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool):\n True: use layer_norm before each sub-block.\n False: to use layer_norm after each sub-block.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward: torch.nn.Module,\n dropout_rate: float,\n normalize_before: bool = True,\n ):\n \"\"\"Construct an EncoderLayer object.\"\"\"\n super().__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.norm1 = nn.LayerNorm(size, eps=1e-5)\n self.norm2 = nn.LayerNorm(size, eps=1e-5)\n self.dropout = nn.Dropout(dropout_rate)\n self.size = size\n self.normalize_before = normalize_before\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pos_emb: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Compute encoded features.\n\n Args:\n x (torch.Tensor): (#batch, time, size)\n mask (torch.Tensor): Mask tensor for the input (#batch, time,time),\n (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): just for interface compatibility\n to ConformerEncoderLayer\n mask_pad (torch.Tensor): does not used in transformer layer,\n just for unified api with conformer.\n att_cache (torch.Tensor): Cache tensor of the KEY & VALUE\n (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.\n cnn_cache (torch.Tensor): Convolution cache in conformer layer\n (#batch=1, size, cache_t2), not used here, it's for interface\n compatibility to ConformerEncoderLayer.\n Returns:\n torch.Tensor: Output tensor (#batch, time, size).\n torch.Tensor: Mask tensor (#batch, time, time).\n torch.Tensor: att_cache tensor,\n (#batch=1, head, cache_t1 + time, d_k * 2).\n torch.Tensor: cnn_cahce tensor (#batch=1, size, cache_t2).\n\n \"\"\"\n residual = x\n if self.normalize_before:\n x = self.norm1(x)\n x_att, new_att_cache = self.self_attn(x, x, x, mask, cache=att_cache)\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = 
self.norm1(x)\n\n residual = x\n if self.normalize_before:\n x = self.norm2(x)\n x = residual + self.dropout(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm2(x)\n\n fake_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n return x, mask, new_att_cache, fake_cnn_cache" }, { "identifier": "ConformerEncoderLayer", "path": "modules/wenet_extractor/transformer/encoder_layer.py", "snippet": "class ConformerEncoderLayer(nn.Module):\n \"\"\"Encoder layer module.\n Args:\n size (int): Input dimension.\n self_attn (torch.nn.Module): Self-attention module instance.\n `MultiHeadedAttention` or `RelPositionMultiHeadedAttention`\n instance can be used as the argument.\n feed_forward (torch.nn.Module): Feed-forward module instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n feed_forward_macaron (torch.nn.Module): Additional feed-forward module\n instance.\n `PositionwiseFeedForward` instance can be used as the argument.\n conv_module (torch.nn.Module): Convolution module instance.\n `ConvlutionModule` instance can be used as the argument.\n dropout_rate (float): Dropout rate.\n normalize_before (bool):\n True: use layer_norm before each sub-block.\n False: use layer_norm after each sub-block.\n \"\"\"\n\n def __init__(\n self,\n size: int,\n self_attn: torch.nn.Module,\n feed_forward: Optional[nn.Module] = None,\n feed_forward_macaron: Optional[nn.Module] = None,\n conv_module: Optional[nn.Module] = None,\n dropout_rate: float = 0.1,\n normalize_before: bool = True,\n ):\n \"\"\"Construct an EncoderLayer object.\"\"\"\n super().__init__()\n self.self_attn = self_attn\n self.feed_forward = feed_forward\n self.feed_forward_macaron = feed_forward_macaron\n self.conv_module = conv_module\n self.norm_ff = nn.LayerNorm(size, eps=1e-5) # for the FNN module\n self.norm_mha = nn.LayerNorm(size, eps=1e-5) # for the MHA module\n if feed_forward_macaron is not None:\n self.norm_ff_macaron = nn.LayerNorm(size, eps=1e-5)\n self.ff_scale = 0.5\n else:\n self.ff_scale = 1.0\n if self.conv_module is not None:\n self.norm_conv = nn.LayerNorm(size, eps=1e-5) # for the CNN module\n self.norm_final = nn.LayerNorm(\n size, eps=1e-5\n ) # for the final output of the block\n self.dropout = nn.Dropout(dropout_rate)\n self.size = size\n self.normalize_before = normalize_before\n\n def forward(\n self,\n x: torch.Tensor,\n mask: torch.Tensor,\n pos_emb: torch.Tensor,\n mask_pad: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool),\n att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0)),\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Compute encoded features.\n\n Args:\n x (torch.Tensor): (#batch, time, size)\n mask (torch.Tensor): Mask tensor for the input (#batch, time,time),\n (0, 0, 0) means fake mask.\n pos_emb (torch.Tensor): positional encoding, must not be None\n for ConformerEncoderLayer.\n mask_pad (torch.Tensor): batch padding mask used for conv module.\n (#batch, 1,time), (0, 0, 0) means fake mask.\n att_cache (torch.Tensor): Cache tensor of the KEY & VALUE\n (#batch=1, head, cache_t1, d_k * 2), head * d_k == size.\n cnn_cache (torch.Tensor): Convolution cache in conformer layer\n (#batch=1, size, cache_t2)\n Returns:\n torch.Tensor: Output tensor (#batch, time, size).\n torch.Tensor: Mask tensor (#batch, time, time).\n torch.Tensor: att_cache tensor,\n (#batch=1, head, cache_t1 + time, d_k * 2).\n torch.Tensor: cnn_cahce tensor (#batch, size, cache_t2).\n \"\"\"\n\n # 
whether to use macaron style\n if self.feed_forward_macaron is not None:\n residual = x\n if self.normalize_before:\n x = self.norm_ff_macaron(x)\n x = residual + self.ff_scale * self.dropout(self.feed_forward_macaron(x))\n if not self.normalize_before:\n x = self.norm_ff_macaron(x)\n\n # multi-headed self-attention module\n residual = x\n if self.normalize_before:\n x = self.norm_mha(x)\n x_att, new_att_cache = self.self_attn(x, x, x, mask, pos_emb, att_cache)\n x = residual + self.dropout(x_att)\n if not self.normalize_before:\n x = self.norm_mha(x)\n\n # convolution module\n # Fake new cnn cache here, and then change it in conv_module\n new_cnn_cache = torch.zeros((0, 0, 0), dtype=x.dtype, device=x.device)\n if self.conv_module is not None:\n residual = x\n if self.normalize_before:\n x = self.norm_conv(x)\n x, new_cnn_cache = self.conv_module(x, mask_pad, cnn_cache)\n x = residual + self.dropout(x)\n\n if not self.normalize_before:\n x = self.norm_conv(x)\n\n # feed forward module\n residual = x\n if self.normalize_before:\n x = self.norm_ff(x)\n\n x = residual + self.ff_scale * self.dropout(self.feed_forward(x))\n if not self.normalize_before:\n x = self.norm_ff(x)\n\n if self.conv_module is not None:\n x = self.norm_final(x)\n\n return x, mask, new_att_cache, new_cnn_cache" }, { "identifier": "PositionwiseFeedForward", "path": "modules/wenet_extractor/transformer/positionwise_feed_forward.py", "snippet": "class PositionwiseFeedForward(torch.nn.Module):\n \"\"\"Positionwise feed forward layer.\n\n FeedForward are appied on each position of the sequence.\n The output dim is same with the input dim.\n\n Args:\n idim (int): Input dimenstion.\n hidden_units (int): The number of hidden units.\n dropout_rate (float): Dropout rate.\n activation (torch.nn.Module): Activation function\n \"\"\"\n\n def __init__(\n self,\n idim: int,\n hidden_units: int,\n dropout_rate: float,\n activation: torch.nn.Module = torch.nn.ReLU(),\n ):\n \"\"\"Construct a PositionwiseFeedForward object.\"\"\"\n super(PositionwiseFeedForward, self).__init__()\n self.w_1 = torch.nn.Linear(idim, hidden_units)\n self.activation = activation\n self.dropout = torch.nn.Dropout(dropout_rate)\n self.w_2 = torch.nn.Linear(hidden_units, idim)\n\n def forward(self, xs: torch.Tensor) -> torch.Tensor:\n \"\"\"Forward function.\n\n Args:\n xs: input tensor (B, L, D)\n Returns:\n output tensor, (B, L, D)\n \"\"\"\n return self.w_2(self.dropout(self.activation(self.w_1(xs))))" }, { "identifier": "Conv2dSubsampling4", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class Conv2dSubsampling4(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/4 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an Conv2dSubsampling4 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.out = torch.nn.Sequential(\n torch.nn.Linear(odim * (((idim - 1) // 2 - 1) // 2), odim)\n )\n self.pos_enc = pos_enc_class\n # The right context for every conv layer is computed by:\n # (kernel_size - 1) * frame_rate_of_this_layer\n self.subsampling_rate = 4\n # 6 = (3 - 1) * 1 + (3 - 1) * 2\n self.right_context = 6\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, 
torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 4.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 4.\n torch.Tensor: positional encoding\n\n \"\"\"\n x = x.unsqueeze(1) # (b, c=1, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.out(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2]" }, { "identifier": "Conv2dSubsampling6", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class Conv2dSubsampling6(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/6 length).\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n pos_enc (torch.nn.Module): Custom position encoding layer.\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an Conv2dSubsampling6 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 5, 3),\n torch.nn.ReLU(),\n )\n self.linear = torch.nn.Linear(odim * (((idim - 1) // 2 - 2) // 3), odim)\n self.pos_enc = pos_enc_class\n # 10 = (3 - 1) * 1 + (5 - 1) * 2\n self.subsampling_rate = 6\n self.right_context = 10\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 6.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 6.\n torch.Tensor: positional encoding\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, 2::2][:, :, 4::3]" }, { "identifier": "Conv2dSubsampling8", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class Conv2dSubsampling8(BaseSubsampling):\n \"\"\"Convolutional 2D subsampling (to 1/8 length).\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an Conv2dSubsampling8 object.\"\"\"\n super().__init__()\n self.conv = torch.nn.Sequential(\n torch.nn.Conv2d(1, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n torch.nn.Conv2d(odim, odim, 3, 2),\n torch.nn.ReLU(),\n )\n self.linear = torch.nn.Linear(\n odim * ((((idim - 1) // 2 - 1) // 2 - 1) // 2), odim\n )\n self.pos_enc = pos_enc_class\n self.subsampling_rate = 8\n # 14 = (3 - 1) * 1 + (3 - 1) * 2 + (3 - 1) * 4\n self.right_context = 14\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Subsample x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n 
x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: Subsampled tensor (#batch, time', odim),\n where time' = time // 8.\n torch.Tensor: Subsampled mask (#batch, 1, time'),\n where time' = time // 8.\n torch.Tensor: positional encoding\n \"\"\"\n x = x.unsqueeze(1) # (b, c, t, f)\n x = self.conv(x)\n b, c, t, f = x.size()\n x = self.linear(x.transpose(1, 2).contiguous().view(b, t, c * f))\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask[:, :, 2::2][:, :, 2::2][:, :, 2::2]" }, { "identifier": "LinearNoSubsampling", "path": "modules/wenet_extractor/transformer/subsampling.py", "snippet": "class LinearNoSubsampling(BaseSubsampling):\n \"\"\"Linear transform the input without subsampling\n\n Args:\n idim (int): Input dimension.\n odim (int): Output dimension.\n dropout_rate (float): Dropout rate.\n\n \"\"\"\n\n def __init__(\n self, idim: int, odim: int, dropout_rate: float, pos_enc_class: torch.nn.Module\n ):\n \"\"\"Construct an linear object.\"\"\"\n super().__init__()\n self.out = torch.nn.Sequential(\n torch.nn.Linear(idim, odim),\n torch.nn.LayerNorm(odim, eps=1e-5),\n torch.nn.Dropout(dropout_rate),\n )\n self.pos_enc = pos_enc_class\n self.right_context = 0\n self.subsampling_rate = 1\n\n def forward(\n self,\n x: torch.Tensor,\n x_mask: torch.Tensor,\n offset: Union[int, torch.Tensor] = 0,\n ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:\n \"\"\"Input x.\n\n Args:\n x (torch.Tensor): Input tensor (#batch, time, idim).\n x_mask (torch.Tensor): Input mask (#batch, 1, time).\n\n Returns:\n torch.Tensor: linear input tensor (#batch, time', odim),\n where time' = time .\n torch.Tensor: linear input mask (#batch, 1, time'),\n where time' = time .\n\n \"\"\"\n x = self.out(x)\n x, pos_emb = self.pos_enc(x, offset)\n return x, pos_emb, x_mask" }, { "identifier": "get_activation", "path": "modules/wenet_extractor/utils/common.py", "snippet": "def get_activation(act):\n \"\"\"Return activation function.\"\"\"\n # Lazy load to avoid unused import\n from modules.wenet_extractor.transformer.swish import Swish\n\n activation_funcs = {\n \"hardtanh\": torch.nn.Hardtanh,\n \"tanh\": torch.nn.Tanh,\n \"relu\": torch.nn.ReLU,\n \"selu\": torch.nn.SELU,\n \"swish\": getattr(torch.nn, \"SiLU\", Swish),\n \"gelu\": torch.nn.GELU,\n }\n\n return activation_funcs[act]()" }, { "identifier": "make_pad_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def make_pad_mask(lengths: torch.Tensor, max_len: int = 0) -> torch.Tensor:\n \"\"\"Make mask tensor containing indices of padded part.\n\n See description of make_non_pad_mask.\n\n Args:\n lengths (torch.Tensor): Batch of lengths (B,).\n Returns:\n torch.Tensor: Mask tensor containing indices of padded part.\n\n Examples:\n >>> lengths = [5, 3, 2]\n >>> make_pad_mask(lengths)\n masks = [[0, 0, 0, 0 ,0],\n [0, 0, 0, 1, 1],\n [0, 0, 1, 1, 1]]\n \"\"\"\n batch_size = lengths.size(0)\n max_len = max_len if max_len > 0 else lengths.max().item()\n seq_range = torch.arange(0, max_len, dtype=torch.int64, device=lengths.device)\n seq_range_expand = seq_range.unsqueeze(0).expand(batch_size, max_len)\n seq_length_expand = lengths.unsqueeze(-1)\n mask = seq_range_expand >= seq_length_expand\n return mask" }, { "identifier": "add_optional_chunk_mask", "path": "modules/wenet_extractor/utils/mask.py", "snippet": "def add_optional_chunk_mask(\n xs: torch.Tensor,\n masks: torch.Tensor,\n use_dynamic_chunk: bool,\n use_dynamic_left_chunk: bool,\n decoding_chunk_size: int,\n static_chunk_size: int,\n 
num_decoding_left_chunks: int,\n):\n \"\"\"Apply optional mask for encoder.\n\n Args:\n xs (torch.Tensor): padded input, (B, L, D), L for max length\n mask (torch.Tensor): mask for xs, (B, 1, L)\n use_dynamic_chunk (bool): whether to use dynamic chunk or not\n use_dynamic_left_chunk (bool): whether to use dynamic left chunk for\n training.\n decoding_chunk_size (int): decoding chunk size for dynamic chunk, it's\n 0: default for training, use random dynamic chunk.\n <0: for decoding, use full chunk.\n >0: for decoding, use fixed chunk size as set.\n static_chunk_size (int): chunk size for static chunk training/decoding\n if it's greater than 0, if use_dynamic_chunk is true,\n this parameter will be ignored\n num_decoding_left_chunks: number of left chunks, this is for decoding,\n the chunk size is decoding_chunk_size.\n >=0: use num_decoding_left_chunks\n <0: use all left chunks\n\n Returns:\n torch.Tensor: chunk mask of the input xs.\n \"\"\"\n # Whether to use chunk mask or not\n if use_dynamic_chunk:\n max_len = xs.size(1)\n if decoding_chunk_size < 0:\n chunk_size = max_len\n num_left_chunks = -1\n elif decoding_chunk_size > 0:\n chunk_size = decoding_chunk_size\n num_left_chunks = num_decoding_left_chunks\n else:\n # chunk size is either [1, 25] or full context(max_len).\n # Since we use 4 times subsampling and allow up to 1s(100 frames)\n # delay, the maximum frame is 100 / 4 = 25.\n chunk_size = torch.randint(1, max_len, (1,)).item()\n num_left_chunks = -1\n if chunk_size > max_len // 2:\n chunk_size = max_len\n else:\n chunk_size = chunk_size % 25 + 1\n if use_dynamic_left_chunk:\n max_left_chunks = (max_len - 1) // chunk_size\n num_left_chunks = torch.randint(0, max_left_chunks, (1,)).item()\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n elif static_chunk_size > 0:\n num_left_chunks = num_decoding_left_chunks\n chunk_masks = subsequent_chunk_mask(\n xs.size(1), static_chunk_size, num_left_chunks, xs.device\n ) # (L, L)\n chunk_masks = chunk_masks.unsqueeze(0) # (1, L, L)\n chunk_masks = masks & chunk_masks # (B, L, L)\n else:\n chunk_masks = masks\n return chunk_masks" } ]
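The `add_optional_chunk_mask` snippet above delegates the actual masking to a `subsequent_chunk_mask` helper that is not quoted in this context list. As a rough sketch of the chunk-causal pattern it produces (the real helper lives in `modules/wenet_extractor/utils/mask.py` and may differ in detail, so treat the function below as an illustrative reconstruction): each position may attend to everything up to the end of its own chunk, plus a bounded number of left chunks.

```python
import torch


def chunk_causal_mask(size: int, chunk_size: int, num_left_chunks: int = -1) -> torch.Tensor:
    """Sketch of a chunk-causal attention mask (True = attendable)."""
    ret = torch.zeros(size, size, dtype=torch.bool)
    for i in range(size):
        chunk_idx = i // chunk_size
        # num_left_chunks == -1 means unlimited left context
        start = 0 if num_left_chunks < 0 else max((chunk_idx - num_left_chunks) * chunk_size, 0)
        end = min((chunk_idx + 1) * chunk_size, size)
        ret[i, start:end] = True
    return ret


# 6 frames, chunks of 2, at most 1 left chunk visible:
print(chunk_causal_mask(6, 2, num_left_chunks=1).int())
# tensor([[1, 1, 0, 0, 0, 0],
#         [1, 1, 0, 0, 0, 0],
#         [1, 1, 1, 1, 0, 0],
#         [1, 1, 1, 1, 0, 0],
#         [0, 0, 1, 1, 1, 1],
#         [0, 0, 1, 1, 1, 1]])
```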
from typing import Tuple from modules.wenet_extractor.transformer.attention import MultiHeadedAttention from modules.wenet_extractor.transformer.attention import ( RelPositionMultiHeadedAttention, ) from modules.wenet_extractor.transformer.convolution import ConvolutionModule from modules.wenet_extractor.transformer.embedding import PositionalEncoding from modules.wenet_extractor.transformer.embedding import RelPositionalEncoding from modules.wenet_extractor.transformer.embedding import NoPositionalEncoding from modules.wenet_extractor.transformer.encoder_layer import TransformerEncoderLayer from modules.wenet_extractor.transformer.encoder_layer import ConformerEncoderLayer from modules.wenet_extractor.transformer.positionwise_feed_forward import ( PositionwiseFeedForward, ) from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling4 from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling6 from modules.wenet_extractor.transformer.subsampling import Conv2dSubsampling8 from modules.wenet_extractor.transformer.subsampling import LinearNoSubsampling from modules.wenet_extractor.utils.common import get_activation from modules.wenet_extractor.utils.mask import make_pad_mask from modules.wenet_extractor.utils.mask import add_optional_chunk_mask import torch
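Since `make_pad_mask` is imported here and its full snippet appears in the context above, a quick self-contained check (with example lengths) shows how the batch padding mask relates to its 1/4-subsampled version, i.e. the `[:, :, 2::2][:, :, 2::2]` slicing that `Conv2dSubsampling4.forward` applies:

```python
import torch

lengths = torch.tensor([11, 9, 8])                 # example frames per utterance
max_len = int(lengths.max().item())
seq_range = torch.arange(max_len).unsqueeze(0).expand(len(lengths), max_len)
pad_mask = seq_range >= lengths.unsqueeze(-1)      # same logic as make_pad_mask
masks = ~pad_mask.unsqueeze(1)                     # (B, 1, T), True = real frame

sub4 = masks[:, :, 2::2][:, :, 2::2]               # mask after two stride-2 convs
print(masks.shape, sub4.shape)                     # torch.Size([3, 1, 11]) torch.Size([3, 1, 2])
```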
15,865
        if self.normalize_before:
            xs = self.after_norm(xs)

        # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2),
        #   ? may be larger than cache_t1, it depends on required_cache_size
        r_att_cache = torch.cat(r_att_cache, dim=0)
        # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2)
        r_cnn_cache = torch.cat(r_cnn_cache, dim=0)

        return (xs, r_att_cache, r_cnn_cache)

    def forward_chunk_by_chunk(
        self,
        xs: torch.Tensor,
        decoding_chunk_size: int,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Forward input chunk by chunk with chunk_size like a streaming fashion

        Here we should pay special attention to computation cache in the
        streaming style forward chunk by chunk. Three things should be taken
        into account for computation in the current network:
            1. transformer/conformer encoder layers output cache
            2. convolution in conformer
            3. convolution in subsampling

        However, we don't implement subsampling cache for:
            1. We can control subsampling module to output the right result by
                overlapping input instead of cache left context, even though it
                wastes some computation, but subsampling only takes a very
                small fraction of computation in the whole model.
            2. Typically, there are several convolution layers with subsampling
                in subsampling module, it is tricky and complicated to do cache
                with different convolution layers with different subsampling
                rate.
            3. Currently, nn.Sequential is used to stack all the convolution
                layers in subsampling, we need to rewrite it to make it work
                with cache, which is not preferred.
        Args:
            xs (torch.Tensor): (1, max_len, dim)
            chunk_size (int): decoding chunk size
        """
        assert decoding_chunk_size > 0
        # The model is trained by static or dynamic chunk
        assert self.static_chunk_size > 0 or self.use_dynamic_chunk
        subsampling = self.embed.subsampling_rate
        context = self.embed.right_context + 1  # Add current frame
        stride = subsampling * decoding_chunk_size
        decoding_window = (decoding_chunk_size - 1) * subsampling + context
        num_frames = xs.size(1)
        att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
        cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device)
        outputs = []
        offset = 0
        required_cache_size = decoding_chunk_size * num_decoding_left_chunks

        # Feed forward overlap input step by step
        for cur in range(0, num_frames - context + 1, stride):
            end = min(cur + decoding_window, num_frames)
            chunk_xs = xs[:, cur:end, :]
            (y, att_cache, cnn_cache) = self.forward_chunk(
                chunk_xs, offset, required_cache_size, att_cache, cnn_cache
            )
            outputs.append(y)
            offset += y.size(1)
        ys = torch.cat(outputs, 1)
        masks = torch.ones((1, 1, ys.size(1)), device=ys.device, dtype=torch.bool)
        return ys, masks


class TransformerEncoder(BaseEncoder):
    """Transformer encoder module."""

    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        pos_enc_layer_type: str = "abs_pos",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
    ):
        """Construct TransformerEncoder

        See Encoder for the meaning of each parameter.
""" super().__init__( input_size, output_size, attention_heads, linear_units, num_blocks, dropout_rate, positional_dropout_rate, attention_dropout_rate, input_layer, pos_enc_layer_type, normalize_before, static_chunk_size, use_dynamic_chunk, global_cmvn, use_dynamic_left_chunk, ) self.encoders = torch.nn.ModuleList( [ TransformerEncoderLayer( output_size, MultiHeadedAttention( attention_heads, output_size, attention_dropout_rate ),
# This module is from [WeNet](https://github.com/wenet-e2e/wenet).

# ## Citations

# ```bibtex
# @inproceedings{yao2021wenet,
#   title={WeNet: Production oriented Streaming and Non-streaming End-to-End Speech Recognition Toolkit},
#   author={Yao, Zhuoyuan and Wu, Di and Wang, Xiong and Zhang, Binbin and Yu, Fan and Yang, Chao and Peng, Zhendong and Chen, Xiaoyu and Xie, Lei and Lei, Xin},
#   booktitle={Proc. Interspeech},
#   year={2021},
#   address={Brno, Czech Republic},
#   organization={IEEE}
# }
# @article{zhang2022wenet,
#   title={WeNet 2.0: More Productive End-to-End Speech Recognition Toolkit},
#   author={Zhang, Binbin and Wu, Di and Peng, Zhendong and Song, Xingchen and Yao, Zhuoyuan and Lv, Hang and Xie, Lei and Yang, Chao and Pan, Fuping and Niu, Jianwei},
#   journal={arXiv preprint arXiv:2203.15455},
#   year={2022}
# }
# ```

"""Encoder definition."""


class BaseEncoder(torch.nn.Module):
    def __init__(
        self,
        input_size: int,
        output_size: int = 256,
        attention_heads: int = 4,
        linear_units: int = 2048,
        num_blocks: int = 6,
        dropout_rate: float = 0.1,
        positional_dropout_rate: float = 0.1,
        attention_dropout_rate: float = 0.0,
        input_layer: str = "conv2d",
        pos_enc_layer_type: str = "abs_pos",
        normalize_before: bool = True,
        static_chunk_size: int = 0,
        use_dynamic_chunk: bool = False,
        global_cmvn: torch.nn.Module = None,
        use_dynamic_left_chunk: bool = False,
    ):
        """
        Args:
            input_size (int): input dim
            output_size (int): dimension of attention
            attention_heads (int): the number of heads of multi head attention
            linear_units (int): the hidden units number of position-wise feed
                forward
            num_blocks (int): the number of encoder blocks
            dropout_rate (float): dropout rate
            attention_dropout_rate (float): dropout rate in attention
            positional_dropout_rate (float): dropout rate after adding
                positional encoding
            input_layer (str): input layer type.
                optional [linear, conv2d, conv2d6, conv2d8]
            pos_enc_layer_type (str): Encoder positional encoding layer type.
                optional [abs_pos, scaled_abs_pos, rel_pos, no_pos]
            normalize_before (bool):
                True: use layer_norm before each sub-block of a layer.
                False: use layer_norm after each sub-block of a layer.
            static_chunk_size (int): chunk size for static chunk training and
                decoding
            use_dynamic_chunk (bool): whether to use dynamic chunk size for
                training or not. You can only use a fixed chunk
                (chunk_size > 0) or a dynamic chunk size
                (use_dynamic_chunk = True)
            global_cmvn (Optional[torch.nn.Module]): Optional GlobalCMVN module
            use_dynamic_left_chunk (bool): whether to use dynamic left chunk in
                dynamic chunk training
        """
        super().__init__()
        self._output_size = output_size

        if pos_enc_layer_type == "abs_pos":
            pos_enc_class = PositionalEncoding
        elif pos_enc_layer_type == "rel_pos":
            pos_enc_class = RelPositionalEncoding
        elif pos_enc_layer_type == "no_pos":
            pos_enc_class = NoPositionalEncoding
        else:
            raise ValueError("unknown pos_enc_layer: " + pos_enc_layer_type)

        if input_layer == "linear":
            subsampling_class = LinearNoSubsampling
        elif input_layer == "conv2d":
            subsampling_class = Conv2dSubsampling4
        elif input_layer == "conv2d6":
            subsampling_class = Conv2dSubsampling6
        elif input_layer == "conv2d8":
            subsampling_class = Conv2dSubsampling8
        else:
            raise ValueError("unknown input_layer: " + input_layer)

        self.global_cmvn = global_cmvn
        self.embed = subsampling_class(
            input_size,
            output_size,
            dropout_rate,
            pos_enc_class(output_size, positional_dropout_rate),
        )

        self.normalize_before = normalize_before
        self.after_norm = torch.nn.LayerNorm(output_size, eps=1e-5)
        self.static_chunk_size = static_chunk_size
        self.use_dynamic_chunk = use_dynamic_chunk
        self.use_dynamic_left_chunk = use_dynamic_left_chunk

    def output_size(self) -> int:
        return self._output_size

    def forward(
        self,
        xs: torch.Tensor,
        xs_lens: torch.Tensor,
        decoding_chunk_size: int = 0,
        num_decoding_left_chunks: int = -1,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Embed positions in tensor.

        Args:
            xs: padded input tensor (B, T, D)
            xs_lens: input length (B)
            decoding_chunk_size: decoding chunk size for dynamic chunk
                0: default for training, use random dynamic chunk.
                <0: for decoding, use full chunk.
                >0: for decoding, use fixed chunk size as set.
            num_decoding_left_chunks: number of left chunks, this is for
                decoding, the chunk size is decoding_chunk_size.
>=0: use num_decoding_left_chunks <0: use all left chunks Returns: encoder output tensor xs, and subsampled masks xs: padded output tensor (B, T' ~= T/subsample_rate, D) masks: torch.Tensor batch padding mask after subsample (B, 1, T' ~= T/subsample_rate) """ T = xs.size(1) masks = ~make_pad_mask(xs_lens, T).unsqueeze(1) # (B, 1, T) if self.global_cmvn is not None: xs = self.global_cmvn(xs) xs, pos_emb, masks = self.embed(xs, masks) mask_pad = masks # (B, 1, T/subsample_rate) chunk_masks = add_optional_chunk_mask( xs, masks, self.use_dynamic_chunk, self.use_dynamic_left_chunk, decoding_chunk_size, self.static_chunk_size, num_decoding_left_chunks, ) for layer in self.encoders: xs, chunk_masks, _, _ = layer(xs, chunk_masks, pos_emb, mask_pad) if self.normalize_before: xs = self.after_norm(xs) # Here we assume the mask is not changed in encoder layers, so just # return the masks before encoder layers, and the masks will be used # for cross attention with decoder later return xs, masks def forward_chunk( self, xs: torch.Tensor, offset: int, required_cache_size: int, att_cache: torch.Tensor = torch.zeros(0, 0, 0, 0), cnn_cache: torch.Tensor = torch.zeros(0, 0, 0, 0), att_mask: torch.Tensor = torch.ones((0, 0, 0), dtype=torch.bool), ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: """ Forward just one chunk Args: xs (torch.Tensor): chunk input, with shape (b=1, time, mel-dim), where `time == (chunk_size - 1) * subsample_rate + \ subsample.right_context + 1` offset (int): current offset in encoder output time stamp required_cache_size (int): cache size required for next chunk compuation >=0: actual cache size <0: means all history cache is required att_cache (torch.Tensor): cache tensor for KEY & VALUE in transformer/conformer attention, with shape (elayers, head, cache_t1, d_k * 2), where `head * d_k == hidden-dim` and `cache_t1 == chunk_size * num_decoding_left_chunks`. cnn_cache (torch.Tensor): cache tensor for cnn_module in conformer, (elayers, b=1, hidden-dim, cache_t2), where `cache_t2 == cnn.lorder - 1` Returns: torch.Tensor: output of current input xs, with shape (b=1, chunk_size, hidden-dim). torch.Tensor: new attention cache required for next chunk, with dynamic shape (elayers, head, ?, d_k * 2) depending on required_cache_size. torch.Tensor: new conformer cnn cache required for next chunk, with same shape as the original cnn_cache. 
""" assert xs.size(0) == 1 # tmp_masks is just for interface compatibility tmp_masks = torch.ones(1, xs.size(1), device=xs.device, dtype=torch.bool) tmp_masks = tmp_masks.unsqueeze(1) if self.global_cmvn is not None: xs = self.global_cmvn(xs) # NOTE(xcsong): Before embed, shape(xs) is (b=1, time, mel-dim) xs, pos_emb, _ = self.embed(xs, tmp_masks, offset) # NOTE(xcsong): After embed, shape(xs) is (b=1, chunk_size, hidden-dim) elayers, cache_t1 = att_cache.size(0), att_cache.size(2) chunk_size = xs.size(1) attention_key_size = cache_t1 + chunk_size pos_emb = self.embed.position_encoding( offset=offset - cache_t1, size=attention_key_size ) if required_cache_size < 0: next_cache_start = 0 elif required_cache_size == 0: next_cache_start = attention_key_size else: next_cache_start = max(attention_key_size - required_cache_size, 0) r_att_cache = [] r_cnn_cache = [] for i, layer in enumerate(self.encoders): # NOTE(xcsong): Before layer.forward # shape(att_cache[i:i + 1]) is (1, head, cache_t1, d_k * 2), # shape(cnn_cache[i]) is (b=1, hidden-dim, cache_t2) xs, _, new_att_cache, new_cnn_cache = layer( xs, att_mask, pos_emb, att_cache=att_cache[i : i + 1] if elayers > 0 else att_cache, cnn_cache=cnn_cache[i] if cnn_cache.size(0) > 0 else cnn_cache, ) # NOTE(xcsong): After layer.forward # shape(new_att_cache) is (1, head, attention_key_size, d_k * 2), # shape(new_cnn_cache) is (b=1, hidden-dim, cache_t2) r_att_cache.append(new_att_cache[:, :, next_cache_start:, :]) r_cnn_cache.append(new_cnn_cache.unsqueeze(0)) if self.normalize_before: xs = self.after_norm(xs) # NOTE(xcsong): shape(r_att_cache) is (elayers, head, ?, d_k * 2), # ? may be larger than cache_t1, it depends on required_cache_size r_att_cache = torch.cat(r_att_cache, dim=0) # NOTE(xcsong): shape(r_cnn_cache) is (e, b=1, hidden-dim, cache_t2) r_cnn_cache = torch.cat(r_cnn_cache, dim=0) return (xs, r_att_cache, r_cnn_cache) def forward_chunk_by_chunk( self, xs: torch.Tensor, decoding_chunk_size: int, num_decoding_left_chunks: int = -1, ) -> Tuple[torch.Tensor, torch.Tensor]: """Forward input chunk by chunk with chunk_size like a streaming fashion Here we should pay special attention to computation cache in the streaming style forward chunk by chunk. Three things should be taken into account for computation in the current network: 1. transformer/conformer encoder layers output cache 2. convolution in conformer 3. convolution in subsampling However, we don't implement subsampling cache for: 1. We can control subsampling module to output the right result by overlapping input instead of cache left context, even though it wastes some computation, but subsampling only takes a very small fraction of computation in the whole model. 2. Typically, there are several covolution layers with subsampling in subsampling module, it is tricky and complicated to do cache with different convolution layers with different subsampling rate. 3. Currently, nn.Sequential is used to stack all the convolution layers in subsampling, we need to rewrite it to make it work with cache, which is not prefered. 
Args: xs (torch.Tensor): (1, max_len, dim) chunk_size (int): decoding chunk size """ assert decoding_chunk_size > 0 # The model is trained by static or dynamic chunk assert self.static_chunk_size > 0 or self.use_dynamic_chunk subsampling = self.embed.subsampling_rate context = self.embed.right_context + 1 # Add current frame stride = subsampling * decoding_chunk_size decoding_window = (decoding_chunk_size - 1) * subsampling + context num_frames = xs.size(1) att_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) cnn_cache: torch.Tensor = torch.zeros((0, 0, 0, 0), device=xs.device) outputs = [] offset = 0 required_cache_size = decoding_chunk_size * num_decoding_left_chunks # Feed forward overlap input step by step for cur in range(0, num_frames - context + 1, stride): end = min(cur + decoding_window, num_frames) chunk_xs = xs[:, cur:end, :] (y, att_cache, cnn_cache) = self.forward_chunk( chunk_xs, offset, required_cache_size, att_cache, cnn_cache ) outputs.append(y) offset += y.size(1) ys = torch.cat(outputs, 1) masks = torch.ones((1, 1, ys.size(1)), device=ys.device, dtype=torch.bool) return ys, masks class TransformerEncoder(BaseEncoder): """Transformer encoder module.""" def __init__( self, input_size: int, output_size: int = 256, attention_heads: int = 4, linear_units: int = 2048, num_blocks: int = 6, dropout_rate: float = 0.1, positional_dropout_rate: float = 0.1, attention_dropout_rate: float = 0.0, input_layer: str = "conv2d", pos_enc_layer_type: str = "abs_pos", normalize_before: bool = True, static_chunk_size: int = 0, use_dynamic_chunk: bool = False, global_cmvn: torch.nn.Module = None, use_dynamic_left_chunk: bool = False, ): """Construct TransformerEncoder See Encoder for the meaning of each parameter. """ super().__init__( input_size, output_size, attention_heads, linear_units, num_blocks, dropout_rate, positional_dropout_rate, attention_dropout_rate, input_layer, pos_enc_layer_type, normalize_before, static_chunk_size, use_dynamic_chunk, global_cmvn, use_dynamic_left_chunk, ) self.encoders = torch.nn.ModuleList( [ TransformerEncoderLayer( output_size, MultiHeadedAttention( attention_heads, output_size, attention_dropout_rate ),
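One detail of `forward_chunk` above that rewards a closer look is the cache-trimming rule. A minimal sketch with made-up sizes (48 available attention keys, i.e. `cache_t1 + chunk_size`); the helper name is introduced here purely for illustration and mirrors the branch in `BaseEncoder.forward_chunk`:

```python
def next_cache_start(attention_key_size: int, required_cache_size: int) -> int:
    # Mirrors the if/elif/else branch in BaseEncoder.forward_chunk.
    if required_cache_size < 0:
        return 0                         # keep the full history
    if required_cache_size == 0:
        return attention_key_size        # keep nothing
    return max(attention_key_size - required_cache_size, 0)


assert next_cache_start(48, -1) == 0     # all history retained
assert next_cache_start(48, 0) == 48     # cache emptied
assert next_cache_start(48, 32) == 16    # keep only the newest 32 keys
assert next_cache_start(48, 64) == 0     # asked for more than exists
```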
PositionwiseFeedForward(output_size, linear_units, dropout_rate),
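The gold completion above instantiates `PositionwiseFeedForward`, whose snippet appears in the context list; its defining property is that the output dimension equals the input dimension, so it can sit inside a residual branch. A quick usage sketch (example sizes; must be run inside the repository for the import to resolve):

```python
import torch
from modules.wenet_extractor.transformer.positionwise_feed_forward import (
    PositionwiseFeedForward,
)

ff = PositionwiseFeedForward(idim=256, hidden_units=2048, dropout_rate=0.1)
xs = torch.randn(2, 50, 256)   # (B, L, D)
print(ff(xs).shape)            # torch.Size([2, 50, 256]) -- output dim == input dim
```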
8
2023-11-15 09:19:27+00:00
24k
BobaZooba/xllm
tests/unit/core/test_dependencies.py
[ { "identifier": "LMCollator", "path": "src/xllm/collators/lm.py", "snippet": "class LMCollator(BaseCollator):\n \"\"\"\n `LMCollator` is a data collator class specifically designed to prepare batches of data for language modeling tasks.\n Extending the `BaseCollator`, it adapts the general data collation procedure to suit the sequential nature of\n language models, where each token in an input sequence is used to predict the next token.\n\n The `LMCollator` provides a streamlined approach to handle the conversion of raw text data into the tokenized and\n tensor-formatted inputs required by language models during training. Its primary functionality is implemented in\n the `parse_batch` method, which oversees this conversion process.\n\n This collator is needed for simple language modeling. It compiles the lists of texts from each example into a\n single text by concatenating them using a separator.\n\n Key functionalities provided by `LMCollator`:\n\n - `parse_batch`: A method that processes a batch of `RawSample` objects, creating the tensors needed for training.\n It generates input token IDs (`input_ids`), an attention mask to differentiate between real tokens and padding\n (`attention_mask`), and the labels for training the language model (`labels`).\n\n Attributes (inherited from `BaseCollator`):\n\n - `tokenizer`: The tokenizer used to convert raw text into token IDs.\n - `max_length`: The maximum length allowed for tokenized sequences. Longer sequences will be truncated\n to this length.\n - `separator`: A string used to join text pieces within each raw sample, if necessary.\n\n The `LMCollator` is particularly useful when training Transformer-based language models like GPT on\n next-token prediction tasks. Since it already applies common preprocessing steps such as tokenization, padding,\n truncation, and sequence shifting, it makes setting up training pipelines simpler and more efficient.\n\n Usage of the `LMCollator` facilitates the generation of data in the precise format required by language models,\n enabling practitioners to focus on model architecture and performance rather than boilerplate data preparation code.\n \"\"\"\n\n def parse_batch(self, raw_batch: List[RawSample]) -> Batch:\n \"\"\"\n Processes a batch of raw text samples and converts them into a suitable format for language model training,\n specifically for tasks involving language modeling (LM) such as next-token prediction.\n\n Args:\n raw_batch (`List[RawSample]`):\n A list of dictionaries, where each `RawSample` dictionary contains a key-value pair.\n The key is defined by `enums.General.text_parts` and the value is expected to be either a string,\n a numeric value, or a list of these types representing segments of text to include in the batch.\n\n Returns:\n `Batch`: A dictionary containing the following keys, each associated with its corresponding tensor:\n - `enums.Transformers.input_ids`: The input tokens, with the last token removed from each sequence,\n as input to the model.\n - `enums.Transformers.attention_mask`: The attention mask, indicating valid tokens (as 1) and padding\n tokens (as 0) for the model, also with the last token removed from each sequence.\n - `enums.Transformers.labels`: The labels used for training the language model, obtained by shifting\n the input IDs by one token to the right to predict the next token.\n\n The `parse_batch` method operates in the following steps:\n\n - Joins the text segments for each sample in the batch using the specified separator.\n - Tokenizes the 
combined texts using the provided tokenizer, with padding to the longest sequence in the batch\n and truncation to the specified maximum length.\n - Constructs the input IDs and attention mask for the model input, ensuring to remove the last token from each,\n as it has no subsequent token to predict.\n - Prepares the labels by shifting the input sequence by one token position, facilitating the LM's task of\n predicting the next token in the sequence.\n\n This collator is tailored for language modeling, where the inputs and labels are often closely related\n sequences, with labels simply being the input sequence offset by one token. It integrates smoothly with\n training routines that utilize PyTorch's DataLoader and other training utilities.\n \"\"\"\n\n texts = list()\n\n for sample in raw_batch:\n item = sample[enums.General.text_parts]\n if isinstance(item, (str, int, float)):\n texts.append(self.separator.join(str(item)))\n else:\n texts.append(self.separator.join(str(i) for i in item))\n\n tokenized = self.tokenizer(\n texts,\n return_tensors=\"pt\",\n padding=True,\n truncation=True,\n max_length=self.max_length,\n )\n\n batch = {\n enums.Transformers.input_ids: tokenized.input_ids[:, :-1],\n enums.Transformers.attention_mask: tokenized.attention_mask[:, :-1],\n enums.Transformers.labels: tokenized.input_ids[:, 1:],\n }\n\n return batch" }, { "identifier": "collators_registry", "path": "src/xllm/collators/registry.py", "snippet": "" }, { "identifier": "Config", "path": "src/xllm/core/config.py", "snippet": "class Config:\n \"\"\"\n The `Config` class serves as a comprehensive configuration schema for managing various parameters required during\n the setup and execution of experiments relating to language models, such as training, quantization, and\n optimization.\n\n Write more here:\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#config\n - https://github.com/BobaZooba/xllm/blob/main/DOCS.md#detailed-config-explanation\n\n This dataclass is used to encapsulate and standardize the configuration for a diverse range of tasks including\n dataset preparation, tokenizer and model initialization, training, evaluation, and interactions with remote services\n like the Hugging Face Model Hub.\n\n Attributes in this class cover aspects like model name and path, tokenizer settings, dataset paths, training\n strategies, quantization methods, hardware acceleration, logging, output directories, and more. The class provides\n properties with custom logic to resolve specific configurations and validation checks to ensure the environment is\n appropriately set up before proceeding with the workflow.\n\n Customization and flexibility are core to this class, as it provides reasonable defaults while also allowing for\n detailed and scalable configurations catered to advanced tasks such as leveraging LoRA, FSDP, deepspeed stage\n setups, and applying incremental quantization techniques like GPTQ and bits-and-bytes.\n\n Methods within the class include:\n - `check`: Performs checks across various attributes for compatibility and correctness.\n - Property getters such as `correct_tokenizer_name_or_path`, `lora_target_modules`, `dtype`, `deepspeed`, `fsdp`,\n and `lora_model_name_or_path_for_fusing` to fetch calculated or defaulted values based on attribute settings.\n\n Subclassing can be done to extend or modify the functionality of the `Config` class to address specific experimental\n scenarios or customized workflows. 
It is the central piece for orchestrating experimental setups and is intimately\n integrated with the rest of the codebase that operates on top of these configurations.\n\n Attributes:\n\n General Settings:\n - `experiment_key`: An enumeration key to specify the experiment type.\n - `save_safetensors`: A boolean value to indicate whether to use safe serialization for tensors.\n - `max_shard_size`: The maximum shard size when pushing the model to the HuggingFace Hub.\n - `local_rank`: Local rank for distributed training, used for logging and saving.\n - `use_gradient_checkpointing`: If set to `True`, enables gradient checkpointing to reduce memory usage at\n the cost of a slower backward pass.\n - `trainer_key`: An enumeration key to select the trainer using the trainers_registry.\n - `force_fp32`: Forces loading the model in fp32 precision, if set to `True`.\n - `force_fp16`: Forces loading the model in fp16 precision, if set to `True`.\n - `from_gptq`: Indicates if a GPTQ quantized model is being loaded.\n - `huggingface_hub_token`: Token for uploading models to HuggingFace Hub.\n - `deepspeed_stage`: Predefined DeepSpeed stage for optimization.\n - `deepspeed_config_path`: Path to the DeepSpeed config file.\n - `fsdp_strategy`: The strategy to be used for Fully Sharded Data Parallelism (FSDP).\n - `fsdp_offload`: If set to `True`, offloads weights to CPU when using FSDP to save memory.\n - `seed`: Seed for random number generators to ensure reproducibility.\n - `stabilize`: Converts some model weights to fp32 and others to bf16 for stabilization.\n - `path_to_env_file`: Custom path to the .env file for reading environment variables.\n\n Data Preparation:\n - `prepare_dataset`: Flags whether to prepare the dataset during the \"prepare\" stage.\n\n LoRA Fusing:\n - `lora_hub_model_id`: Name of the LoRA model on the hub for fusion.\n - `lora_model_local_path`: Local path to LoRA model to be fused.\n - `fused_model_local_path`: Local path to save the fused model.\n - `fuse_after_training`: If `True`, will fuse the model post-training.\n\n GPTQ Quantization:\n - `quantization_dataset_id`: Dataset ID for GPTQ quantization.\n - `quantization_max_samples`: Maximum number of samples to use during GPTQ quantization.\n - `quantized_model_path`: Path to save the GPTQ quantized model.\n - `quantized_hub_model_id`: Name of the model at the hub post-GPTQ quantization.\n - `quantized_hub_private_repo`: If set to `True`, creates a private repository for the quantized model.\n\n Dataset Related:\n - `dataset_key`: Key to select the dataset from the datasets_registry.\n - `train_local_path_to_data`: Local path to the training data file.\n - `eval_local_path_to_data`: Local path to the evaluation data file.\n - `shuffle`: If `True`, shuffles the training data.\n - `max_eval_samples`: Maximum number of examples to use for evaluation.\n - `add_eval_to_train_if_no_path`: If `True`, adds evaluation data to training if there's no separate eval path.\n\n Tokenizer Settings:\n - `tokenizer_name_or_path`: Name or path to the tokenizer.\n - `tokenizer_use_fast`: If `True`, uses the fast version of the tokenizer.\n - `tokenizer_padding_side`: Sets padding side to 'right' or 'left'.\n\n Data Collator Settings:\n - `collator_key`: Key to select the collator from the collators_registry.\n - `max_length`: Maximum sequence length for the model.\n\n Model Configuration:\n - `model_name_or_path`: Name or path to the model to be used.\n - `push_to_hub_bos_add_bos_token`: Adds BOS token when uploading tokenization 
configuration to the hub.\n - `use_flash_attention_2`: Flags the use of flash attention 2.\n - `trust_remote_code`: If `True`, trusts remote code from the HuggingFace Hub.\n - `device_map`: Device map for placing model layers on specific devices.\n - `prepare_model_for_kbit_training`: If `True`, prepares the model for k-bit training.\n\n BitsAndBytes Integration:\n - `load_in_8bit`: Load the model in 8-bit mode using bitsandbytes.\n - `load_in_4bit`: Load the model in 4-bit mode using bitsandbytes.\n - `llm_int8_threshold`: Threshold for detecting outliers in the model weights.\n - `llm_int8_has_fp16_weight`: If `True`, the model will have fp16 weights.\n - `bnb_4bit_use_double_quant`: If `True`, a second quantization step is used for 4-bit weights.\n - `bnb_4bit_quant_type`: Specifies the quantization type used for 4-bit weights.\n - `bnb_quantize_after_model_init`: Determines when the quantization should occur.\n\n GPTQ Specific Parameters:\n - `gptq_bits`: Number of bits for GPTQ quantization.\n - `gptq_group_size`: Group size for GPTQ quantization.\n - `gptq_disable_exllama`: If `True`, disables ExLlama kernels during GPTQ quantization.\n\n LoRA Specific Parameters:\n - `apply_lora`: If `True`, applies LoRA to the model.\n - `lora_rank`: LoRA rank to define the size of the LoRA matrices.\n - `lora_alpha`: Multiplication factor for the resulting LoRA matrix.\n - `lora_dropout`: Dropout rate for LoRA.\n - `raw_lora_target_modules`: Comma-separated string of module names to apply LoRA, or 'all' to apply broadly.\n\n Training Arguments:\n - `output_dir`: Path to save training outputs.\n - `per_device_train_batch_size`: Batch size per device for training.\n - `do_eval`: If `True`, performs evaluation.\n - `per_device_eval_batch_size`: Batch size per device for evaluation.\n - `gradient_accumulation_steps`: Number of steps to accumulate gradients for larger effective batch size.\n - `eval_accumulation_steps`: Number of steps to accumulate gradients during evaluation.\n - `eval_delay`: Delay before the first evaluation.\n - `eval_steps`: Number of update steps between evaluations.\n - `warmup_steps`: Number of steps for learning rate warmup.\n - `max_steps`: Maximum number of training steps.\n - `num_train_epochs`: Number of epochs for training.\n - `learning_rate`: Learning rate for the optimizer.\n - `max_grad_norm`: Gradient clipping threshold.\n - `weight_decay`: Coefficient for weight decay regularization.\n - `label_smoothing_factor`: Label smoothing factor.\n - `logging_steps`: Number of steps between logging intermediate results.\n - `save_steps`: Number of training steps between checkpoints and model upload.\n - `save_total_limit`: Maximum number of checkpoints to keep.\n - `optim`: Optimizer name, overwritten by DeepSpeed if used.\n - `push_to_hub`: If `True`, model checkpoints are uploaded to HuggingFace Hub.\n - `hub_model_id`: Name of the model on the HuggingFace Hub.\n - `hub_private_repo`: If `True`, creates a private repository on the HuggingFace Hub.\n\n Weights & Biases Integration:\n - `report_to_wandb`: If `True`, logs metrics to Weights & Biases.\n - `wandb_api_key`: API key for Weights & Biases.\n - `wandb_project`: Project name on Weights & Biases.\n - `wandb_entity`: Entity name (user or organization) on Weights & Biases.\n\n Example of creating a `Config` object:\n ```python\n config = Config(\n model_name_or_path='gpt2',\n dataset_key='my_dataset',\n gradient_accumulation_steps=8,\n max_length=512,\n deepspeed_stage=\"3\",\n )\n ```\n\n Note:\n - Throughout the 
codebase, `Config` instances are passed around to provide a unified source of configurations\n for various components.\n - It is crucial to ensure all required settings are properly set in a `Config` object before it is utilized,\n particularly when overriding defaults or specifying custom configurations.\n \"\"\"\n\n # general\n experiment_key: str = field(\n default=enums.Experiments.base,\n metadata={\"help\": \"Experiment class key\"},\n )\n save_safetensors: bool = field(\n default=True,\n metadata={\n \"help\": \"Use safe serialization (safe tensors) or not\",\n },\n )\n max_shard_size: str = field(\n default=\"10GB\", metadata={\"help\": \"max_shard_size for the model pushing to the HuggingFace Hub\"}\n )\n local_rank: int = field(\n default=0,\n metadata={\n \"help\": \"Local rank for logging and saving. Works only in distributed training\",\n },\n )\n use_gradient_checkpointing: bool = field(\n default=False,\n metadata={\n \"help\": \"If True, use gradient checkpointing to save memory at the expense of slower backward pass\",\n },\n )\n trainer_key: str = field(\n default=enums.Trainers.lm,\n metadata={\n \"help\": \"Key of the trainer for loading from trainers_registry\",\n },\n )\n force_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp32 when model loading\",\n },\n )\n force_fp16: bool = field(\n default=False,\n metadata={\n \"help\": \"Force using fp16 when model loading\",\n },\n )\n from_gptq: bool = field(\n default=False,\n metadata={\n \"help\": \"If you loadining GPTQ quantized model\",\n },\n )\n huggingface_hub_token: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"HuggingFace Hub token. You can also set this key using .env file\",\n },\n )\n single_gpu: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Indicates that you are learning on the same GPU. It is necessary to use DeepSpeed on a single GPU\",\n },\n )\n master_port: int = field(\n default=9994,\n metadata={\n \"help\": \"Master port for running DeepSpeed on a single GPU. Modify if RuntimeError: Address already in use\",\n },\n )\n deepspeed_stage: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Predifined DeepSpeed stage\",\n },\n )\n deepspeed_config_path: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Path to DeepSpeed config\",\n },\n )\n fsdp_strategy: str = field(\n default=\"\",\n metadata={\n \"help\": \"FSDP strategy\",\n },\n )\n fsdp_offload: bool = field(\n default=True,\n metadata={\n \"help\": \"Offload weights when using FSDP\",\n },\n )\n seed: int = field(\n default=42,\n metadata={\n \"help\": \"Seed value for random operations\",\n },\n )\n stabilize: bool = field(\n default=False,\n metadata={\n \"help\": \"Stabilize the model. Convert some weights (e.g. LoRA) to bf16\",\n },\n )\n norm_fp32: bool = field(\n default=False,\n metadata={\n \"help\": \"Convert norm to fp32\",\n },\n )\n path_to_env_file: Optional[str] = field(\n default=\"./.env\",\n metadata={\"help\": \"Custom path to .env file\"},\n )\n\n # prepare\n prepare_dataset: bool = field(\n default=True,\n metadata={\n \"help\": 'Prepare the dataset. Works only at \"prepare\" stage',\n },\n )\n\n # fuse\n lora_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. The name of the LoRA model at the hub for fusing. Example: BobaZooba/Shurale\",\n },\n )\n lora_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Fusing LoRA. 
Local path to the LoRA model\",\n },\n )\n fused_model_local_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Local path to fused model. Useful if you want to quantize model after fusing on the same machine\",\n },\n )\n fuse_after_training: bool = field(\n default=False,\n metadata={\n \"help\": \"Fuse or not model after training\",\n },\n )\n\n # gptq quantization\n quantization_dataset_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Dataset id for GPTQ quantization. You can install either the idi dataset, or use any dataset\",\n },\n )\n quantization_max_samples: int = field(\n default=1024,\n metadata={\n \"help\": \"Max samples for GPTQ quantization if you use custom dataset\",\n },\n )\n quantized_model_path: str = field(\n default=\"./quantized_model/\",\n metadata={\n \"help\": \"Path to GPTQ quantized model\",\n },\n )\n quantized_hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub for GPTQ quantization. Example: BobaZooba/Shurale-GPTQ\",\n },\n )\n quantized_hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository for GPTQ quantization model or not\",\n },\n )\n\n # dataset\n dataset_key: str = field(\n default=enums.Datasets.soda,\n metadata={\n \"help\": \"Key of the dataset for loading from datasets_registry\",\n },\n )\n train_local_path_to_data: str = field(\n default=\"./train.jsonl\",\n metadata={\n \"help\": \"The path to the local training data file\",\n },\n )\n eval_local_path_to_data: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The path to the local eval data file\",\n },\n )\n shuffle: bool = field(\n default=True,\n metadata={\n \"help\": \"Shuffle training data\",\n },\n )\n max_eval_samples: int = field(\n default=1_000,\n metadata={\n \"help\": \"Maximum number of examples for evaluation\",\n },\n )\n add_eval_to_train_if_no_path: bool = field(\n default=False,\n metadata={\n \"help\": \"Add evaluation data to training data if their number is greater than max_eval_samples\",\n },\n )\n\n # tokenizer\n tokenizer_name_or_path: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Tokenizer name or path. If the value is not set, \"\n \"then it will be taken from the model_name_or_path\",\n },\n )\n tokenizer_use_fast: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Use fast flag for the tokenizer\",\n },\n )\n tokenizer_padding_side: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Padding side of the collator: None, right or left\",\n },\n )\n\n # collator\n collator_key: str = field(\n default=enums.Collators.lm,\n metadata={\n \"help\": \"Key of the collator for loading from collators_registry\",\n },\n )\n max_length: int = field(\n default=2048,\n metadata={\n \"help\": \"Max sequence length of the model\",\n },\n )\n\n # model\n model_name_or_path: str = field(\n default=\"mistralai/Mistral-7B-v0.1\",\n metadata={\n \"help\": \"Model name or path. It could be from HuggingFace or locally\",\n },\n )\n push_to_hub_bos_add_bos_token: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload to the hub tokenization config with add_bos_token equals to True. Might be helpful for TGI\"\n },\n )\n use_flash_attention_2: bool = field(\n default=False,\n metadata={\n \"help\": \"Use or not flash attention 2. 
Requires 1) CUDA >= 11.6; 2) install flash-attn 3) compatible model\",\n },\n )\n trust_remote_code: bool = field(\n default=False,\n metadata={\n \"help\": \"Trust remote code from HuggingFace\",\n },\n )\n device_map: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Device map for loading the model\",\n },\n )\n prepare_model_for_kbit_training: Optional[bool] = field(\n default=None,\n metadata={\n \"help\": \"Prepare or not for kbit training\",\n },\n )\n offload_folder: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Offloading folder. Helps for fusing in colab\",\n },\n )\n\n # bitsandbytes\n load_in_8bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 8 bit using bitsandbytes\",\n },\n )\n load_in_4bit: bool = field(\n default=False,\n metadata={\n \"help\": \"Load the model in 4 bit using bitsandbytes\",\n },\n )\n llm_int8_threshold: float = field(\n default=6.0,\n metadata={\n \"help\": \"Threshold for outlier detection\",\n },\n )\n llm_int8_has_fp16_weight: bool = field(\n default=True,\n metadata={\n \"help\": \"LLM has weights in fp16\",\n },\n )\n bnb_4bit_use_double_quant: bool = field(\n default=True,\n metadata={\n \"help\": \"Double quantization. \"\n \"This will enable a second quantization after the first \"\n \"one to save an additional 0.4 bits per parameter\",\n },\n )\n bnb_4bit_quant_type: str = field(\n default=\"nf4\",\n metadata={\n \"help\": \"Quantization type for 4 bit\",\n },\n )\n bnb_quantize_after_model_init: bool = field(\n default=False, metadata={\"help\": \"If False, quantization will be at model init\"}\n )\n\n # gptq\n gptq_bits: int = field(\n default=4,\n metadata={\n \"help\": \"Bits for GPTQ quantization\",\n },\n )\n gptq_group_size: int = field(\n default=128,\n metadata={\n \"help\": \"Group size for GPTQ quantization\",\n },\n )\n gptq_disable_exllama: bool = field(\n default=True,\n metadata={\n \"help\": \"Disable ExLlama kernels for GPTQ quantization\",\n },\n )\n\n # lora\n apply_lora: bool = field(\n default=False,\n metadata={\n \"help\": \"Apply LoRA to the model or not\",\n },\n )\n lora_rank: int = field(\n default=8,\n metadata={\n \"help\": \"LoRA rank value. LoRA matrices W_A x R and R x W_B, where R is LoRA rank\",\n },\n )\n lora_alpha: int = field(\n default=32,\n metadata={\n \"help\": \"LoRA alpha value. The resulting LoRA matrix will be multiplied by this value\",\n },\n )\n lora_dropout: float = field(\n default=0.1,\n metadata={\n \"help\": \"LoRA dropout value\",\n },\n )\n raw_lora_target_modules: str = field(\n default=\"all\",\n metadata={\n \"help\": 'Names of modules to apply LoRA. A comma-separated string, for example: \"k,q,v\". '\n 'When setting the value \"all\", LoRA will be applied to all linear layers, except for the '\n \"input embeddings and the lm_head.\",\n },\n )\n\n # training arguments\n output_dir: str = field(\n default=\"./outputs/\",\n metadata={\n \"help\": \"The path to the directory where the artifacts will be saved\",\n },\n )\n per_device_train_batch_size: int = field(\n default=2,\n metadata={\n \"help\": \"Batch size on each GPU\",\n },\n )\n do_eval: bool = field(\n default=False,\n metadata={\n \"help\": \"Run eval or not\",\n },\n )\n per_device_eval_batch_size: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Batch size on each GPU for evaluation. 
\"\n \"If None per_device_eval_batch_size equals to per_device_train_batch_size\",\n },\n )\n gradient_accumulation_steps: int = field(\n default=1,\n metadata={\n \"help\": \"Number of steps to accumulate gradients\",\n },\n )\n eval_accumulation_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Number of steps to accumulate gradients at evaluation.\"\n \"If None eval_accumulation_steps equals to gradient_accumulation_steps\",\n },\n )\n eval_delay: int = field(\n default=0,\n metadata={\n \"help\": \"Number of epochs or steps to wait for before the first \"\n \"evaluation can be performed, depending on the evaluation_strategy\"\n },\n )\n eval_steps: Optional[int] = field(\n default=1_000, metadata={\"help\": \"Number of update steps between two evaluations\"}\n )\n warmup_steps: int = field(\n default=1_000,\n metadata={\n \"help\": \"Number of steps to warm up\",\n },\n )\n max_steps: Optional[int] = field(\n default=None,\n metadata={\n \"help\": \"Maximum number of training steps\",\n },\n )\n num_train_epochs: int = field(\n default=1,\n metadata={\n \"help\": \"Number of training epochs\",\n },\n )\n learning_rate: float = field(\n default=2e-4,\n metadata={\n \"help\": \"Learning rate value\",\n },\n )\n max_grad_norm: float = field(\n default=1.0,\n metadata={\n \"help\": \"Clip grad value\",\n },\n )\n weight_decay: float = field(\n default=0.001,\n metadata={\n \"help\": \"Weight decay value\",\n },\n )\n label_smoothing_factor: float = field(\n default=0.0,\n metadata={\n \"help\": \"Label smoothing value\",\n },\n )\n logging_steps: int = field(\n default=10,\n metadata={\n \"help\": \"Number of steps between logging\",\n },\n )\n save_steps: int = field(\n default=100,\n metadata={\n \"help\": \"The number of training steps between saving the checkpoint and uploading to the hub\",\n },\n )\n save_total_limit: int = field(\n default=1,\n metadata={\n \"help\": \"The number of checkpoints that are saved locally\",\n },\n )\n optim: Optional[str] = field(\n default=\"paged_adamw_8bit\",\n metadata={\n \"help\": \"Optimizer name. It will be overwritten if you use deepspeed\",\n },\n )\n push_to_hub: bool = field(\n default=False,\n metadata={\n \"help\": \"Upload the model to the hub. \"\n \"The model will be uploaded to the hub every save_steps. \"\n \"If LoRA is used, then LoRA's weights will be loaded onto the hub\",\n },\n )\n hub_model_id: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"The name of the model at the hub. Example: BobaZooba/Shurale\",\n },\n )\n hub_private_repo: bool = field(\n default=True,\n metadata={\n \"help\": \"Private repository or not\",\n },\n )\n neftune_noise_alpha: Optional[float] = field(\n default=None,\n metadata={\n \"help\": \"If not None, this will activate NEFTune noise embeddings. \"\n \"This can drastically improve model performance for instruction fine-tuning\",\n },\n )\n\n # training traction\n project_name: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Project name for training traction services like W&B\",\n },\n )\n report_to_wandb: bool = field(\n default=False,\n metadata={\n \"help\": \"Report or not to Weight & Biases\",\n },\n )\n wandb_api_key: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases API key. You can also set this key using .env file\",\n },\n )\n wandb_project: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Depreacted, use project_name. 
Weight & Biases project name\",\n },\n )\n wandb_entity: Optional[str] = field(\n default=None,\n metadata={\n \"help\": \"Weight & Biases entity name (user or company)\",\n },\n )\n\n def __post_init__(self):\n if self.huggingface_hub_token is not None:\n os.environ[enums.EnvironmentVariables.huggingface_hub_token] = self.huggingface_hub_token\n dist_logger(message=f\"Environment variable {enums.EnvironmentVariables.huggingface_hub_token} set\")\n\n if self.report_to_wandb:\n for key, value in zip(\n [\n enums.EnvironmentVariables.wandb_api_key,\n enums.EnvironmentVariables.wandb_project,\n enums.EnvironmentVariables.wandb_entity,\n ],\n [\n self.wandb_api_key,\n self.correct_project_name,\n self.wandb_entity,\n ],\n ):\n if value is not None:\n os.environ[key] = value\n dist_logger(message=f\"Environment variable {key} set\")\n else:\n os.environ[enums.EnvironmentVariables.wandb_disabled] = \"true\"\n\n @property\n def correct_project_name(self) -> Optional[str]:\n if self.project_name is not None and self.wandb_project is not None:\n dist_logger.warning(\n message=\"You set both project_name and wandb_project.\"\n \"Priority set to project_name for experiment tracking\"\n )\n return self.project_name\n elif self.project_name is not None:\n return self.project_name\n elif self.wandb_project is not None:\n dist_logger.warning(message=\"wandb_project is depreacted, please use project_name instead\")\n return self.wandb_project\n else:\n return None\n\n def check_hub(self) -> None:\n if self.push_to_hub and self.hub_model_id is None:\n raise ValueError(\"You want to push to HF hub, but hub_model_id is None\")\n elif self.hub_model_id is not None and not self.push_to_hub:\n dist_logger.warning(\"You set hub_model_id, but push_to_hub is False\")\n\n return None\n\n def apply_deepspeed_single_gpu(self) -> None:\n os.environ[enums.EnvironmentVariables.master_address] = \"localhost\"\n os.environ[enums.EnvironmentVariables.master_port] = str(self.master_port)\n os.environ[enums.EnvironmentVariables.rank] = \"0\"\n os.environ[enums.EnvironmentVariables.local_rank] = \"0\"\n os.environ[enums.EnvironmentVariables.world_size] = \"1\"\n\n def check_deepspeed(self) -> None:\n if self.deepspeed is not None:\n spec = find_spec(\"deepspeed\")\n\n if spec is None:\n raise ImportError(\"Deepspeed is not None, but failed to import deepspeed. Please install deepspeed.\")\n\n if self.single_gpu:\n self.apply_deepspeed_single_gpu()\n\n return None\n\n def check_flash_attention(self) -> None:\n if self.use_flash_attention_2:\n if not torch.cuda.is_available():\n raise ImportError(\"You want to use flash_attention_2, but CUDA is not available\")\n\n spec = find_spec(\"flash_attn\")\n\n if spec is None:\n raise ImportError(\n \"You want to use flash_attention_2, but flash-attn is not installed. Please install flash-attn.\"\n )\n\n return None\n\n def check_auto_gptq(self) -> None:\n spec = find_spec(\"auto_gptq\")\n\n if spec is None:\n raise ImportError(\n \"You want to quantize model using GPTQ, but auto-gptq is not installed. 
Please install auto-gptq.\"\n )\n\n return None\n\n def check(self) -> None:\n \"\"\"\n Performs a series of checks to validate the configuration for compatibility with the training environment.\n\n This method is responsible for ensuring that the environment is properly set up for the actions specified in\n the configuration object, such as pushing to Hugging Face's hub, using deepspeed, and using flash attention.\n\n It includes the following checks:\n - Verifies that credentials for Hugging Face hub are provided if the model is intended to be pushed to the hub.\n - Validates that deepspeed is installed if it is specified in the configuration.\n - Ensures that the necessary packages are installed for using flash attention if configured to do so.\n\n Does not return any value.\n\n Raises:\n - ValueError: If the configuration for hub interaction is incorrect.\n - ImportError: If any of the required libraries (e.g., deepspeed, flash-attn, auto-gptq) are not installed.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(...)\n # Before proceeding with training or other operations, run checks to ensure environment compatibility.\n config.check()\n ```\n\n Note:\n - Always invoke this method after initializing a `Config` object and before proceeding with model training\n or other operations that rely on the configuration settings.\n \"\"\"\n self.check_hub()\n self.check_deepspeed()\n self.check_flash_attention()\n\n return None\n\n @property\n def correct_tokenizer_name_or_path(self) -> str:\n \"\"\"\n Resolves the tokenizer name or path to be used for initializing the tokenizer.\n\n This property ensures that if a specific tokenizer name or path is not provided in the configuration object,\n the model name or path is used instead, maintaining consistency between model and tokenizer.\n\n Returns:\n `str`: The name or path of the tokenizer to be used. If `tokenizer_name_or_path` is specified in `Config`\n object, that value is used. Otherwise, `model_name_or_path` is returned as the default tokenizer identifier.\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(model_name_or_path=\"gpt2\", tokenizer_name_or_path=None)\n tokenizer_name_or_path = config.correct_tokenizer_name_or_path\n # tokenizer_name_or_path now holds the value \"gpt2\"\n ```\n\n Note:\n - It is a common practice to use the same identifier for both the model and its corresponding tokenizer.\n This property handles such a case automatically when the `tokenizer_name_or_path` is not explicitly set.\n \"\"\"\n if self.tokenizer_name_or_path is not None:\n return self.tokenizer_name_or_path\n else:\n return self.model_name_or_path\n\n @property\n def lora_target_modules(self) -> Optional[List[str]]:\n \"\"\"\n Interprets the LoRA target modules setting from the configuration to determine which model modules to apply\n LoRA to.\n\n LoRA (Low-Rank Adaptation) is a parameter-efficient training method that modifies specific layers within a\n model. 
This property is responsible for parsing the `raw_lora_target_modules` configuration to identify\n the specific modules (like attention key, query, and value matrices) that LoRA will be applied to.\n\n Returns:\n Optional[List[str]]: A list of module names to apply LoRA to if specified, otherwise `None` if LoRA should\n be applied to all eligible modules as determined by the string \"all\" in `raw_lora_target_modules`.\n\n Raises:\n ValueError: If `raw_lora_target_modules` is not set.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with LoRA targets specified.\n config = Config(raw_lora_target_modules=\"k,q,v\")\n lora_modules = config.lora_target_modules\n # lora_modules now holds the list ['k', 'q', 'v'].\n ```\n\n Note:\n - The `raw_lora_target_modules` should be provided as a comma-separated string specifying the target\n modules. If LoRA should be applied broadly, the value \"all\" can be used.\n \"\"\"\n if self.raw_lora_target_modules == \"all\":\n return None\n elif self.raw_lora_target_modules is not None:\n modules_names = [module_name.strip() for module_name in self.raw_lora_target_modules.split(\",\")]\n return modules_names\n else:\n raise ValueError(\"raw_lora_target_modules is not set\")\n\n @property\n def dtype(self) -> torch.dtype:\n \"\"\"\n Determines the appropriate PyTorch data type for the model based on availability of CUDA and configuration\n settings.\n\n This property assists in setting computational precision for training and inference (e.g., FP32, FP16, BF16),\n basing the decision on system capabilities and user preferences as defined in the `Config` object. The selected\n data type can impact both the computational efficiency and memory usage of the model operations.\n\n Returns:\n `torch.dtype`: The data type to be used for the model tensors. This can be one of the following based on the\n system's CUDA support and configuration flags: `torch.float32` (FP32), `torch.float16` (FP16), or\n `torch.bfloat16` (BF16).\n\n Example usage:\n ```python\n from xllm import Config\n\n config = Config(force_fp32=False, force_fp16=True)\n model_dtype = config.dtype\n # With CUDA available and force_fp16=True, model_dtype will be `torch.float16`,\n # since the forced FP16 flag takes precedence over BF16 support in the checks below.\n ```\n\n Note:\n - This property plays a critical role in memory management and computational efficiency, especially when\n working with large models or limited system resources.\n \"\"\"\n if not torch.cuda.is_available() or self.force_fp32:\n return torch.float32\n elif self.force_fp16:\n return torch.float16\n elif torch.cuda.is_bf16_supported():\n return torch.bfloat16\n else:\n return torch.float16\n\n @property\n def deepspeed(self) -> Optional[Dict[str, Any]]:\n \"\"\"\n Retrieves the deepspeed configuration dictionary based on settings within the `Config` object.\n\n This property parses the deepspeed settings from the configuration to construct the configuration dictionary\n used for setting up deepspeed in the model's training environment. 
It determines whether a predefined stage\n or a custom configuration file path should be utilized.\n\n Returns:\n `Optional[Dict[str, Any]]`: A dictionary containing deepspeed configurations, or `None` if deepspeed is not\n to be used.\n\n Raises:\n ValueError: If the `deepspeed_stage` specified does not correspond to a known configuration,\n or if a custom deepspeed configuration file path does not exist.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with deepspeed specifications.\n config = Config(deepspeed_stage=\"2\")\n ds_config = config.deepspeed\n # ds_config now contains the deepspeed configuration for stage 2.\n ```\n\n Note:\n - A deepspeed stage is a set of predefined configurations. If this is set, the corresponding configuration\n will be used and any custom deepspeed configuration file will be ignored.\n - If a custom deepspeed configuration file path is given and it exists, that configuration will be loaded\n and used.\n \"\"\"\n deepspeed_config: Optional[Dict[str, Any]] = None\n\n if self.deepspeed_config_path is not None:\n if os.path.isfile(self.deepspeed_config_path):\n with open(self.deepspeed_config_path) as file_object:\n deepspeed_config = json.load(file_object)\n return deepspeed_config\n else:\n raise ValueError(f\"deepspeed_config_path set to {self.deepspeed_config_path}, but not found\")\n\n if self.deepspeed_stage in [0, \"0\", \"stage_0\"]:\n return None\n\n if self.deepspeed_stage is not None:\n deepspeed_config = DS_CONFIG_MAPPER.get(self.deepspeed_stage, None)\n if deepspeed_config is None:\n raise ValueError(\n f'Deepspeed stage \"{self.deepspeed_stage}\" not found in keys: {list(DS_CONFIG_MAPPER.keys())}'\n )\n\n return deepspeed_config\n\n @property\n def fsdp(self) -> Union[str, List[str]]:\n \"\"\"\n Compiles the configurations for Fully Sharded Data Parallel (FSDP) based on the settings in the `Config` object.\n\n This property creates a list containing FSDP-related options, which informs the training process whether to\n enable FSDP and which FSDP strategy to employ.\n\n A list of options (fsdp_strategy) among the following:\n \"full_shard\": Shard parameters, gradients and optimizer states.\n \"shard_grad_op\": Shard optimizer states and gradients.\n \"offload\": Offload parameters and gradients to CPUs (only compatible with \"full_shard\" and \"shard_grad_op\").\n \"auto_wrap\": Automatically recursively wrap layers with FSDP using default_auto_wrap_policy.\n\n Returns:\n `Union[str, List[str]]`: A list of FSDP options as strings. 
It can be an empty string if FSDP is not used or\n a list with the specified FSDP strategy and options such as offloading.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with FSDP specifications.\n config = Config(fsdp_strategy=\"full_shard\", fsdp_offload=True)\n fsdp_options = config.fsdp\n ```\n\n Note:\n - FSDP strategies and options improve memory efficiency during distributed training by sharding the model's\n parameters across multiple devices.\n - The FSDP settings in the configuration should match the target training environment and system\n capabilities.\n \"\"\"\n fsdp_options = list()\n\n if self.fsdp_strategy is not None and self.fsdp_strategy != \"\":\n fsdp_options.append(self.fsdp_strategy)\n else:\n return \"\"\n\n if self.fsdp_offload:\n fsdp_options.append(FSDPOption.OFFLOAD)\n\n return fsdp_options\n\n @property\n def lora_model_name_or_path_for_fusing(self) -> str:\n \"\"\"\n Determines the name or path of the LoRA model to be used for the fusing process.\n\n This property resolves which model should be fused by checking whether a model ID from the Hugging Face hub or a\n local path to a LoRA model is provided in the configuration object. It is essential for the fusing operation\n when LoRA weights need to be integrated into the base model.\n\n Returns:\n `str`: The Hugging Face hub model ID or the local file path to the LoRA model, depending on which is\n specified.\n\n Raises:\n ValueError: If neither `lora_hub_model_id` nor `lora_model_local_path` is set, indicating that there is no\n model specified for fusing.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a Config object with a specified LoRA model on Hugging Face Hub or locally.\n config = Config(lora_hub_model_id=\"username/model-id\", lora_model_local_path=None)\n model_name_or_path = config.lora_model_name_or_path_for_fusing\n # model_name_or_path will hold the value \"username/model-id\".\n ```\n\n Note:\n - This property is specifically used during the model fusing step and should be configured correctly in\n scenarios where LoRA is utilized.\n \"\"\"\n if self.lora_hub_model_id is not None:\n return self.lora_hub_model_id\n elif self.lora_model_local_path is not None:\n return self.lora_model_local_path\n else:\n raise ValueError(\"Please set lora_hub_model_id or lora_model_local_path for fusing\")\n\n @property\n def need_to_prepare_model_for_kbit_training(self) -> bool:\n if self.prepare_model_for_kbit_training is not None:\n return self.prepare_model_for_kbit_training\n else:\n return self.from_gptq or self.load_in_4bit or self.load_in_8bit" }, { "identifier": "build_collator", "path": "src/xllm/core/dependencies.py", "snippet": "def build_collator(config: Config, tokenizer: PreTrainedTokenizer, **kwargs: Any) -> BaseCollator:\n \"\"\"\n Creates a data collator instance, which is responsible for collating batches of data when fed to the model during\n training.\n\n This function fetches the appropriate collator class from a registry using the key provided in the configuration.\n It then instantiates the collator with the tokenizer and any additional arguments necessary to prepare the data\n according to the model's requirements.\n\n Args:\n config (`Config`):\n The configuration object containing the key to identify the appropriate collator class and other related\n settings.\n tokenizer (`PreTrainedTokenizer`):\n The tokenizer instance that will be used by the collator to tokenize and encode the input data.\n **kwargs 
(`Any`):\n Additional keyword arguments that may be required by the specific collator class during instantiation.\n\n Returns:\n `BaseCollator`: An instance of a subclass of `BaseCollator` that is ready to format batches of data for the\n model.\n\n The function carries out the following operations:\n - Identifies the collator class using the `collator_key` from the configuration object's registry.\n - Initializes the collator class with the tokenizer along with any custom configurations or arguments.\n\n Raises:\n ValueError: If the type of collator found in the registry does not inherit from `BaseCollator`.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object and a loaded tokenizer.\n config = Config(...)\n tokenizer = load_tokenizer(...)\n collator = build_collator(config=config, tokenizer=tokenizer)\n # collator is now ready to be used for creating model-ready data batches during training.\n ```\n\n Note:\n - The data collator prepares and formats the data in a manner suitable for the model's input requirements.\n - Ensure the correct collator key is specified in the `Config` object for proper data collator retrieval and\n initialization.\n \"\"\"\n collator_cls: Type[BaseCollator] = collators_registry.get(key=config.collator_key)\n\n if not issubclass(collator_cls, BaseCollator):\n raise ValueError(f\"Unknown type of collator: {collator_cls.__name__}\")\n\n collator = collator_cls(tokenizer=tokenizer, max_length=config.max_length, **kwargs)\n\n return collator" }, { "identifier": "build_dataset", "path": "src/xllm/core/dependencies.py", "snippet": "def build_dataset(config: Config, is_train: bool = True, **kwargs: Any) -> Optional[BaseDataset]:\n \"\"\"\n Creates an instance of the dataset class specified in the configuration object.\n\n This function is responsible for constructing the dataset to be used for training or evaluation, leveraging the\n dataset registry to find the corresponding class and initializing it with the provided configuration path and\n arguments.\n\n Args:\n config (`Config`):\n The configuration object containing the dataset-related settings including the path to data and dataset key.\n is_train (`bool`, optional, defaults to `True`):\n A flag indicating whether to construct the training dataset or evaluation dataset. 
If `True`, the function\n constructs the training dataset; otherwise, it constructs the evaluation dataset if specified in the config.\n **kwargs (`Any`):\n Additional keyword arguments that are passed to the dataset class upon construction.\n\n Returns:\n Optional[BaseDataset]: An instance of the derived `BaseDataset` class if successfully created, otherwise `None`.\n\n The function performs the following operations:\n - Determines the path to the dataset based on whether a training or evaluation dataset is requested.\n - Retrieves the specified dataset class from the datasets registry using the key provided in the configuration.\n - Instantiates the dataset class with the determined path and any additional keyword arguments.\n\n Raises:\n ValueError: If the dataset class cannot be found in the registry or the type is not a subclass of `BaseDataset`.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with the dataset specifications.\n config = Config(...)\n train_dataset = build_dataset(config=config, is_train=True)\n eval_dataset = build_dataset(config=config, is_train=False)\n # train_dataset and eval_dataset are now ready to be used in the training process.\n ```\n\n Note:\n - If the path to data for the specified type of dataset does not exist in the configuration, the function will\n return `None`.\n - This function is designed to abstract away the dataset initialization, allowing for a centralized and\n consistent approach to dataset construction based on the configuration settings.\n \"\"\"\n if is_train:\n path_to_data = config.train_local_path_to_data\n elif config.eval_local_path_to_data is not None:\n path_to_data = config.eval_local_path_to_data\n else:\n return None\n\n dataset_cls: Type[BaseDataset] = datasets_registry.get(key=config.dataset_key)\n\n if issubclass(dataset_cls, BaseDataset):\n dataset = dataset_cls.load(path_to_data=path_to_data, **kwargs)\n else:\n raise ValueError(f\"Unknown type of dataset: {dataset_cls.__name__}\")\n\n return dataset" }, { "identifier": "build_model", "path": "src/xllm/core/dependencies.py", "snippet": "def build_model(\n config: Config,\n quantization_config: Union[BitsAndBytesConfig, GPTQConfig, None],\n low_cpu_mem_usage: Optional[bool] = None,\n) -> PreTrainedModel:\n \"\"\"\n Constructs the language model from a pretrained path with potential quantization configurations and customizations.\n\n This function loads the model specified in the configuration object. It can also apply quantization settings as\n defined by the quantization configuration or prepare the model for k-bit training if required.\n\n Args:\n config (`Config`):\n The configuration object containing model-related settings such as the model's name or path and other\n options.\n quantization_config (`Union[BitsAndBytesConfig, GPTQConfig, None]`):\n A configuration object guiding the quantization behavior of the model.\n low_cpu_mem_usage (`bool`, optional, defaults to `None`):\n A flag that, when set, instructs the model to optimize memory usage on CPUs. 
This can be helpful when\n dealing with large models or on systems with limited CPU memory resources.\n\n Returns:\n `PreTrainedModel`: An instance of a subclass of `PreTrainedModel` that has been instantiated and possibly\n quantized.\n\n The function handles the following tasks:\n - Determines whether caching should be enabled based on the gradient checkpointing setting.\n - Loads the model using the AutoModelForCausalLM class with provided configuration settings such as `dtype` and\n `device_map`.\n - Applies k-bit training preparations if configured to do so.\n - Modifies model configurations, such as disabling caching if necessary.\n\n Raises:\n ValueError: If the model's type is not supported or cannot be correctly instantiated.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with model path and quantization settings.\n config = Config(...)\n quantization_config = build_quantization_config(config=config)\n model = build_model(config=config, quantization_config=quantization_config)\n # model is now ready for training or inference.\n ```\n\n Note:\n - If quantization is intended to be applied after model initialization, the `bnb_quantize_after_model_init` flag\n should be set in the `Config` object.\n - Before calling this function, ensure that the model path and any desired customization options are properly\n set in the `Config` object.\n \"\"\"\n if config.bnb_quantize_after_model_init:\n quantization_config = None\n dist_logger(\"bnb quantization is expected later\")\n\n if config.use_gradient_checkpointing:\n use_cache = False\n else:\n use_cache = True\n\n kwargs: Dict[str, Any] = dict()\n\n if config.use_flash_attention_2:\n kwargs[\"use_flash_attention_2\"] = True\n\n if low_cpu_mem_usage is not None:\n kwargs[\"low_cpu_mem_usage\"] = low_cpu_mem_usage\n\n model = AutoModelForCausalLM.from_pretrained(\n pretrained_model_name_or_path=config.model_name_or_path,\n quantization_config=quantization_config,\n torch_dtype=config.dtype,\n trust_remote_code=config.trust_remote_code,\n device_map=config.device_map,\n use_cache=use_cache,\n **kwargs,\n )\n model.config.pretraining_tp = 1\n\n if quantization_config is not None and config.need_to_prepare_model_for_kbit_training:\n model = prepare_model_for_kbit_training(\n model=model, use_gradient_checkpointing=config.use_gradient_checkpointing\n )\n dist_logger(\n message=f\"Model prepared for kbit training. Gradient checkpointing: {config.use_gradient_checkpointing}\",\n local_rank=config.local_rank,\n )\n\n return model" }, { "identifier": "build_quantization_config", "path": "src/xllm/core/dependencies.py", "snippet": "def build_quantization_config(\n config: Config,\n) -> Union[BitsAndBytesConfig, GPTQConfig, None]:\n \"\"\"\n Constructs a configuration for model quantization based on the settings provided in the configuration object.\n\n This function generates either a `BitsAndBytesConfig` or a `GPTQConfig` instance, which are used to inform the\n quantization process for a language model. 
The function decides which quantization method to apply based on the\n flags set in the `Config` object.\n\n Args:\n config (`Config`):\n The configuration object that contains the flags and settings specifying the quantization method\n and parameters.\n\n Returns:\n Union[BitsAndBytesConfig, GPTQConfig, None]:\n An instance of the quantization configuration class corresponding to the method chosen based on the\n configuration settings, or `None` if quantization is not configured.\n\n The function inspects the configuration to determine the following:\n - If GPTQ-based quantization is specified, it sets up a `GPTQConfig` with the designated bit precision and grouping\n size.\n - If bitsandbytes (bnb) methodology is specified, it returns a `BitsAndBytesConfig` with the respective settings.\n - If neither quantization approach is specified or the required settings are absent, it returns `None`.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with quantization settings.\n config = Config(...)\n quantization_config = build_quantization_config(config=config)\n # quantization_config is now ready to be applied in the model quantization process.\n ```\n\n Note:\n - Having the correct quantization settings in the `Config` object is crucial, as they dictate the behavior\n of the quantization process and impact the model size and computational speed after quantization.\n \"\"\"\n if config.from_gptq:\n quantization_config = GPTQConfig(\n bits=config.gptq_bits,\n group_size=config.gptq_group_size,\n disable_exllama=config.gptq_disable_exllama,\n )\n elif config.load_in_8bit or config.load_in_4bit:\n quantization_config = BitsAndBytesConfig(\n load_in_8bit=config.load_in_8bit,\n load_in_4bit=config.load_in_4bit,\n llm_int8_threshold=config.llm_int8_threshold,\n llm_int8_has_fp16_weight=config.llm_int8_has_fp16_weight,\n bnb_4bit_compute_dtype=config.dtype,\n bnb_4bit_use_double_quant=config.bnb_4bit_use_double_quant,\n bnb_4bit_quant_type=config.bnb_4bit_quant_type,\n )\n else:\n quantization_config = None\n\n return quantization_config" }, { "identifier": "build_tokenizer", "path": "src/xllm/core/dependencies.py", "snippet": "def build_tokenizer(config: Config, use_fast: Optional[bool] = None) -> PreTrainedTokenizer:\n \"\"\"\n Constructs the tokenizer for processing the text according to the specifications provided in the configuration\n object.\n\n This function loads the tokenizer from the path specified in the `Config` object. If requested, it ensures the\n tokenizer uses the fast implementation (when available), and sets the padding token and side according to the given\n configuration.\n\n Args:\n config (`Config`):\n The configuration object containing tokenizer settings such as the path to the tokenizer and the desired\n padding side.\n use_fast (`bool`, optional, defaults to `None`):\n A flag indicating whether to use the fast version of the tokenizer if available. 
When set to `None`,\n it falls back to the default behavior of the tokenizer class.\n\n Returns:\n `PreTrainedTokenizer`: An instance of the `PreTrainedTokenizer` class loaded and configured as specified.\n\n The function carries out the following steps:\n - Loads the tokenizer from the pretrained path specified in the configuration.\n - If the tokenizer does not have a defined padding token, sets it to the `eos_token`.\n - If padding side settings are provided, configures the tokenizer to apply padding on the specified side.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with tokenizer path and padding preferences.\n config = Config(...)\n tokenizer = build_tokenizer(config=config)\n # tokenizer is now ready for text processing.\n ```\n\n Note:\n - This function prepares the tokenizer for use in data preparation and model input generation.\n - It is crucial to specify the tokenizer's path in the `Config` object for the correct initialization.\n \"\"\"\n kwargs = dict()\n\n if use_fast is not None:\n kwargs[\"use_fast\"] = use_fast\n\n tokenizer = AutoTokenizer.from_pretrained(\n pretrained_model_name_or_path=config.correct_tokenizer_name_or_path,\n trust_remote_code=config.trust_remote_code,\n **kwargs,\n )\n\n if tokenizer.pad_token is None:\n tokenizer.pad_token = tokenizer.eos_token\n dist_logger.info(message=\"Tokenizer pad token set to eos token\", local_rank=config.local_rank)\n\n if config.tokenizer_padding_side is not None:\n tokenizer.padding_side = config.tokenizer_padding_side\n dist_logger.info(\n message=f\"Tokenizer padding side set to {config.tokenizer_padding_side}\", local_rank=config.local_rank\n )\n\n return tokenizer" }, { "identifier": "build_trainer", "path": "src/xllm/core/dependencies.py", "snippet": "def build_trainer(\n config: Config,\n pad_token_id: int,\n training_arguments: TrainingArguments,\n model: Union[PreTrainedModel, PeftModel],\n train_dataset: BaseDataset,\n collator: BaseCollator,\n eval_dataset: Optional[BaseDataset] = None,\n **kwargs: Any,\n) -> LMTrainer:\n \"\"\"\n Instantiates and configures an LLM trainer appropriate for the model and datasets.\n\n This function retrieves the trainer class based on the provided configuration, setting it up with the specified\n model, token padding information, training arguments, datasets, and data collator.\n\n Args:\n config (`Config`):\n The configuration object containing necessary settings for the trainer, such as trainer key and model\n configuration.\n pad_token_id (`int`):\n The ID of the padding token used by the model, necessary for correctly computing the loss during training.\n training_arguments (`TrainingArguments`):\n The training arguments specifying training and evaluation parameters, such as learning rate and batch size.\n model (`Union[PreTrainedModel, PeftModel]`):\n The language model to be trained. Can be any model that is compatible with the training process.\n train_dataset (`BaseDataset`):\n The dataset object containing the training data samples.\n collator (`BaseCollator`):\n The data collator responsible for creating model-ready batches from the data samples.\n eval_dataset (`Optional[BaseDataset]`, defaults to `None`):\n The optional dataset object for evaluation. 
If provided, it is used for evaluating the model's performance\n during training.\n **kwargs (`Any`):\n Additional keyword arguments that may be required by the specific trainer instantiated.\n\n Returns:\n `LMTrainer`:\n A trainer instance of type `LMTrainer`, which extends the `Trainer` class, configured with the provided\n settings.\n\n The function follows these operations:\n - Retrieves the appropriate subclass of `Trainer` from the `trainers_registry` using a key.\n - Initializes the trainer subclass with provided arguments, configuring it for the training process.\n - Modifies the model configuration, mostly to disable caching for specific model types if necessary.\n\n Raises:\n ValueError: If the trainer class fetched from the registry does not subclass from `Trainer`.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object with trainer specifications and instance of TrainingArguments.\n config = Config(...)\n training_args = build_training_arguments(config)\n trainer = build_trainer(\n config=config,\n pad_token_id=tokenizer.pad_token_id,\n training_arguments=training_args,\n model=model,\n train_dataset=train_dataset,\n collator=collator,\n eval_dataset=eval_dataset\n )\n # trainer is now ready to start the training cycle.\n ```\n\n Note:\n - The specific subclass of `LMTrainer` depends on the `trainer_key` provided in the `Config` object,\n which allows for the use of custom training behavior if needed.\n \"\"\"\n trainer_cls = trainers_registry.get(key=config.trainer_key)\n\n if not issubclass(trainer_cls, Trainer):\n raise ValueError(f\"Unknown type of trainer: {trainer_cls.__name__}\")\n\n trainer: LMTrainer = trainer_cls(\n config=config,\n model=model,\n args=training_arguments,\n data_collator=collator,\n train_dataset=train_dataset,\n eval_dataset=eval_dataset,\n ignore_index=pad_token_id,\n **kwargs,\n )\n\n try:\n model.config.use_cache = False # type: ignore\n except Exception as exception:\n dist_logger.warning(\n message=f\"Can't set use cache to false. Exception: {exception}\",\n local_rank=config.local_rank,\n )\n\n return trainer" }, { "identifier": "build_training_arguments", "path": "src/xllm/core/dependencies.py", "snippet": "def build_training_arguments(config: Config) -> TrainingArguments:\n \"\"\"\n Constructs `TrainingArguments` for model training from the provided configuration object.\n\n This function determines the appropriate training parameters based on the system's capabilities and the user's\n configuration, setting up arguments that control aspects of the training process such as batch size, learning\n rate, weight decay, and hardware acceleration options.\n\n Args:\n config (`Config`):\n A configuration object containing necessary specifications for setting up the training environment.\n\n Returns:\n `TrainingArguments`: An instance of the `TrainingArguments` class with all the provided configuration settings\n applied. This object is then utilized by the training process.\n\n The function checks whether training is supported using mixed precision (both FP16 and BF16) depending on CUDA\n availability and settings in the config object. 
It also adjusts the weight saving and evaluation strategies\n according to the specified conditions, among other settings.\n\n Example usage:\n ```python\n from xllm import Config\n\n # Assuming a predefined Config object.\n config = Config(...)\n training_args = build_training_arguments(config=config)\n # training_args is now ready to be passed to Trainer or any training loop.\n ```\n\n Note:\n - This function does not train the model but merely prepares the arguments required for training.\n - It is important to ensure that the `Config` object has accurate and intended values, as they will directly\n impact all aspects of the model's training strategy.\n \"\"\"\n if torch.cuda.is_available():\n if torch.cuda.is_bf16_supported() and not config.force_fp16:\n fp16 = False\n bf16 = True\n else:\n fp16 = True\n bf16 = False\n else:\n fp16 = False\n bf16 = False\n\n training_arguments = TrainingArguments(\n output_dir=config.output_dir,\n per_device_train_batch_size=config.per_device_train_batch_size,\n gradient_accumulation_steps=config.gradient_accumulation_steps,\n warmup_steps=config.warmup_steps,\n learning_rate=config.learning_rate,\n max_steps=config.max_steps if config.max_steps is not None else -1,\n num_train_epochs=config.num_train_epochs,\n weight_decay=config.weight_decay,\n max_grad_norm=config.max_grad_norm,\n label_smoothing_factor=config.label_smoothing_factor,\n fp16=fp16,\n bf16=bf16,\n logging_steps=config.logging_steps,\n report_to=[\"wandb\"] if config.report_to_wandb else None,\n save_strategy=\"steps\",\n save_steps=config.save_steps,\n save_total_limit=config.save_total_limit,\n hub_model_id=config.hub_model_id,\n hub_strategy=\"checkpoint\",\n hub_token=os.environ.get(enums.EnvironmentVariables.huggingface_hub_token, None),\n push_to_hub=config.push_to_hub,\n hub_private_repo=config.hub_private_repo,\n save_safetensors=config.save_safetensors,\n fsdp=config.fsdp,\n deepspeed=config.deepspeed,\n remove_unused_columns=False,\n log_level=enums.LogLevel.info,\n disable_tqdm=False,\n logging_first_step=True,\n optim=config.optim, # will be overwritten by the deepspeed config if it exists\n do_eval=config.do_eval,\n evaluation_strategy=\"steps\" if config.do_eval else IntervalStrategy.NO,\n per_device_eval_batch_size=config.per_device_eval_batch_size or config.per_device_train_batch_size,\n eval_accumulation_steps=config.eval_accumulation_steps or config.gradient_accumulation_steps,\n eval_delay=config.eval_delay,\n eval_steps=config.eval_steps,\n seed=config.seed,\n data_seed=config.seed,\n metric_for_best_model=\"eval_loss\" if config.do_eval else \"loss\",\n neftune_noise_alpha=config.neftune_noise_alpha,\n )\n return training_arguments" }, { "identifier": "datasets_registry", "path": "src/xllm/datasets/registry.py", "snippet": "" }, { "identifier": "SodaDataset", "path": "src/xllm/datasets/soda.py", "snippet": "class SodaDataset(BaseDataset):\n HEADER_KEY = \"header\"\n DIALOG_KEY = \"dialog\"\n\n _HF_DATASET_ID = \"allenai/soda\"\n\n def __init__(self, data: List[RawSample], header_drop_probability: float = 0.05):\n super().__init__(data=data)\n self.header_drop_probability = header_drop_probability\n\n @classmethod\n def get_data(cls, config: Config) -> Optional[Tuple[List[RawSample], Optional[List[RawSample]]]]:\n soda_dataset = datasets.load_dataset(cls._HF_DATASET_ID)\n\n parsed_data: Dict[str, List[RawSample]] = dict()\n\n known_indices = set()\n\n for split in [\"train\", \"test\"]:\n parsed_data[split] = list()\n\n for sample in tqdm(soda_dataset[split], desc=f\"Parsing 
SODA {split}\"):\n index = sample.get(\"original_index\")\n\n if index in known_indices:\n continue\n\n parsed_sample = {\n cls.HEADER_KEY: sample.get(\"narrative\"),\n cls.DIALOG_KEY: [\n f\"{speaker}: {phrase}\"\n for speaker, phrase in zip(sample.get(\"speakers\"), sample.get(\"dialogue\"))\n ],\n }\n\n parsed_data[split].append(parsed_sample)\n known_indices.add(index)\n\n train = parsed_data[\"train\"]\n valid = parsed_data[\"test\"]\n\n return train, valid\n\n def get_sample(self, index: int) -> RawSample:\n sample = self.data[index]\n\n dialog = sample[self.DIALOG_KEY]\n\n phrases = list()\n\n if not isinstance(dialog, list):\n raise ValueError(f\"{self.DIALOG_KEY} of sample is not a list: {type(dialog)}\")\n\n for phrase in dialog:\n if isinstance(phrase, str):\n phrases.append(phrase)\n\n if self.HEADER_KEY in sample:\n header = sample[self.HEADER_KEY]\n\n is_drop_header = np.random.rand() <= self.header_drop_probability\n\n if not is_drop_header and isinstance(header, str):\n phrases.insert(0, header)\n\n sample = {enums.General.text_parts: [phrase.replace(\"\\n\", \" \").replace(\"\\r\", \" \") for phrase in phrases]}\n\n return sample" }, { "identifier": "trainers_registry", "path": "src/xllm/trainers/registry.py", "snippet": "" }, { "identifier": "LLAMA_TOKENIZER_DIR", "path": "tests/helpers/constants.py", "snippet": "LLAMA_TOKENIZER_DIR: str = os.path.join(TOKENIZERS_DIR, \"llama/\")" }, { "identifier": "DATA", "path": "tests/helpers/dummy_data.py", "snippet": "DATA = [\n {\n enums.General.text_parts: [\n \"Person 1: Hello\",\n \"Person 2: It's me\",\n \"Person 1: I was wondering\",\n ]\n },\n {\n enums.General.text_parts: [\n \"You are a sith lord\",\n \"Kenobi: Hello there\",\n \"General Grievous: General Kenobi\",\n ]\n },\n]" }, { "identifier": "DummyDataset", "path": "tests/helpers/dummy_data.py", "snippet": "class DummyDataset(BaseDataset):\n @classmethod\n def get_data(cls, config: Config) -> Tuple[List[RawSample], Optional[List[RawSample]]]:\n return DATA, None\n\n def get_sample(self, index: int) -> RawSample:\n return self.data[index]" }, { "identifier": "patch_from_pretrained_auto_causal_lm", "path": "tests/helpers/patches.py", "snippet": "@contextmanager\ndef patch_from_pretrained_auto_causal_lm(monkeypatch: MonkeyPatch) -> Any:\n def from_pretrained(\n pretrained_model_name_or_path: str,\n quantization_config: Union[BitsAndBytesConfig, GPTQConfig, None] = None,\n torch_dtype: dtype = torch.float16,\n trust_remote_code: bool = True,\n device_map: Union[str, Dict[str, Any], None] = None,\n use_cache: bool = False,\n use_flash_attention_2: bool = True,\n ) -> LlamaForCausalLM:\n config = LlamaConfig(\n vocab_size=32_000,\n hidden_size=8,\n intermediate_size=32,\n num_hidden_layers=2,\n num_attention_heads=2,\n max_position_embeddings=32,\n )\n model = LlamaForCausalLM(config=config)\n return model\n\n monkeypatch.setattr(AutoModelForCausalLM, \"from_pretrained\", from_pretrained)\n yield True\n monkeypatch.undo()" } ]
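The context list above documents each xllm builder in isolation; the hedged sketch below composes them into one end-to-end flow using only the signatures shown in those snippets. The model id, dataset key, and data path are hypothetical placeholders, not values taken from this record.

```python
# Hedged end-to-end sketch composing the xllm builders documented in the
# context snippets above. Signatures come from those snippets; all concrete
# values are illustrative assumptions.
from src.xllm.core.config import Config
from src.xllm.core.dependencies import (
    build_collator,
    build_dataset,
    build_model,
    build_quantization_config,
    build_tokenizer,
    build_trainer,
    build_training_arguments,
)

config = Config(
    model_name_or_path="facebook/opt-125m",    # hypothetical model id
    dataset_key="soda",                        # assumption: registry key for SodaDataset
    train_local_path_to_data="./train.jsonl",  # hypothetical path
)
config.check()  # validates hub / deepspeed / flash-attn settings (see Config.check above)

tokenizer = build_tokenizer(config=config)
quantization_config = build_quantization_config(config=config)
model = build_model(config=config, quantization_config=quantization_config)
train_dataset = build_dataset(config=config, is_train=True)
collator = build_collator(config=config, tokenizer=tokenizer)
training_arguments = build_training_arguments(config=config)

trainer = build_trainer(
    config=config,
    pad_token_id=tokenizer.pad_token_id,
    training_arguments=training_arguments,
    model=model,
    train_dataset=train_dataset,
    collator=collator,
)
trainer.train()  # LMTrainer extends transformers.Trainer, so train() is inherited
```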
import pytest from peft import PeftModel from pytest import MonkeyPatch from torch import Tensor from transformers import ( BitsAndBytesConfig, GPTQConfig, PreTrainedTokenizer, TrainingArguments, ) from src.xllm.collators.lm import LMCollator from src.xllm.collators.registry import collators_registry from src.xllm.core.config import Config from src.xllm.core.dependencies import ( build_collator, build_dataset, build_model, build_quantization_config, build_tokenizer, build_trainer, build_training_arguments, ) from src.xllm.datasets.registry import datasets_registry from src.xllm.datasets.soda import SodaDataset from src.xllm.trainers.registry import trainers_registry from tests.helpers.constants import LLAMA_TOKENIZER_DIR from tests.helpers.dummy_data import DATA, DummyDataset from tests.helpers.patches import patch_from_pretrained_auto_causal_lm
19271
def test_build_training_arguments(config: Config): arguments = build_training_arguments(config=config) assert arguments.per_device_train_batch_size == config.per_device_train_batch_size assert arguments.deepspeed is None def test_build_dataset_train(path_to_train_dummy_data: str): datasets_registry.add(key="dummy", value=DummyDataset) config = Config(dataset_key="dummy", train_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=True) assert dataset[0] is not None def test_build_dataset_eval(path_to_train_dummy_data: str): datasets_registry.add(key="dummy1", value=DummyDataset) config = Config(dataset_key="dummy1", eval_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=False) assert dataset[0] is not None def test_build_dataset_eval_none(path_to_train_dummy_data: str): datasets_registry.add(key="dummy2", value=DummyDataset) config = Config( dataset_key="dummy2", train_local_path_to_data=path_to_train_dummy_data, eval_local_path_to_data=None, ) dataset = build_dataset(config=config, is_train=False) assert dataset is None def test_build_dataset_exception(path_to_train_dummy_data: str): datasets_registry.add(key="exc", value=Config) config = Config(dataset_key="exc", train_local_path_to_data=path_to_train_dummy_data) with pytest.raises(ValueError): build_dataset(config=config, is_train=True) def test_build_tokenizer(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_tokenizer_use_fast(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config, use_fast=False) tokenizer("hello") def test_build_tokenizer_padding_size(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR, tokenizer_padding_side="right") tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_collator(config: Config, llama_tokenizer: PreTrainedTokenizer): collator = build_collator(config=config, tokenizer=llama_tokenizer) batch = collator(DATA) for value in batch.values(): assert isinstance(value, Tensor) def test_build_collator_exception(llama_tokenizer: PreTrainedTokenizer): collators_registry.add(key="exc", value=Config) config = Config(collator_key="exc") with pytest.raises(ValueError): _ = build_collator(config=config, tokenizer=llama_tokenizer) def test_build_quantization_config_bnb(): config = Config(load_in_8bit=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, BitsAndBytesConfig) assert quantization_config.load_in_8bit def test_build_quantization_config_gptq(): config = Config(gptq_bits=4, gptq_group_size=128, from_gptq=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, GPTQConfig) assert quantization_config.bits == 4 assert quantization_config.group_size == 128 def test_build_quantization_config_none(): config = Config(from_gptq=False, load_in_4bit=False, load_in_8bit=False) quantization_config = build_quantization_config(config=config) assert quantization_config is None @pytest.mark.parametrize("apply_lora", [False, True]) def test_build_model(monkeypatch: MonkeyPatch, apply_lora: bool): config = Config(apply_lora=apply_lora) with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_model_bnb_after_init(monkeypatch: MonkeyPatch): config = Config(bnb_quantize_after_model_init=True) 
with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_trainer( config: Config, training_arguments: TrainingArguments, llama_lora_model: PeftModel, soda_dataset: SodaDataset, llama_lm_collator: LMCollator, ):
# Copyright 2023 Boris Zubarev. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def test_build_training_arguments(config: Config): arguments = build_training_arguments(config=config) assert arguments.per_device_train_batch_size == config.per_device_train_batch_size assert arguments.deepspeed is None def test_build_dataset_train(path_to_train_dummy_data: str): datasets_registry.add(key="dummy", value=DummyDataset) config = Config(dataset_key="dummy", train_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=True) assert dataset[0] is not None def test_build_dataset_eval(path_to_train_dummy_data: str): datasets_registry.add(key="dummy1", value=DummyDataset) config = Config(dataset_key="dummy1", eval_local_path_to_data=path_to_train_dummy_data) dataset = build_dataset(config=config, is_train=False) assert dataset[0] is not None def test_build_dataset_eval_none(path_to_train_dummy_data: str): datasets_registry.add(key="dummy2", value=DummyDataset) config = Config( dataset_key="dummy2", train_local_path_to_data=path_to_train_dummy_data, eval_local_path_to_data=None, ) dataset = build_dataset(config=config, is_train=False) assert dataset is None def test_build_dataset_exception(path_to_train_dummy_data: str): datasets_registry.add(key="exc", value=Config) config = Config(dataset_key="exc", train_local_path_to_data=path_to_train_dummy_data) with pytest.raises(ValueError): build_dataset(config=config, is_train=True) def test_build_tokenizer(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_tokenizer_use_fast(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR) tokenizer = build_tokenizer(config=config, use_fast=False) tokenizer("hello") def test_build_tokenizer_padding_size(): config = Config(tokenizer_name_or_path=LLAMA_TOKENIZER_DIR, tokenizer_padding_side="right") tokenizer = build_tokenizer(config=config) tokenizer("hello") def test_build_collator(config: Config, llama_tokenizer: PreTrainedTokenizer): collator = build_collator(config=config, tokenizer=llama_tokenizer) batch = collator(DATA) for value in batch.values(): assert isinstance(value, Tensor) def test_build_collator_exception(llama_tokenizer: PreTrainedTokenizer): collators_registry.add(key="exc", value=Config) config = Config(collator_key="exc") with pytest.raises(ValueError): _ = build_collator(config=config, tokenizer=llama_tokenizer) def test_build_quantization_config_bnb(): config = Config(load_in_8bit=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, BitsAndBytesConfig) assert quantization_config.load_in_8bit def test_build_quantization_config_gptq(): config = Config(gptq_bits=4, gptq_group_size=128, from_gptq=True) quantization_config = build_quantization_config(config=config) assert isinstance(quantization_config, GPTQConfig) assert quantization_config.bits == 4 assert quantization_config.group_size == 128 def 
test_build_quantization_config_none(): config = Config(from_gptq=False, load_in_4bit=False, load_in_8bit=False) quantization_config = build_quantization_config(config=config) assert quantization_config is None @pytest.mark.parametrize("apply_lora", [False, True]) def test_build_model(monkeypatch: MonkeyPatch, apply_lora: bool): config = Config(apply_lora=apply_lora) with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_model_bnb_after_init(monkeypatch: MonkeyPatch): config = Config(bnb_quantize_after_model_init=True) with patch_from_pretrained_auto_causal_lm(monkeypatch=monkeypatch): _ = build_model( config=config, quantization_config=None, ) def test_build_trainer( config: Config, training_arguments: TrainingArguments, llama_lora_model: PeftModel, soda_dataset: SodaDataset, llama_lm_collator: LMCollator, ):
trainer = build_trainer(
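The `next_line` value above fixes only the first line of the gold continuation of `test_build_trainer`. As a hedged illustration of where that line plausibly leads, the sketch below wires the fixtures from the test signature into `build_trainer`'s documented parameters; everything past the first line (the keyword wiring, the `pad_token_id` value, the assertion) is an assumption, not the dataset's gold code.

```python
# Hypothetical continuation of test_build_trainer; only the first line is
# confirmed by the next_line field above, the rest is an illustrative guess.
trainer = build_trainer(
    config=config,
    pad_token_id=2,  # assumption: the llama tokenizer's eos/pad token id
    training_arguments=training_arguments,
    model=llama_lora_model,
    train_dataset=soda_dataset,
    collator=llama_lm_collator,
)
assert trainer.args is training_arguments  # assumption: simple sanity check
```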
8
2023-11-10 17:55:03+00:00
24k
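Taken together, the fields above form one completion record: `cropped_code` ends exactly where a model must predict `next_line`, `all_code` holds the full source file, and `context` carries the retrieved cross-file snippets. A minimal sketch of consuming such records follows; the JSON Lines storage, the file name, and the reading of `gold_snippet_index` as an index into `context` are all assumptions, not documented facts of this dump.

```python
# Minimal sketch for iterating records of this dump (assumes JSON Lines
# storage; field names match the record above, the path is hypothetical).
import json

with open("code_completion_records.jsonl") as handle:
    for raw in handle:
        record = json.loads(raw)
        prompt = record["cropped_code"]  # code up to the cut point
        target = record["next_line"]     # gold next line to predict
        snippets = record["context"]     # retrieved cross-file snippets
        # assumption: gold_snippet_index selects the snippet defining the completed call
        gold_snippet = snippets[record["gold_snippet_index"]]
        print(record["repo_name"], record["file_path"], target)
```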
AMAAI-Lab/mustango
diffusers/src/diffusers/models/unet_2d_condition_flax.py
[ { "identifier": "ConfigMixin", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "class ConfigMixin:\n r\"\"\"\n Base class for all configuration classes. Stores all configuration parameters under `self.config` Also handles all\n methods for loading/downloading/saving classes inheriting from [`ConfigMixin`] with\n - [`~ConfigMixin.from_config`]\n - [`~ConfigMixin.save_config`]\n\n Class attributes:\n - **config_name** (`str`) -- A filename under which the config should stored when calling\n [`~ConfigMixin.save_config`] (should be overridden by parent class).\n - **ignore_for_config** (`List[str]`) -- A list of attributes that should not be saved in the config (should be\n overridden by subclass).\n - **has_compatibles** (`bool`) -- Whether the class has compatible classes (should be overridden by subclass).\n - **_deprecated_kwargs** (`List[str]`) -- Keyword arguments that are deprecated. Note that the init function\n should only have a `kwargs` argument if at least one argument is deprecated (should be overridden by\n subclass).\n \"\"\"\n config_name = None\n ignore_for_config = []\n has_compatibles = False\n\n _deprecated_kwargs = []\n\n def register_to_config(self, **kwargs):\n if self.config_name is None:\n raise NotImplementedError(f\"Make sure that {self.__class__} has defined a class name `config_name`\")\n # Special case for `kwargs` used in deprecation warning added to schedulers\n # TODO: remove this when we remove the deprecation warning, and the `kwargs` argument,\n # or solve in a more general way.\n kwargs.pop(\"kwargs\", None)\n for key, value in kwargs.items():\n try:\n setattr(self, key, value)\n except AttributeError as err:\n logger.error(f\"Can't set {key} with value {value} for {self}\")\n raise err\n\n if not hasattr(self, \"_internal_dict\"):\n internal_dict = kwargs\n else:\n previous_dict = dict(self._internal_dict)\n internal_dict = {**self._internal_dict, **kwargs}\n logger.debug(f\"Updating config from {previous_dict} to {internal_dict}\")\n\n self._internal_dict = FrozenDict(internal_dict)\n\n def save_config(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):\n \"\"\"\n Save a configuration object to the directory `save_directory`, so that it can be re-loaded using the\n [`~ConfigMixin.from_config`] class method.\n\n Args:\n save_directory (`str` or `os.PathLike`):\n Directory where the configuration JSON file will be saved (will be created if it does not exist).\n \"\"\"\n if os.path.isfile(save_directory):\n raise AssertionError(f\"Provided path ({save_directory}) should be a directory, not a file\")\n\n os.makedirs(save_directory, exist_ok=True)\n\n # If we save using the predefined names, we can load using `from_config`\n output_config_file = os.path.join(save_directory, self.config_name)\n\n self.to_json_file(output_config_file)\n logger.info(f\"Configuration saved in {output_config_file}\")\n\n @classmethod\n def from_config(cls, config: Union[FrozenDict, Dict[str, Any]] = None, return_unused_kwargs=False, **kwargs):\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n config (`Dict[str, Any]`):\n A config dictionary from which the Python class will be instantiated. 
Make sure to only load\n configuration files of compatible classes.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether kwargs that are not consumed by the Python class should be returned or not.\n\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it has been loaded) and instantiate the Python class.\n `**kwargs` will be directly passed to the underlying scheduler/model's `__init__` method and eventually\n overwrite same-named arguments of `config`.\n\n Examples:\n\n ```python\n >>> from diffusers import DDPMScheduler, DDIMScheduler, PNDMScheduler\n\n >>> # Download scheduler from huggingface.co and cache.\n >>> scheduler = DDPMScheduler.from_pretrained(\"google/ddpm-cifar10-32\")\n\n >>> # Instantiate DDIM scheduler class with same config as DDPM\n >>> scheduler = DDIMScheduler.from_config(scheduler.config)\n\n >>> # Instantiate PNDM scheduler class with same config as DDPM\n >>> scheduler = PNDMScheduler.from_config(scheduler.config)\n ```\n \"\"\"\n # <===== TO BE REMOVED WITH DEPRECATION\n # TODO(Patrick) - make sure to remove the following lines when config==\"model_path\" is deprecated\n if \"pretrained_model_name_or_path\" in kwargs:\n config = kwargs.pop(\"pretrained_model_name_or_path\")\n\n if config is None:\n raise ValueError(\"Please make sure to provide a config as the first positional argument.\")\n # ======>\n\n if not isinstance(config, dict):\n deprecation_message = \"It is deprecated to pass a pretrained model name or path to `from_config`.\"\n if \"Scheduler\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a scheduler, please use {cls}.from_pretrained(...) instead.\"\n \" Otherwise, please make sure to pass a configuration dictionary instead. This functionality will\"\n \" be removed in v1.0.0.\"\n )\n elif \"Model\" in cls.__name__:\n deprecation_message += (\n f\"If you were trying to load a model, please use {cls}.load_config(...) followed by\"\n f\" {cls}.from_config(...) instead. Otherwise, please make sure to pass a configuration dictionary\"\n \" instead. This functionality will be removed in v1.0.0.\"\n )\n deprecate(\"config-passed-as-path\", \"1.0.0\", deprecation_message, standard_warn=False)\n config, kwargs = cls.load_config(pretrained_model_name_or_path=config, return_unused_kwargs=True, **kwargs)\n\n init_dict, unused_kwargs, hidden_dict = cls.extract_init_dict(config, **kwargs)\n\n # Allow dtype to be specified on initialization\n if \"dtype\" in unused_kwargs:\n init_dict[\"dtype\"] = unused_kwargs.pop(\"dtype\")\n\n # add possible deprecated kwargs\n for deprecated_kwarg in cls._deprecated_kwargs:\n if deprecated_kwarg in unused_kwargs:\n init_dict[deprecated_kwarg] = unused_kwargs.pop(deprecated_kwarg)\n\n # Return model and optionally state and/or unused_kwargs\n model = cls(**init_dict)\n\n # make sure to also save config parameters that might be used for compatible classes\n model.register_to_config(**hidden_dict)\n\n # add hidden kwargs of compatible classes to unused_kwargs\n unused_kwargs = {**unused_kwargs, **hidden_dict}\n\n if return_unused_kwargs:\n return (model, unused_kwargs)\n else:\n return model\n\n @classmethod\n def get_config_dict(cls, *args, **kwargs):\n deprecation_message = (\n f\" The function get_config_dict is deprecated. Please use {cls}.load_config instead. 
This function will be\"\n \" removed in version v1.0.0\"\n )\n deprecate(\"get_config_dict\", \"1.0.0\", deprecation_message, standard_warn=False)\n return cls.load_config(*args, **kwargs)\n\n @classmethod\n def load_config(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n return_unused_kwargs=False,\n return_commit_hash=False,\n **kwargs,\n ) -> Tuple[Dict[str, Any], Dict[str, Any]]:\n r\"\"\"\n Instantiate a Python class from a config dictionary\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *model id* of a model repo on huggingface.co. Valid model ids should have an\n organization name, like `google/ddpm-celebahq-256`.\n - A path to a *directory* containing model weights saved using [`~ConfigMixin.save_config`], e.g.,\n `./my_model_directory/`.\n\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, will use the token generated\n when running `transformers-cli login` (stored in `~/.huggingface`).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. 
It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n subfolder (`str`, *optional*, defaults to `\"\"`):\n In case the relevant files are located inside a subfolder of the model repo (either remote in\n huggingface.co or downloaded locally), you can specify the folder name here.\n return_unused_kwargs (`bool`, *optional*, defaults to `False`):\n Whether unused keyword arguments of the config shall be returned.\n return_commit_hash (`bool`, *optional*, defaults to `False`):\n Whether the commit_hash of the loaded configuration shall be returned.\n\n <Tip>\n\n It is required to be logged in (`huggingface-cli login`) when you want to use private or [gated\n models](https://huggingface.co/docs/hub/models-gated#gated-models).\n\n </Tip>\n\n <Tip>\n\n Activate the special [\"offline-mode\"](https://huggingface.co/transformers/installation.html#offline-mode) to\n use this method in a firewalled environment.\n\n </Tip>\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n revision = kwargs.pop(\"revision\", None)\n _ = kwargs.pop(\"mirror\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n user_agent = kwargs.pop(\"user_agent\", {})\n\n user_agent = {**user_agent, \"file_type\": \"config\"}\n user_agent = http_user_agent(user_agent)\n\n pretrained_model_name_or_path = str(pretrained_model_name_or_path)\n\n if cls.config_name is None:\n raise ValueError(\n \"`self.config_name` is not defined. Note that one should not load a config from \"\n \"`ConfigMixin`. 
Please make sure to define `config_name` in a class inheriting from `ConfigMixin`\"\n )\n\n if os.path.isfile(pretrained_model_name_or_path):\n config_file = pretrained_model_name_or_path\n elif os.path.isdir(pretrained_model_name_or_path):\n if os.path.isfile(os.path.join(pretrained_model_name_or_path, cls.config_name)):\n # Load from a PyTorch checkpoint\n config_file = os.path.join(pretrained_model_name_or_path, cls.config_name)\n elif subfolder is not None and os.path.isfile(\n os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n ):\n config_file = os.path.join(pretrained_model_name_or_path, subfolder, cls.config_name)\n else:\n raise EnvironmentError(\n f\"Error no file named {cls.config_name} found in directory {pretrained_model_name_or_path}.\"\n )\n else:\n try:\n # Load from URL or cache if already cached\n config_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=cls.config_name,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier\"\n \" listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a\"\n \" token having permission to this repo with `use_auth_token` or log in with `huggingface-cli\"\n \" login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for\"\n \" this model name. Check the model page at\"\n f\" 'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {cls.config_name}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n \"There was a specific connection error when trying to load\"\n f\" {pretrained_model_name_or_path}:\\n{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a {cls.config_name} file.\\nCheck out your internet connection or see how to\"\n \" run the library in offline mode at\"\n \" 'https://huggingface.co/docs/diffusers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load config for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. 
\"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a {cls.config_name} file\"\n )\n\n try:\n # Load config dict\n config_dict = cls._dict_from_json_file(config_file)\n\n commit_hash = extract_commit_hash(config_file)\n except (json.JSONDecodeError, UnicodeDecodeError):\n raise EnvironmentError(f\"It looks like the config file at '{config_file}' is not a valid JSON file.\")\n\n if not (return_unused_kwargs or return_commit_hash):\n return config_dict\n\n outputs = (config_dict,)\n\n if return_unused_kwargs:\n outputs += (kwargs,)\n\n if return_commit_hash:\n outputs += (commit_hash,)\n\n return outputs\n\n @staticmethod\n def _get_init_keys(cls):\n return set(dict(inspect.signature(cls.__init__).parameters).keys())\n\n @classmethod\n def extract_init_dict(cls, config_dict, **kwargs):\n # 0. Copy origin config dict\n original_dict = dict(config_dict.items())\n\n # 1. Retrieve expected config attributes from __init__ signature\n expected_keys = cls._get_init_keys(cls)\n expected_keys.remove(\"self\")\n # remove general kwargs if present in dict\n if \"kwargs\" in expected_keys:\n expected_keys.remove(\"kwargs\")\n # remove flax internal keys\n if hasattr(cls, \"_flax_internal_args\"):\n for arg in cls._flax_internal_args:\n expected_keys.remove(arg)\n\n # 2. Remove attributes that cannot be expected from expected config attributes\n # remove keys to be ignored\n if len(cls.ignore_for_config) > 0:\n expected_keys = expected_keys - set(cls.ignore_for_config)\n\n # load diffusers library to import compatible and original scheduler\n diffusers_library = importlib.import_module(__name__.split(\".\")[0])\n\n if cls.has_compatibles:\n compatible_classes = [c for c in cls._get_compatibles() if not isinstance(c, DummyObject)]\n else:\n compatible_classes = []\n\n expected_keys_comp_cls = set()\n for c in compatible_classes:\n expected_keys_c = cls._get_init_keys(c)\n expected_keys_comp_cls = expected_keys_comp_cls.union(expected_keys_c)\n expected_keys_comp_cls = expected_keys_comp_cls - cls._get_init_keys(cls)\n config_dict = {k: v for k, v in config_dict.items() if k not in expected_keys_comp_cls}\n\n # remove attributes from orig class that cannot be expected\n orig_cls_name = config_dict.pop(\"_class_name\", cls.__name__)\n if orig_cls_name != cls.__name__ and hasattr(diffusers_library, orig_cls_name):\n orig_cls = getattr(diffusers_library, orig_cls_name)\n unexpected_keys_from_orig = cls._get_init_keys(orig_cls) - expected_keys\n config_dict = {k: v for k, v in config_dict.items() if k not in unexpected_keys_from_orig}\n\n # remove private attributes\n config_dict = {k: v for k, v in config_dict.items() if not k.startswith(\"_\")}\n\n # 3. Create keyword arguments that will be passed to __init__ from expected keyword arguments\n init_dict = {}\n for key in expected_keys:\n # if config param is passed to kwarg and is present in config dict\n # it should overwrite existing config dict key\n if key in kwargs and key in config_dict:\n config_dict[key] = kwargs.pop(key)\n\n if key in kwargs:\n # overwrite key\n init_dict[key] = kwargs.pop(key)\n elif key in config_dict:\n # use value from config dict\n init_dict[key] = config_dict.pop(key)\n\n # 4. Give nice warning if unexpected values have been passed\n if len(config_dict) > 0:\n logger.warning(\n f\"The config attributes {config_dict} were passed to {cls.__name__}, \"\n \"but are not expected and will be ignored. 
Please verify your \"\n f\"{cls.config_name} configuration file.\"\n )\n\n # 5. Give nice info if config attributes are initiliazed to default because they have not been passed\n passed_keys = set(init_dict.keys())\n if len(expected_keys - passed_keys) > 0:\n logger.info(\n f\"{expected_keys - passed_keys} was not found in config. Values will be initialized to default values.\"\n )\n\n # 6. Define unused keyword arguments\n unused_kwargs = {**config_dict, **kwargs}\n\n # 7. Define \"hidden\" config parameters that were saved for compatible classes\n hidden_config_dict = {k: v for k, v in original_dict.items() if k not in init_dict}\n\n return init_dict, unused_kwargs, hidden_config_dict\n\n @classmethod\n def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):\n with open(json_file, \"r\", encoding=\"utf-8\") as reader:\n text = reader.read()\n return json.loads(text)\n\n def __repr__(self):\n return f\"{self.__class__.__name__} {self.to_json_string()}\"\n\n @property\n def config(self) -> Dict[str, Any]:\n \"\"\"\n Returns the config of the class as a frozen dictionary\n\n Returns:\n `Dict[str, Any]`: Config of the class.\n \"\"\"\n return self._internal_dict\n\n def to_json_string(self) -> str:\n \"\"\"\n Serializes this instance to a JSON string.\n\n Returns:\n `str`: String containing all the attributes that make up this configuration instance in JSON format.\n \"\"\"\n config_dict = self._internal_dict if hasattr(self, \"_internal_dict\") else {}\n config_dict[\"_class_name\"] = self.__class__.__name__\n config_dict[\"_diffusers_version\"] = __version__\n\n def to_json_saveable(value):\n if isinstance(value, np.ndarray):\n value = value.tolist()\n elif isinstance(value, PosixPath):\n value = str(value)\n return value\n\n config_dict = {k: to_json_saveable(v) for k, v in config_dict.items()}\n return json.dumps(config_dict, indent=2, sort_keys=True) + \"\\n\"\n\n def to_json_file(self, json_file_path: Union[str, os.PathLike]):\n \"\"\"\n Save this instance to a JSON file.\n\n Args:\n json_file_path (`str` or `os.PathLike`):\n Path to the JSON file in which this configuration instance's parameters will be saved.\n \"\"\"\n with open(json_file_path, \"w\", encoding=\"utf-8\") as writer:\n writer.write(self.to_json_string())" }, { "identifier": "flax_register_to_config", "path": "diffusers/src/diffusers/configuration_utils.py", "snippet": "def flax_register_to_config(cls):\n original_init = cls.__init__\n\n @functools.wraps(original_init)\n def init(self, *args, **kwargs):\n if not isinstance(self, ConfigMixin):\n raise RuntimeError(\n f\"`@register_for_config` was applied to {self.__class__.__name__} init method, but this class does \"\n \"not inherit from `ConfigMixin`.\"\n )\n\n # Ignore private kwargs in the init. 
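Stripped of the compatible-class and private-key handling, `extract_init_dict` above boils down to signature filtering: keep the keys that `__init__` can accept, report the rest. A stdlib-only sketch of that core step; the `Scheduler` class is a made-up stand-in:

```python
import inspect


def split_config_kwargs(cls, config_dict):
    """Partition a config dict into kwargs cls.__init__ accepts and leftovers."""
    expected = set(inspect.signature(cls.__init__).parameters) - {"self", "kwargs"}
    init_kwargs = {k: v for k, v in config_dict.items() if k in expected}
    unused = {k: v for k, v in config_dict.items() if k not in expected}
    return init_kwargs, unused


class Scheduler:  # hypothetical example class
    def __init__(self, num_train_timesteps=1000, beta_start=1e-4):
        self.num_train_timesteps = num_train_timesteps
        self.beta_start = beta_start


init_kwargs, unused = split_config_kwargs(
    Scheduler, {"num_train_timesteps": 50, "clip_sample": True}
)
assert init_kwargs == {"num_train_timesteps": 50}
assert unused == {"clip_sample": True}  # would be warned about as "not expected"
```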
Retrieve all passed attributes\n init_kwargs = dict(kwargs.items())\n\n # Retrieve default values\n fields = dataclasses.fields(self)\n default_kwargs = {}\n for field in fields:\n # ignore flax specific attributes\n if field.name in self._flax_internal_args:\n continue\n if type(field.default) == dataclasses._MISSING_TYPE:\n default_kwargs[field.name] = None\n else:\n default_kwargs[field.name] = getattr(self, field.name)\n\n # Make sure init_kwargs override default kwargs\n new_kwargs = {**default_kwargs, **init_kwargs}\n # dtype should be part of `init_kwargs`, but not `new_kwargs`\n if \"dtype\" in new_kwargs:\n new_kwargs.pop(\"dtype\")\n\n # Get positional arguments aligned with kwargs\n for i, arg in enumerate(args):\n name = fields[i].name\n new_kwargs[name] = arg\n\n getattr(self, \"register_to_config\")(**new_kwargs)\n original_init(self, *args, **kwargs)\n\n cls.__init__ = init\n return cls" }, { "identifier": "BaseOutput", "path": "diffusers/src/diffusers/utils/outputs.py", "snippet": "class BaseOutput(OrderedDict):\n \"\"\"\n Base class for all model outputs as dataclass. Has a `__getitem__` that allows indexing by integer or slice (like a\n tuple) or strings (like a dictionary) that will ignore the `None` attributes. Otherwise behaves like a regular\n python dictionary.\n\n <Tip warning={true}>\n\n You can't unpack a `BaseOutput` directly. Use the [`~utils.BaseOutput.to_tuple`] method to convert it to a tuple\n before.\n\n </Tip>\n \"\"\"\n\n def __post_init__(self):\n class_fields = fields(self)\n\n # Safety and consistency checks\n if not len(class_fields):\n raise ValueError(f\"{self.__class__.__name__} has no fields.\")\n\n first_field = getattr(self, class_fields[0].name)\n other_fields_are_none = all(getattr(self, field.name) is None for field in class_fields[1:])\n\n if other_fields_are_none and isinstance(first_field, dict):\n for key, value in first_field.items():\n self[key] = value\n else:\n for field in class_fields:\n v = getattr(self, field.name)\n if v is not None:\n self[field.name] = v\n\n def __delitem__(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.\")\n\n def setdefault(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``setdefault`` on a {self.__class__.__name__} instance.\")\n\n def pop(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``pop`` on a {self.__class__.__name__} instance.\")\n\n def update(self, *args, **kwargs):\n raise Exception(f\"You cannot use ``update`` on a {self.__class__.__name__} instance.\")\n\n def __getitem__(self, k):\n if isinstance(k, str):\n inner_dict = dict(self.items())\n return inner_dict[k]\n else:\n return self.to_tuple()[k]\n\n def __setattr__(self, name, value):\n if name in self.keys() and value is not None:\n # Don't call self.__setitem__ to avoid recursion errors\n super().__setitem__(name, value)\n super().__setattr__(name, value)\n\n def __setitem__(self, key, value):\n # Will raise a KeyException if needed\n super().__setitem__(key, value)\n # Don't call self.__setattr__ to avoid recursion errors\n super().__setattr__(key, value)\n\n def to_tuple(self) -> Tuple[Any]:\n \"\"\"\n Convert self to a tuple containing all the attributes/keys that are not `None`.\n \"\"\"\n return tuple(self[k] for k in self.keys())" }, { "identifier": "FlaxTimestepEmbedding", "path": "diffusers/src/diffusers/models/embeddings_flax.py", "snippet": "class FlaxTimestepEmbedding(nn.Module):\n r\"\"\"\n Time step Embedding Module. 
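The `BaseOutput` snippet above gives pipeline outputs dict-style and tuple-style access at once. A small usage sketch with a hypothetical `ToyOutput` subclass; note how a `None` field is silently dropped by `__post_init__`:

```python
from dataclasses import dataclass

import numpy as np

from diffusers.utils import BaseOutput


@dataclass
class ToyOutput(BaseOutput):  # hypothetical subclass, for demonstration only
    sample: np.ndarray
    extra: np.ndarray = None  # stays None, so it never becomes a key


out = ToyOutput(sample=np.zeros((1, 4, 8, 8)))
assert out["sample"] is out.sample   # string indexing, like a dict
assert out[0] is out.sample          # integer indexing goes through to_tuple()
assert len(out.to_tuple()) == 1      # the None-valued 'extra' was dropped
```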
Learns embeddings for input time steps.\n\n Args:\n time_embed_dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n time_embed_dim: int = 32\n dtype: jnp.dtype = jnp.float32\n\n @nn.compact\n def __call__(self, temb):\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_1\")(temb)\n temb = nn.silu(temb)\n temb = nn.Dense(self.time_embed_dim, dtype=self.dtype, name=\"linear_2\")(temb)\n return temb" }, { "identifier": "FlaxTimesteps", "path": "diffusers/src/diffusers/models/embeddings_flax.py", "snippet": "class FlaxTimesteps(nn.Module):\n r\"\"\"\n Wrapper Module for sinusoidal Time step Embeddings as described in https://arxiv.org/abs/2006.11239\n\n Args:\n dim (`int`, *optional*, defaults to `32`):\n Time step embedding dimension\n \"\"\"\n dim: int = 32\n flip_sin_to_cos: bool = False\n freq_shift: float = 1\n\n @nn.compact\n def __call__(self, timesteps):\n return get_sinusoidal_embeddings(\n timesteps, embedding_dim=self.dim, flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.freq_shift\n )" }, { "identifier": "FlaxModelMixin", "path": "diffusers/src/diffusers/models/modeling_flax_utils.py", "snippet": "class FlaxModelMixin:\n r\"\"\"\n Base class for all flax models.\n\n [`FlaxModelMixin`] takes care of storing the configuration of the models and handles methods for loading,\n downloading and saving models.\n \"\"\"\n config_name = CONFIG_NAME\n _automatically_saved_args = [\"_diffusers_version\", \"_class_name\", \"_name_or_path\"]\n _flax_internal_args = [\"name\", \"parent\", \"dtype\"]\n\n @classmethod\n def _from_config(cls, config, **kwargs):\n \"\"\"\n All context managers that the model should be initialized under go here.\n \"\"\"\n return cls(config, **kwargs)\n\n def _cast_floating_to(self, params: Union[Dict, FrozenDict], dtype: jnp.dtype, mask: Any = None) -> Any:\n \"\"\"\n Helper method to cast floating-point values of given parameter `PyTree` to given `dtype`.\n \"\"\"\n\n # taken from https://github.com/deepmind/jmp/blob/3a8318abc3292be38582794dbf7b094e6583b192/jmp/_src/policy.py#L27\n def conditional_cast(param):\n if isinstance(param, jnp.ndarray) and jnp.issubdtype(param.dtype, jnp.floating):\n param = param.astype(dtype)\n return param\n\n if mask is None:\n return jax.tree_map(conditional_cast, params)\n\n flat_params = flatten_dict(params)\n flat_mask, _ = jax.tree_flatten(mask)\n\n for masked, key in zip(flat_mask, flat_params.keys()):\n if masked:\n param = flat_params[key]\n flat_params[key] = conditional_cast(param)\n\n return unflatten_dict(flat_params)\n\n def to_bf16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.bfloat16`. This returns a new `params` tree and does not cast\n the `params` in place.\n\n This method can be used on TPU to explicitly convert the model parameters to bfloat16 precision to do full\n half-precision training or to save weights in bfloat16 for inference in order to save memory and improve speed.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. 
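`FlaxTimesteps` defers the actual math to `get_sinusoidal_embeddings`, which this record does not include. As a hedged reconstruction, the usual DDPM-style formulation looks roughly like the following; the constants and the `freq_shift` handling are assumptions, not the library's exact code:

```python
import jax.numpy as jnp


def sinusoidal_embeddings(timesteps, embedding_dim=32, freq_shift=1.0,
                          flip_sin_to_cos=False, max_period=10000.0):
    """Approximate sketch of a sinusoidal timestep embedding (not verbatim)."""
    half = embedding_dim // 2
    exponent = -jnp.log(max_period) * jnp.arange(half) / (half - freq_shift)
    angles = timesteps[:, None].astype(jnp.float32) * jnp.exp(exponent)[None, :]
    sin, cos = jnp.sin(angles), jnp.cos(angles)
    return jnp.concatenate([cos, sin] if flip_sin_to_cos else [sin, cos], axis=-1)


emb = sinusoidal_embeddings(jnp.array([0, 10, 999]), embedding_dim=32)
assert emb.shape == (3, 32)
```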
The leaves should be booleans, `True` for params\n you want to cast, and should be `False` for those you want to skip.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # load model\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model parameters will be in fp32 precision, to cast these to bfloat16 precision\n >>> params = model.to_bf16(params)\n >>> # If you don't want to cast certain parameters (for example layer norm bias and scale)\n >>> # then pass the mask as follows\n >>> from flax import traverse_util\n\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> flat_params = traverse_util.flatten_dict(params)\n >>> mask = {\n ... path: (path[-2] != (\"LayerNorm\", \"bias\") and path[-2:] != (\"LayerNorm\", \"scale\"))\n ... for path in flat_params\n ... }\n >>> mask = traverse_util.unflatten_dict(mask)\n >>> params = model.to_bf16(params, mask)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.bfloat16, mask)\n\n def to_fp32(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.float32`. This method can be used to explicitly convert the\n model parameters to fp32 precision. This returns a new `params` tree and does not cast the `params` in place.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. The leaves should be booleans, `True` for params\n you want to cast, and should be `False` for those you want to skip\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # Download model and configuration from huggingface.co\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model params will be in fp32, to illustrate the use of this method,\n >>> # we'll first cast to fp16 and back to fp32\n >>> params = model.to_f16(params)\n >>> # now cast back to fp32\n >>> params = model.to_fp32(params)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.float32, mask)\n\n def to_fp16(self, params: Union[Dict, FrozenDict], mask: Any = None):\n r\"\"\"\n Cast the floating-point `params` to `jax.numpy.float16`. This returns a new `params` tree and does not cast the\n `params` in place.\n\n This method can be used on GPU to explicitly convert the model parameters to float16 precision to do full\n half-precision training or to save weights in float16 for inference in order to save memory and improve speed.\n\n Arguments:\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n mask (`Union[Dict, FrozenDict]`):\n A `PyTree` with same structure as the `params` tree. 
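The `conditional_cast` pattern used by `_cast_floating_to` (and hence by `to_bf16`/`to_fp32`/`to_fp16`) stands on its own. A self-contained sketch without the mask branch:

```python
import jax
import jax.numpy as jnp


def cast_floating(params, dtype=jnp.bfloat16):
    """Cast only floating-point leaves of a pytree; integer leaves pass through."""
    def conditional_cast(leaf):
        if isinstance(leaf, jnp.ndarray) and jnp.issubdtype(leaf.dtype, jnp.floating):
            return leaf.astype(dtype)
        return leaf

    return jax.tree_util.tree_map(conditional_cast, params)


params = {"dense": {"kernel": jnp.ones((2, 2)), "steps": jnp.array(3)}}
half = cast_floating(params, jnp.float16)
assert half["dense"]["kernel"].dtype == jnp.float16
assert half["dense"]["steps"].dtype == params["dense"]["steps"].dtype  # untouched
```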
The leaves should be booleans, `True` for params\n you want to cast, and should be `False` for those you want to skip\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # load model\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # By default, the model params will be in fp32, to cast these to float16\n >>> params = model.to_fp16(params)\n >>> # If you want don't want to cast certain parameters (for example layer norm bias and scale)\n >>> # then pass the mask as follows\n >>> from flax import traverse_util\n\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> flat_params = traverse_util.flatten_dict(params)\n >>> mask = {\n ... path: (path[-2] != (\"LayerNorm\", \"bias\") and path[-2:] != (\"LayerNorm\", \"scale\"))\n ... for path in flat_params\n ... }\n >>> mask = traverse_util.unflatten_dict(mask)\n >>> params = model.to_fp16(params, mask)\n ```\"\"\"\n return self._cast_floating_to(params, jnp.float16, mask)\n\n def init_weights(self, rng: jax.random.KeyArray) -> Dict:\n raise NotImplementedError(f\"init_weights method has to be implemented for {self}\")\n\n @classmethod\n def from_pretrained(\n cls,\n pretrained_model_name_or_path: Union[str, os.PathLike],\n dtype: jnp.dtype = jnp.float32,\n *model_args,\n **kwargs,\n ):\n r\"\"\"\n Instantiate a pretrained flax model from a pre-trained model configuration.\n\n The warning *Weights from XXX not initialized from pretrained model* means that the weights of XXX do not come\n pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning\n task.\n\n The warning *Weights from XXX not used in YYY* means that the layer XXX is not used by YYY, therefore those\n weights are discarded.\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`):\n Can be either:\n\n - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.\n Valid model ids are namespaced under a user or organization name, like\n `runwayml/stable-diffusion-v1-5`.\n - A path to a *directory* containing model weights saved using [`~ModelMixin.save_pretrained`],\n e.g., `./my_model_directory/`.\n dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):\n The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and\n `jax.numpy.bfloat16` (on TPUs).\n\n This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. 
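The point this `dtype` docstring goes on to make (computation dtype only, stored parameters unchanged) is best shown concretely. A usage sketch; the repo id follows the docstring examples in this class, and `subfolder="unet"` is my assumption about that repo's layout:

```python
import jax.numpy as jnp

from diffusers import FlaxUNet2DConditionModel

# Computation runs in bfloat16, but the parameters are still stored in float32...
model, params = FlaxUNet2DConditionModel.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="unet", dtype=jnp.bfloat16
)
# ...until they are cast explicitly:
params = model.to_bf16(params)
```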
If\n specified all the computation will be performed with the given `dtype`.\n\n **Note that this only specifies the dtype of the computation and does not influence the dtype of model\n parameters.**\n\n If you wish to change the dtype of the model parameters, see [`~ModelMixin.to_fp16`] and\n [`~ModelMixin.to_bf16`].\n model_args (sequence of positional arguments, *optional*):\n All remaining positional arguments will be passed to the underlying model's `__init__` method.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory in which a downloaded pretrained model configuration should be cached if the\n standard cache should not be used.\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to delete incompletely received files. Will attempt to resume the download if such a\n file exists.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n local_files_only(`bool`, *optional*, defaults to `False`):\n Whether or not to only look at local files (i.e., do not try to download the model).\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a\n git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any\n identifier allowed by git.\n from_pt (`bool`, *optional*, defaults to `False`):\n Load the model weights from a PyTorch checkpoint save file.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to update the configuration object (after it being loaded) and initiate the model (e.g.,\n `output_attentions=True`). Behaves differently depending on whether a `config` is provided or\n automatically loaded:\n\n - If a configuration is provided with `config`, `**kwargs` will be directly passed to the\n underlying model's `__init__` method (we assume all relevant updates to the configuration have\n already been done)\n - If a configuration is not provided, `kwargs` will be first passed to the configuration class\n initialization function ([`~ConfigMixin.from_config`]). Each key of `kwargs` that corresponds to\n a configuration attribute will be used to override said attribute with the supplied `kwargs`\n value. 
Remaining keys that do not correspond to any configuration attribute will be passed to the\n underlying model's `__init__` function.\n\n Examples:\n\n ```python\n >>> from diffusers import FlaxUNet2DConditionModel\n\n >>> # Download model and configuration from huggingface.co and cache.\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> # Model was saved using *save_pretrained('./test/saved_model/')* (for example purposes, not runnable).\n >>> model, params = FlaxUNet2DConditionModel.from_pretrained(\"./test/saved_model/\")\n ```\"\"\"\n config = kwargs.pop(\"config\", None)\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n force_download = kwargs.pop(\"force_download\", False)\n from_pt = kwargs.pop(\"from_pt\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n subfolder = kwargs.pop(\"subfolder\", None)\n\n user_agent = {\n \"diffusers\": __version__,\n \"file_type\": \"model\",\n \"framework\": \"flax\",\n }\n\n # Load config if we don't provide a configuration\n config_path = config if config is not None else pretrained_model_name_or_path\n model, model_kwargs = cls.from_config(\n config_path,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n subfolder=subfolder,\n # model args\n dtype=dtype,\n **kwargs,\n )\n\n # Load model\n pretrained_path_with_subfolder = (\n pretrained_model_name_or_path\n if subfolder is None\n else os.path.join(pretrained_model_name_or_path, subfolder)\n )\n if os.path.isdir(pretrained_path_with_subfolder):\n if from_pt:\n if not os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):\n raise EnvironmentError(\n f\"Error no file named {WEIGHTS_NAME} found in directory {pretrained_path_with_subfolder} \"\n )\n model_file = os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)):\n # Load from a Flax checkpoint\n model_file = os.path.join(pretrained_path_with_subfolder, FLAX_WEIGHTS_NAME)\n # Check if pytorch weights exist instead\n elif os.path.isfile(os.path.join(pretrained_path_with_subfolder, WEIGHTS_NAME)):\n raise EnvironmentError(\n f\"{WEIGHTS_NAME} file found in directory {pretrained_path_with_subfolder}. 
Please load the model\"\n \" using `from_pt=True`.\"\n )\n else:\n raise EnvironmentError(\n f\"Error no file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME} found in directory \"\n f\"{pretrained_path_with_subfolder}.\"\n )\n else:\n try:\n model_file = hf_hub_download(\n pretrained_model_name_or_path,\n filename=FLAX_WEIGHTS_NAME if not from_pt else WEIGHTS_NAME,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n user_agent=user_agent,\n subfolder=subfolder,\n revision=revision,\n )\n\n except RepositoryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} is not a local folder and is not a valid model identifier \"\n \"listed on 'https://huggingface.co/models'\\nIf this is a private repository, make sure to pass a \"\n \"token having permission to this repo with `use_auth_token` or log in with `huggingface-cli \"\n \"login`.\"\n )\n except RevisionNotFoundError:\n raise EnvironmentError(\n f\"{revision} is not a valid git identifier (branch name, tag name or commit id) that exists for \"\n \"this model name. Check the model page at \"\n f\"'https://huggingface.co/{pretrained_model_name_or_path}' for available revisions.\"\n )\n except EntryNotFoundError:\n raise EnvironmentError(\n f\"{pretrained_model_name_or_path} does not appear to have a file named {FLAX_WEIGHTS_NAME}.\"\n )\n except HTTPError as err:\n raise EnvironmentError(\n f\"There was a specific connection error when trying to load {pretrained_model_name_or_path}:\\n\"\n f\"{err}\"\n )\n except ValueError:\n raise EnvironmentError(\n f\"We couldn't connect to '{HUGGINGFACE_CO_RESOLVE_ENDPOINT}' to load this model, couldn't find it\"\n f\" in the cached files and it looks like {pretrained_model_name_or_path} is not the path to a\"\n f\" directory containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\\nCheckout your\"\n \" internet connection or see how to run the library in offline mode at\"\n \" 'https://huggingface.co/docs/transformers/installation#offline-mode'.\"\n )\n except EnvironmentError:\n raise EnvironmentError(\n f\"Can't load the model for '{pretrained_model_name_or_path}'. If you were trying to load it from \"\n \"'https://huggingface.co/models', make sure you don't have a local directory with the same name. \"\n f\"Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a directory \"\n f\"containing a file named {FLAX_WEIGHTS_NAME} or {WEIGHTS_NAME}.\"\n )\n\n if from_pt:\n if is_torch_available():\n from .modeling_utils import load_state_dict\n else:\n raise EnvironmentError(\n \"Can't load the model in PyTorch format because PyTorch is not installed. \"\n \"Please, install PyTorch or use native Flax weights.\"\n )\n\n # Step 1: Get the pytorch file\n pytorch_model_file = load_state_dict(model_file)\n\n # Step 2: Convert the weights\n state = convert_pytorch_state_dict_to_flax(pytorch_model_file, model)\n else:\n try:\n with open(model_file, \"rb\") as state_f:\n state = from_bytes(cls, state_f.read())\n except (UnpicklingError, msgpack.exceptions.ExtraData) as e:\n try:\n with open(model_file) as f:\n if f.read().startswith(\"version\"):\n raise OSError(\n \"You seem to have cloned a repository without having git-lfs installed. 
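The happy path of the native-Flax branch above is just msgpack deserialization. A minimal sketch keeping only the `from_bytes` call, with none of the git-lfs or PyTorch fallbacks; `load_flax_weights` is an illustrative name:

```python
from flax.core.frozen_dict import freeze
from flax.serialization import from_bytes


def load_flax_weights(model, model_file):
    """Deserialize a .msgpack Flax checkpoint into a params pytree (happy path)."""
    with open(model_file, "rb") as f:
        state = from_bytes(model, f.read())  # raises on corrupt or non-msgpack input
    return freeze(state)
```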
Please\"\n \" install git-lfs and run `git lfs install` followed by `git lfs pull` in the\"\n \" folder you cloned.\"\n )\n else:\n raise ValueError from e\n except (UnicodeDecodeError, ValueError):\n raise EnvironmentError(f\"Unable to convert {model_file} to Flax deserializable object. \")\n # make sure all arrays are stored as jnp.ndarray\n # NOTE: This is to prevent a bug this will be fixed in Flax >= v0.3.4:\n # https://github.com/google/flax/issues/1261\n state = jax.tree_util.tree_map(lambda x: jax.device_put(x, jax.devices(\"cpu\")[0]), state)\n\n # flatten dicts\n state = flatten_dict(state)\n\n params_shape_tree = jax.eval_shape(model.init_weights, rng=jax.random.PRNGKey(0))\n required_params = set(flatten_dict(unfreeze(params_shape_tree)).keys())\n\n shape_state = flatten_dict(unfreeze(params_shape_tree))\n\n missing_keys = required_params - set(state.keys())\n unexpected_keys = set(state.keys()) - required_params\n\n if missing_keys:\n logger.warning(\n f\"The checkpoint {pretrained_model_name_or_path} is missing required keys: {missing_keys}. \"\n \"Make sure to call model.init_weights to initialize the missing weights.\"\n )\n cls._missing_keys = missing_keys\n\n for key in state.keys():\n if key in shape_state and state[key].shape != shape_state[key].shape:\n raise ValueError(\n f\"Trying to load the pretrained weight for {key} failed: checkpoint has shape \"\n f\"{state[key].shape} which is incompatible with the model shape {shape_state[key].shape}. \"\n )\n\n # remove unexpected keys to not be saved again\n for unexpected_key in unexpected_keys:\n del state[unexpected_key]\n\n if len(unexpected_keys) > 0:\n logger.warning(\n f\"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when\"\n f\" initializing {model.__class__.__name__}: {unexpected_keys}\\n- This IS expected if you are\"\n f\" initializing {model.__class__.__name__} from the checkpoint of a model trained on another task or\"\n \" with another architecture.\"\n )\n else:\n logger.info(f\"All model checkpoint weights were used when initializing {model.__class__.__name__}.\\n\")\n\n if len(missing_keys) > 0:\n logger.warning(\n f\"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path} and are newly initialized: {missing_keys}\\nYou should probably\"\n \" TRAIN this model on a down-stream task to be able to use it for predictions and inference.\"\n )\n else:\n logger.info(\n f\"All the weights of {model.__class__.__name__} were initialized from the model checkpoint at\"\n f\" {pretrained_model_name_or_path}.\\nIf your task is similar to the task the model of the checkpoint\"\n f\" was trained on, you can already use {model.__class__.__name__} for predictions without further\"\n \" training.\"\n )\n\n return model, unflatten_dict(state)\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n params: Union[Dict, FrozenDict],\n is_main_process: bool = True,\n ):\n \"\"\"\n Save a model and its configuration file to a directory, so that it can be re-loaded using the\n `[`~FlaxModelMixin.from_pretrained`]` class method\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to which to save. Will be created if it doesn't exist.\n params (`Union[Dict, FrozenDict]`):\n A `PyTree` of model parameters.\n is_main_process (`bool`, *optional*, defaults to `True`):\n Whether the process calling this is the main process or not. 
Useful when in distributed training like\n TPUs and need to call this function on all processes. In this case, set `is_main_process=True` only on\n the main process to avoid race conditions.\n \"\"\"\n if os.path.isfile(save_directory):\n logger.error(f\"Provided path ({save_directory}) should be a directory, not a file\")\n return\n\n os.makedirs(save_directory, exist_ok=True)\n\n model_to_save = self\n\n # Attach architecture to the config\n # Save the config\n if is_main_process:\n model_to_save.save_config(save_directory)\n\n # save model\n output_model_file = os.path.join(save_directory, FLAX_WEIGHTS_NAME)\n with open(output_model_file, \"wb\") as f:\n model_bytes = to_bytes(params)\n f.write(model_bytes)\n\n logger.info(f\"Model weights saved in {output_model_file}\")" }, { "identifier": "FlaxCrossAttnDownBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxCrossAttnDownBlock2D(nn.Module):\n r\"\"\"\n Cross Attention 2D Downsizing block - original architecture from Unet transformers:\n https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n in_channels: int\n out_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n attn_num_head_channels: int = 1\n add_downsample: bool = True\n use_linear_projection: bool = False\n only_cross_attention: bool = False\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n attentions = []\n\n for i in range(self.num_layers):\n in_channels = self.in_channels if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=in_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n attn_block = FlaxTransformer2DModel(\n in_channels=self.out_channels,\n n_heads=self.attn_num_head_channels,\n d_head=self.out_channels // self.attn_num_head_channels,\n depth=1,\n use_linear_projection=self.use_linear_projection,\n only_cross_attention=self.only_cross_attention,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n if self.add_downsample:\n self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):\n output_states = ()\n\n for resnet, attn in zip(self.resnets, self.attentions):\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n output_states += (hidden_states,)\n\n if self.add_downsample:\n hidden_states = self.downsamplers_0(hidden_states)\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "FlaxCrossAttnUpBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxCrossAttnUpBlock2D(nn.Module):\n r\"\"\"\n Cross Attention 2D Upsampling block - original 
architecture from Unet transformers:\n https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n add_upsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add upsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n in_channels: int\n out_channels: int\n prev_output_channel: int\n dropout: float = 0.0\n num_layers: int = 1\n attn_num_head_channels: int = 1\n add_upsample: bool = True\n use_linear_projection: bool = False\n only_cross_attention: bool = False\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n attentions = []\n\n for i in range(self.num_layers):\n res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels\n resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n attn_block = FlaxTransformer2DModel(\n in_channels=self.out_channels,\n n_heads=self.attn_num_head_channels,\n d_head=self.out_channels // self.attn_num_head_channels,\n depth=1,\n use_linear_projection=self.use_linear_projection,\n only_cross_attention=self.only_cross_attention,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n if self.add_upsample:\n self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, res_hidden_states_tuple, temb, encoder_hidden_states, deterministic=True):\n for resnet, attn in zip(self.resnets, self.attentions):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)\n\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n\n if self.add_upsample:\n hidden_states = self.upsamplers_0(hidden_states)\n\n return hidden_states" }, { "identifier": "FlaxDownBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxDownBlock2D(nn.Module):\n r\"\"\"\n Flax 2D downsizing block\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n in_channels: int\n out_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n add_downsample: bool = True\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n\n for i in range(self.num_layers):\n in_channels = self.in_channels if i == 0 else self.out_channels\n\n 
res_block = FlaxResnetBlock2D(\n in_channels=in_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n self.resnets = resnets\n\n if self.add_downsample:\n self.downsamplers_0 = FlaxDownsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, temb, deterministic=True):\n output_states = ()\n\n for resnet in self.resnets:\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n output_states += (hidden_states,)\n\n if self.add_downsample:\n hidden_states = self.downsamplers_0(hidden_states)\n output_states += (hidden_states,)\n\n return hidden_states, output_states" }, { "identifier": "FlaxUNetMidBlock2DCrossAttn", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxUNetMidBlock2DCrossAttn(nn.Module):\n r\"\"\"\n Cross Attention 2D Mid-level block - original architecture from Unet transformers: https://arxiv.org/abs/2103.06104\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n attn_num_head_channels (:obj:`int`, *optional*, defaults to 1):\n Number of attention heads of each spatial transformer block\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n in_channels: int\n dropout: float = 0.0\n num_layers: int = 1\n attn_num_head_channels: int = 1\n use_linear_projection: bool = False\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n # there is always at least one resnet\n resnets = [\n FlaxResnetBlock2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n ]\n\n attentions = []\n\n for _ in range(self.num_layers):\n attn_block = FlaxTransformer2DModel(\n in_channels=self.in_channels,\n n_heads=self.attn_num_head_channels,\n d_head=self.in_channels // self.attn_num_head_channels,\n depth=1,\n use_linear_projection=self.use_linear_projection,\n dtype=self.dtype,\n )\n attentions.append(attn_block)\n\n res_block = FlaxResnetBlock2D(\n in_channels=self.in_channels,\n out_channels=self.in_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n self.resnets = resnets\n self.attentions = attentions\n\n def __call__(self, hidden_states, temb, encoder_hidden_states, deterministic=True):\n hidden_states = self.resnets[0](hidden_states, temb)\n for attn, resnet in zip(self.attentions, self.resnets[1:]):\n hidden_states = attn(hidden_states, encoder_hidden_states, deterministic=deterministic)\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n\n return hidden_states" }, { "identifier": "FlaxUpBlock2D", "path": "diffusers/src/diffusers/models/unet_2d_blocks_flax.py", "snippet": "class FlaxUpBlock2D(nn.Module):\n r\"\"\"\n Flax 2D upsampling block\n\n Parameters:\n in_channels (:obj:`int`):\n Input channels\n out_channels (:obj:`int`):\n Output channels\n prev_output_channel (:obj:`int`):\n Output channels from the previous block\n dropout (:obj:`float`, *optional*, defaults to 0.0):\n Dropout rate\n num_layers (:obj:`int`, *optional*, defaults to 1):\n Number of attention blocks layers\n add_downsample (:obj:`bool`, *optional*, defaults to `True`):\n Whether to add downsampling layer before each final output\n dtype (:obj:`jnp.dtype`, *optional*, defaults to jnp.float32):\n Parameters `dtype`\n \"\"\"\n 
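The `res_skip_channels`/`resnet_in_channels` arithmetic that both up-block variants use in `setup` is easiest to verify with numbers. A worked sketch for one up block with the default 1280-channel configuration; the helper name is illustrative:

```python
def up_block_resnet_channels(in_channels, out_channels, prev_output_channel, num_layers=3):
    """Input/output channel counts of each resnet inside one up block."""
    rows = []
    for i in range(num_layers):
        res_skip = in_channels if i == num_layers - 1 else out_channels
        resnet_in = prev_output_channel if i == 0 else out_channels
        rows.append((i, resnet_in + res_skip, out_channels))
    return rows


# First up block after the mid block: every resnet sees 1280 + 1280 channels.
for i, c_in, c_out in up_block_resnet_channels(1280, 1280, 1280):
    print(f"resnet {i}: {c_in} -> {c_out}")
```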
in_channels: int\n out_channels: int\n prev_output_channel: int\n dropout: float = 0.0\n num_layers: int = 1\n add_upsample: bool = True\n dtype: jnp.dtype = jnp.float32\n\n def setup(self):\n resnets = []\n\n for i in range(self.num_layers):\n res_skip_channels = self.in_channels if (i == self.num_layers - 1) else self.out_channels\n resnet_in_channels = self.prev_output_channel if i == 0 else self.out_channels\n\n res_block = FlaxResnetBlock2D(\n in_channels=resnet_in_channels + res_skip_channels,\n out_channels=self.out_channels,\n dropout_prob=self.dropout,\n dtype=self.dtype,\n )\n resnets.append(res_block)\n\n self.resnets = resnets\n\n if self.add_upsample:\n self.upsamplers_0 = FlaxUpsample2D(self.out_channels, dtype=self.dtype)\n\n def __call__(self, hidden_states, res_hidden_states_tuple, temb, deterministic=True):\n for resnet in self.resnets:\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = jnp.concatenate((hidden_states, res_hidden_states), axis=-1)\n\n hidden_states = resnet(hidden_states, temb, deterministic=deterministic)\n\n if self.add_upsample:\n hidden_states = self.upsamplers_0(hidden_states)\n\n return hidden_states" } ]
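Taken together, the block snippets above implement a stack discipline: down blocks append every intermediate activation to `output_states`, and up blocks pop `res_hidden_states_tuple` from the end. The handshake in isolation, with strings standing in for tensors:

```python
# The down path accumulates one entry per resnet, plus one per downsampler...
output_states = ()
for h in ["res0_out", "res1_out", "downsample_out"]:
    output_states += (h,)

# ...and the up path pops from the end, exactly as in __call__ above.
res_hidden_states_tuple = output_states
while res_hidden_states_tuple:
    res_hidden_states = res_hidden_states_tuple[-1]
    res_hidden_states_tuple = res_hidden_states_tuple[:-1]
    print("concatenate skip connection:", res_hidden_states)
```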
from typing import Tuple, Union

import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict

from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_2d_blocks_flax import (
    FlaxCrossAttnDownBlock2D,
    FlaxCrossAttnUpBlock2D,
    FlaxDownBlock2D,
    FlaxUNetMidBlock2DCrossAttn,
    FlaxUpBlock2D,
)
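With these imports in place, the model defined in the code fields below can be exercised end to end. A quick-start sketch with a deliberately tiny configuration so that `init_weights` is cheap; every field name comes from the dataclass defaults shown below, while the 77-token context length is merely illustrative. Assuming the forward pass takes its inference defaults (deterministic dropout), the result should be sample-shaped:

```python
import jax
import jax.numpy as jnp

from diffusers import FlaxUNet2DConditionModel

model = FlaxUNet2DConditionModel(
    sample_size=16,
    block_out_channels=(32, 64),
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
    cross_attention_dim=32,
)
params = model.init_weights(jax.random.PRNGKey(0))

sample = jnp.zeros((1, 4, 16, 16))        # (batch, channels, height, width)
timesteps = jnp.ones((1,), dtype=jnp.int32)
context = jnp.zeros((1, 77, 32))          # last dim must match cross_attention_dim
out = model.apply({"params": params}, sample, timesteps, context)
```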
17,317
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """
    Args:
        sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Hidden states conditioned on `encoder_hidden_states` input. Output of the last layer of the model.
    """

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, a conditional state, and a
    timestep and returns a sample-shaped output.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the
    library implements for all the models (such as downloading or saving).

    Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matters related to
    general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        sample_size (`int`, *optional*):
            The size of the input sample.
        in_channels (`int`, *optional*, defaults to 4):
            The number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4):
            The number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D",
            "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D"
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
            The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D",
            "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D"
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
            The dimension of the attention heads.
        cross_attention_dim (`int`, *optional*, defaults to 1280):
            The dimension of the cross attention features.
        dropout (`float`, *optional*, defaults to 0):
            Dropout probability for down, up and bottleneck blocks.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0):
            The frequency shift to apply to the time embedding.
""" sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int]] = 8 cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"] def setup(self): block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@flax.struct.dataclass
class FlaxUNet2DConditionOutput(BaseOutput):
    """
    Args:
        sample (`jnp.ndarray` of shape `(batch_size, num_channels, height, width)`):
            Hidden states conditioned on `encoder_hidden_states` input. Output of the last layer of the model.
    """

    sample: jnp.ndarray


@flax_register_to_config
class FlaxUNet2DConditionModel(nn.Module, FlaxModelMixin, ConfigMixin):
    r"""
    FlaxUNet2DConditionModel is a conditional 2D UNet model that takes in a noisy sample, a conditional state, and a
    timestep and returns a sample-shaped output.

    This model inherits from [`FlaxModelMixin`]. Check the superclass documentation for the generic methods the
    library implements for all the models (such as downloading or saving).

    Also, this model is a Flax Linen [flax.linen.Module](https://flax.readthedocs.io/en/latest/flax.linen.html#module)
    subclass. Use it as a regular Flax linen Module and refer to the Flax documentation for all matters related to
    general usage and behavior.

    Finally, this model supports inherent JAX features such as:

    - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
    - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
    - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
    - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)

    Parameters:
        sample_size (`int`, *optional*):
            The size of the input sample.
        in_channels (`int`, *optional*, defaults to 4):
            The number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4):
            The number of channels in the output.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use. The corresponding class names will be: "FlaxCrossAttnDownBlock2D",
            "FlaxCrossAttnDownBlock2D", "FlaxCrossAttnDownBlock2D", "FlaxDownBlock2D"
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
            The tuple of upsample blocks to use. The corresponding class names will be: "FlaxUpBlock2D",
            "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D", "FlaxCrossAttnUpBlock2D"
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2):
            The number of layers per block.
        attention_head_dim (`int` or `Tuple[int]`, *optional*, defaults to 8):
            The dimension of the attention heads.
        cross_attention_dim (`int`, *optional*, defaults to 1280):
            The dimension of the cross attention features.
        dropout (`float`, *optional*, defaults to 0):
            Dropout probability for down, up and bottleneck blocks.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding. """ sample_size: int = 32 in_channels: int = 4 out_channels: int = 4 down_block_types: Tuple[str] = ( "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D", ) up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D") only_cross_attention: Union[bool, Tuple[bool]] = False block_out_channels: Tuple[int] = (320, 640, 1280, 1280) layers_per_block: int = 2 attention_head_dim: Union[int, Tuple[int]] = 8 cross_attention_dim: int = 1280 dropout: float = 0.0 use_linear_projection: bool = False dtype: jnp.dtype = jnp.float32 flip_sin_to_cos: bool = True freq_shift: int = 0 def init_weights(self, rng: jax.random.KeyArray) -> FrozenDict: # init input tensors sample_shape = (1, self.in_channels, self.sample_size, self.sample_size) sample = jnp.zeros(sample_shape, dtype=jnp.float32) timesteps = jnp.ones((1,), dtype=jnp.int32) encoder_hidden_states = jnp.zeros((1, 1, self.cross_attention_dim), dtype=jnp.float32) params_rng, dropout_rng = jax.random.split(rng) rngs = {"params": params_rng, "dropout": dropout_rng} return self.init(rngs, sample, timesteps, encoder_hidden_states)["params"] def setup(self): block_out_channels = self.block_out_channels time_embed_dim = block_out_channels[0] * 4 # input self.conv_in = nn.Conv( block_out_channels[0], kernel_size=(3, 3), strides=(1, 1), padding=((1, 1), (1, 1)), dtype=self.dtype, ) # time self.time_proj = FlaxTimesteps( block_out_channels[0], flip_sin_to_cos=self.flip_sin_to_cos, freq_shift=self.config.freq_shift )
self.time_embedding = FlaxTimestepEmbedding(time_embed_dim, dtype=self.dtype)
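The gold continuation above wires the fixed sinusoidal projection into a learned MLP. A sketch of how the two embedding modules compose, assuming `diffusers` is installed with its Flax extras; dimensions follow the defaults above (`block_out_channels[0] = 320`, so `time_embed_dim = 1280`):

```python
import jax
import jax.numpy as jnp

from diffusers.models.embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps

t = jnp.array([0, 500, 999])

# 1) fixed sinusoidal projection, no learnable parameters...
feats = FlaxTimesteps(dim=320, flip_sin_to_cos=True, freq_shift=0).apply({}, t)

# 2) ...then the learned two-layer MLP that the continuation line constructs.
mlp = FlaxTimestepEmbedding(time_embed_dim=1280)
variables = mlp.init(jax.random.PRNGKey(0), feats)
t_emb = mlp.apply(variables, feats)
assert t_emb.shape == (3, 1280)
```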
3
2023-11-14 23:29:31+00:00
24k
BraveGroup/Drive-WM
src/diffusers/pipelines/dance_diffusion/pipeline_dance_diffusion.py
[ { "identifier": "logging", "path": "src/diffusers/utils/logging.py", "snippet": "def _get_default_logging_level() -> int:\ndef _get_library_name() -> str:\ndef _get_library_root_logger() -> logging.Logger:\ndef _configure_library_root_logger() -> None:\ndef _reset_library_root_logger() -> None:\ndef get_log_levels_dict() -> Dict[str, int]:\ndef get_logger(name: Optional[str] = None) -> logging.Logger:\ndef get_verbosity() -> int:\ndef set_verbosity(verbosity: int) -> None:\ndef set_verbosity_info() -> None:\ndef set_verbosity_warning() -> None:\ndef set_verbosity_debug() -> None:\ndef set_verbosity_error() -> None:\ndef disable_default_handler() -> None:\ndef enable_default_handler() -> None:\ndef add_handler(handler: logging.Handler) -> None:\ndef remove_handler(handler: logging.Handler) -> None:\ndef disable_propagation() -> None:\ndef enable_propagation() -> None:\ndef enable_explicit_format() -> None:\ndef reset_format() -> None:\ndef warning_advice(self, *args, **kwargs) -> None:\n def __init__(self, *args, **kwargs): # pylint: disable=unused-argument\n def __iter__(self):\n def __getattr__(self, _):\n def empty_fn(*args, **kwargs): # pylint: disable=unused-argument\n def __enter__(self):\n def __exit__(self, type_, value, traceback):\n def __call__(self, *args, **kwargs):\n def set_lock(self, *args, **kwargs):\n def get_lock(self):\ndef is_progress_bar_enabled() -> bool:\ndef enable_progress_bar() -> None:\ndef disable_progress_bar() -> None:\nclass EmptyTqdm:\nclass _tqdm_cls:" }, { "identifier": "randn_tensor", "path": "src/diffusers/utils/torch_utils.py", "snippet": "def randn_tensor(\n shape: Union[Tuple, List],\n generator: Optional[Union[List[\"torch.Generator\"], \"torch.Generator\"]] = None,\n device: Optional[\"torch.device\"] = None,\n dtype: Optional[\"torch.dtype\"] = None,\n layout: Optional[\"torch.layout\"] = None,\n):\n \"\"\"A helper function to create random tensors on the desired `device` with the desired `dtype`. When\n passing a list of generators, you can seed each batch size individually. If CPU generators are passed, the tensor\n is always created on the CPU.\n \"\"\"\n # device on which tensor is created defaults to device\n rand_device = device\n batch_size = shape[0]\n\n layout = layout or torch.strided\n device = device or torch.device(\"cpu\")\n\n if generator is not None:\n gen_device_type = generator.device.type if not isinstance(generator, list) else generator[0].device.type\n if gen_device_type != device.type and gen_device_type == \"cpu\":\n rand_device = \"cpu\"\n if device != \"mps\":\n logger.info(\n f\"The passed generator was created on 'cpu' even though a tensor on {device} was expected.\"\n f\" Tensors will be created on 'cpu' and then moved to {device}. 
Note that one can probably\"\n f\" slighly speed up this function by passing a generator that was created on the {device} device.\"\n )\n elif gen_device_type != device.type and gen_device_type == \"cuda\":\n raise ValueError(f\"Cannot generate a {device} tensor from a generator of type {gen_device_type}.\")\n\n # make sure generator list of length 1 is treated like a non-list\n if isinstance(generator, list) and len(generator) == 1:\n generator = generator[0]\n\n if isinstance(generator, list):\n shape = (1,) + shape[1:]\n latents = [\n torch.randn(shape, generator=generator[i], device=rand_device, dtype=dtype, layout=layout)\n for i in range(batch_size)\n ]\n latents = torch.cat(latents, dim=0).to(device)\n else:\n latents = torch.randn(shape, generator=generator, device=rand_device, dtype=dtype, layout=layout).to(device)\n\n return latents" }, { "identifier": "AudioPipelineOutput", "path": "src/diffusers/pipelines/pipeline_utils.py", "snippet": "class AudioPipelineOutput(BaseOutput):\n \"\"\"\n Output class for audio pipelines.\n\n Args:\n audios (`np.ndarray`)\n List of denoised audio samples of a NumPy array of shape `(batch_size, num_channels, sample_rate)`.\n \"\"\"\n\n audios: np.ndarray" }, { "identifier": "DiffusionPipeline", "path": "src/diffusers/pipelines/pipeline_utils.py", "snippet": "class DiffusionPipeline(ConfigMixin, PushToHubMixin):\n r\"\"\"\n Base class for all pipelines.\n\n [`DiffusionPipeline`] stores all components (models, schedulers, and processors) for diffusion pipelines and\n provides methods for loading, downloading and saving models. It also includes methods to:\n\n - move all PyTorch modules to the device of your choice\n - enable/disable the progress bar for the denoising iteration\n\n Class attributes:\n\n - **config_name** (`str`) -- The configuration filename that stores the class and module names of all the\n diffusion pipeline's components.\n - **_optional_components** (`List[str]`) -- List of all optional components that don't have to be passed to the\n pipeline to function (should be overridden by subclasses).\n \"\"\"\n\n config_name = \"model_index.json\"\n model_cpu_offload_seq = None\n _optional_components = []\n _exclude_from_cpu_offload = []\n _load_connected_pipes = False\n _is_onnx = False\n\n def register_modules(self, **kwargs):\n # import it here to avoid circular import\n diffusers_module = importlib.import_module(__name__.split(\".\")[0])\n pipelines = getattr(diffusers_module, \"pipelines\")\n\n for name, module in kwargs.items():\n # retrieve library\n if module is None or isinstance(module, (tuple, list)) and module[0] is None:\n register_dict = {name: (None, None)}\n else:\n # register the config from the original module, not the dynamo compiled one\n not_compiled_module = _unwrap_model(module)\n\n library = not_compiled_module.__module__.split(\".\")[0]\n\n # check if the module is a pipeline module\n module_path_items = not_compiled_module.__module__.split(\".\")\n pipeline_dir = module_path_items[-2] if len(module_path_items) > 2 else None\n\n path = not_compiled_module.__module__.split(\".\")\n is_pipeline_module = pipeline_dir in path and hasattr(pipelines, pipeline_dir)\n\n # if library is not in LOADABLE_CLASSES, then it is a custom module.\n # Or if it's a pipeline module, then the module is inside the pipeline\n # folder so we set the library to module name.\n if is_pipeline_module:\n library = pipeline_dir\n elif library not in LOADABLE_CLASSES:\n library = not_compiled_module.__module__\n\n # retrieve class_name\n 
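The list-of-generators branch of `randn_tensor` above is what makes per-sample reproducibility work: each batch row is drawn from its own generator and the rows are concatenated. The same idea in a few self-contained lines of PyTorch:

```python
import torch

# One CPU generator per batch element, each independently seeded.
generators = [torch.Generator(device="cpu").manual_seed(i) for i in range(4)]
shape = (4, 3, 8, 8)
per_sample = [torch.randn((1,) + shape[1:], generator=g) for g in generators]
latents = torch.cat(per_sample, dim=0)
assert latents.shape == shape  # row i depends only on seed i
```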
class_name = not_compiled_module.__class__.__name__\n\n register_dict = {name: (library, class_name)}\n\n # save model index config\n self.register_to_config(**register_dict)\n\n # set models\n setattr(self, name, module)\n\n def __setattr__(self, name: str, value: Any):\n if name in self.__dict__ and hasattr(self.config, name):\n # We need to overwrite the config if name exists in config\n if isinstance(getattr(self.config, name), (tuple, list)):\n if value is not None and self.config[name][0] is not None:\n class_library_tuple = (value.__module__.split(\".\")[0], value.__class__.__name__)\n else:\n class_library_tuple = (None, None)\n\n self.register_to_config(**{name: class_library_tuple})\n else:\n self.register_to_config(**{name: value})\n\n super().__setattr__(name, value)\n\n def save_pretrained(\n self,\n save_directory: Union[str, os.PathLike],\n safe_serialization: bool = True,\n variant: Optional[str] = None,\n push_to_hub: bool = False,\n **kwargs,\n ):\n \"\"\"\n Save all saveable variables of the pipeline to a directory. A pipeline variable can be saved and loaded if its\n class implements both a save and loading method. The pipeline is easily reloaded using the\n [`~DiffusionPipeline.from_pretrained`] class method.\n\n Arguments:\n save_directory (`str` or `os.PathLike`):\n Directory to save a pipeline to. Will be created if it doesn't exist.\n safe_serialization (`bool`, *optional*, defaults to `True`):\n Whether to save the model using `safetensors` or the traditional PyTorch way with `pickle`.\n variant (`str`, *optional*):\n If specified, weights are saved in the format `pytorch_model.<variant>.bin`.\n push_to_hub (`bool`, *optional*, defaults to `False`):\n Whether or not to push your model to the Hugging Face model hub after saving it. 
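Stripped of the pipeline-module special cases, `register_modules` above records a `(library, class name)` pair per component so the pipeline can be re-materialized later. An illustrative reduction, not the full resolution logic:

```python
def component_entry(module):
    """Roughly what register_modules stores in model_index.json per component."""
    if module is None:
        return (None, None)
    library = module.__module__.split(".")[0]      # e.g. "transformers"
    return (library, module.__class__.__name__)    # e.g. ("transformers", "CLIPTextModel")
```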
You can specify the\n repository you want to push to with `repo_id` (will default to the name of `save_directory` in your\n namespace).\n kwargs (`Dict[str, Any]`, *optional*):\n Additional keyword arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.\n \"\"\"\n model_index_dict = dict(self.config)\n model_index_dict.pop(\"_class_name\", None)\n model_index_dict.pop(\"_diffusers_version\", None)\n model_index_dict.pop(\"_module\", None)\n model_index_dict.pop(\"_name_or_path\", None)\n\n if push_to_hub:\n commit_message = kwargs.pop(\"commit_message\", None)\n private = kwargs.pop(\"private\", False)\n create_pr = kwargs.pop(\"create_pr\", False)\n token = kwargs.pop(\"token\", None)\n repo_id = kwargs.pop(\"repo_id\", save_directory.split(os.path.sep)[-1])\n repo_id = create_repo(repo_id, exist_ok=True, private=private, token=token).repo_id\n\n expected_modules, optional_kwargs = self._get_signature_keys(self)\n\n def is_saveable_module(name, value):\n if name not in expected_modules:\n return False\n if name in self._optional_components and value[0] is None:\n return False\n return True\n\n model_index_dict = {k: v for k, v in model_index_dict.items() if is_saveable_module(k, v)}\n for pipeline_component_name in model_index_dict.keys():\n sub_model = getattr(self, pipeline_component_name)\n model_cls = sub_model.__class__\n\n # Dynamo wraps the original model in a private class.\n # I didn't find a public API to get the original class.\n if is_compiled_module(sub_model):\n sub_model = _unwrap_model(sub_model)\n model_cls = sub_model.__class__\n\n save_method_name = None\n # search for the model's base class in LOADABLE_CLASSES\n for library_name, library_classes in LOADABLE_CLASSES.items():\n if library_name in sys.modules:\n library = importlib.import_module(library_name)\n else:\n logger.info(\n f\"{library_name} is not installed. 
Cannot save {pipeline_component_name} as {library_classes} from {library_name}\"\n )\n\n for base_class, save_load_methods in library_classes.items():\n class_candidate = getattr(library, base_class, None)\n if class_candidate is not None and issubclass(model_cls, class_candidate):\n # if we found a suitable base class in LOADABLE_CLASSES then grab its save method\n save_method_name = save_load_methods[0]\n break\n if save_method_name is not None:\n break\n\n if save_method_name is None:\n logger.warn(f\"self.{pipeline_component_name}={sub_model} of type {type(sub_model)} cannot be saved.\")\n # make sure that unsaveable components are not tried to be loaded afterward\n self.register_to_config(**{pipeline_component_name: (None, None)})\n continue\n\n save_method = getattr(sub_model, save_method_name)\n\n # Call the save method with the argument safe_serialization only if it's supported\n save_method_signature = inspect.signature(save_method)\n save_method_accept_safe = \"safe_serialization\" in save_method_signature.parameters\n save_method_accept_variant = \"variant\" in save_method_signature.parameters\n\n save_kwargs = {}\n if save_method_accept_safe:\n save_kwargs[\"safe_serialization\"] = safe_serialization\n if save_method_accept_variant:\n save_kwargs[\"variant\"] = variant\n\n save_method(os.path.join(save_directory, pipeline_component_name), **save_kwargs)\n\n # finally save the config\n self.save_config(save_directory)\n\n if push_to_hub:\n self._upload_folder(\n save_directory,\n repo_id,\n token=token,\n commit_message=commit_message,\n create_pr=create_pr,\n )\n\n def to(self, *args, **kwargs):\n r\"\"\"\n Performs Pipeline dtype and/or device conversion. A torch.dtype and torch.device are inferred from the\n arguments of `self.to(*args, **kwargs).`\n\n <Tip>\n\n If the pipeline already has the correct torch.dtype and torch.device, then it is returned as is. 
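The save loop above probes each component's save method before calling it, so older components that lack `safe_serialization` or `variant` still work. A minimal sketch of that introspection pattern (the helper name is hypothetical):

```py
import inspect

def call_save_method(save_method, path, safe_serialization=True, variant=None):
    # Sketch of the pattern above: forward only the keyword arguments that
    # the component's save method actually declares in its signature.
    params = inspect.signature(save_method).parameters
    kwargs = {}
    if "safe_serialization" in params:
        kwargs["safe_serialization"] = safe_serialization
    if "variant" in params:
        kwargs["variant"] = variant
    return save_method(path, **kwargs)
```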
Otherwise,\n the returned pipeline is a copy of self with the desired torch.dtype and torch.device.\n\n </Tip>\n\n\n Here are the ways to call `to`:\n\n - `to(dtype, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the specified\n [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype)\n - `to(device, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the specified\n [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device)\n - `to(device=None, dtype=None, silence_dtype_warnings=False) → DiffusionPipeline` to return a pipeline with the\n specified [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device) and\n [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype)\n\n Arguments:\n dtype (`torch.dtype`, *optional*):\n Returns a pipeline with the specified\n [`dtype`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.dtype)\n device (`torch.Device`, *optional*):\n Returns a pipeline with the specified\n [`device`](https://pytorch.org/docs/stable/tensor_attributes.html#torch.device)\n silence_dtype_warnings (`bool`, *optional*, defaults to `False`):\n Whether to omit warnings if the target `dtype` is not compatible with the target `device`.\n\n Returns:\n [`DiffusionPipeline`]: The pipeline converted to the specified `dtype` and/or `device`.\n \"\"\"\n\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n if torch_dtype is not None:\n deprecate(\"torch_dtype\", \"0.25.0\", \"\")\n torch_device = kwargs.pop(\"torch_device\", None)\n if torch_device is not None:\n deprecate(\"torch_device\", \"0.25.0\", \"\")\n\n dtype_kwarg = kwargs.pop(\"dtype\", None)\n device_kwarg = kwargs.pop(\"device\", None)\n silence_dtype_warnings = kwargs.pop(\"silence_dtype_warnings\", False)\n\n if torch_dtype is not None and dtype_kwarg is not None:\n raise ValueError(\n \"You have passed both `torch_dtype` and `dtype` as a keyword argument. Please make sure to only pass `dtype`.\"\n )\n\n dtype = torch_dtype or dtype_kwarg\n\n if torch_device is not None and device_kwarg is not None:\n raise ValueError(\n \"You have passed both `torch_device` and `device` as a keyword argument. Please make sure to only pass `device`.\"\n )\n\n device = torch_device or device_kwarg\n\n dtype_arg = None\n device_arg = None\n if len(args) == 1:\n if isinstance(args[0], torch.dtype):\n dtype_arg = args[0]\n else:\n device_arg = torch.device(args[0]) if args[0] is not None else None\n elif len(args) == 2:\n if isinstance(args[0], torch.dtype):\n raise ValueError(\n \"When passing two arguments, make sure the first corresponds to `device` and the second to `dtype`.\"\n )\n device_arg = torch.device(args[0]) if args[0] is not None else None\n dtype_arg = args[1]\n elif len(args) > 2:\n raise ValueError(\"Please make sure to pass at most two arguments (`device` and `dtype`) to `.to(...)`\")\n\n if dtype is not None and dtype_arg is not None:\n raise ValueError(\n \"You have passed `dtype` both as an argument and as a keyword argument. Please only pass one of the two.\"\n )\n\n dtype = dtype or dtype_arg\n\n if device is not None and device_arg is not None:\n raise ValueError(\n \"You have passed `device` both as an argument and as a keyword argument. 
Please only pass one of the two.\"\n )\n\n device = device or device_arg\n\n # throw warning if pipeline is in \"offloaded\"-mode but user tries to manually set to GPU.\n def module_is_sequentially_offloaded(module):\n if not is_accelerate_available() or is_accelerate_version(\"<\", \"0.14.0\"):\n return False\n\n return hasattr(module, \"_hf_hook\") and not isinstance(\n module._hf_hook, (accelerate.hooks.CpuOffload, accelerate.hooks.AlignDevicesHook)\n )\n\n def module_is_offloaded(module):\n if not is_accelerate_available() or is_accelerate_version(\"<\", \"0.17.0.dev0\"):\n return False\n\n return hasattr(module, \"_hf_hook\") and isinstance(module._hf_hook, accelerate.hooks.CpuOffload)\n\n # .to(\"cuda\") would raise an error if the pipeline is sequentially offloaded, so we raise our own to make it clearer\n pipeline_is_sequentially_offloaded = any(\n module_is_sequentially_offloaded(module) for _, module in self.components.items()\n )\n if pipeline_is_sequentially_offloaded and device and torch.device(device).type == \"cuda\":\n raise ValueError(\n \"It seems like you have activated sequential model offloading by calling `enable_sequential_cpu_offload`, but are now attempting to move the pipeline to GPU. This is not compatible with offloading. Please, move your pipeline `.to('cpu')` or consider removing the move altogether if you use sequential offloading.\"\n )\n\n # Display a warning in this case (the operation succeeds but the benefits are lost)\n pipeline_is_offloaded = any(module_is_offloaded(module) for _, module in self.components.items())\n if pipeline_is_offloaded and device and torch.device(device).type == \"cuda\":\n logger.warning(\n f\"It seems like you have activated model offloading by calling `enable_model_cpu_offload`, but are now manually moving the pipeline to GPU. Doing so is strongly discouraged, as memory gains from offloading are likely to be lost. Offloading automatically takes care of moving the individual components {', '.join(self.components.keys())} to GPU when needed. To make sure offloading works as expected, you should consider moving the pipeline back to CPU: `pipeline.to('cpu')` or removing the move altogether if you use offloading.\"\n )\n\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n is_offloaded = pipeline_is_offloaded or pipeline_is_sequentially_offloaded\n for module in modules:\n is_loaded_in_8bit = hasattr(module, \"is_loaded_in_8bit\") and module.is_loaded_in_8bit\n\n if is_loaded_in_8bit and dtype is not None:\n logger.warning(\n f\"The module '{module.__class__.__name__}' has been loaded in 8bit and conversion to {dtype} is not yet supported. Module is still in 8bit precision.\"\n )\n\n if is_loaded_in_8bit and device is not None:\n logger.warning(\n f\"The module '{module.__class__.__name__}' has been loaded in 8bit and moving it to {device} via `.to()` is not yet supported. Module is still on {module.device}.\"\n )\n else:\n module.to(device, dtype)\n\n if (\n module.dtype == torch.float16\n and str(device) in [\"cpu\"]\n and not silence_dtype_warnings\n and not is_offloaded\n ):\n logger.warning(\n \"Pipelines loaded with `dtype=torch.float16` cannot run with `cpu` device. It\"\n \" is not recommended to move them to `cpu` as running them will fail. 
Please make\"\n \" sure to use an accelerator to run the pipeline in inference, due to the lack of\"\n \" support for`float16` operations on this device in PyTorch. Please, remove the\"\n \" `torch_dtype=torch.float16` argument, or use another device for inference.\"\n )\n return self\n\n @property\n def device(self) -> torch.device:\n r\"\"\"\n Returns:\n `torch.device`: The torch device on which the pipeline is located.\n \"\"\"\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n for module in modules:\n return module.device\n\n return torch.device(\"cpu\")\n\n @property\n def dtype(self) -> torch.dtype:\n r\"\"\"\n Returns:\n `torch.dtype`: The torch dtype on which the pipeline is located.\n \"\"\"\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n for module in modules:\n return module.dtype\n\n return torch.float32\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.PathLike]], **kwargs):\n r\"\"\"\n Instantiate a PyTorch diffusion pipeline from pretrained pipeline weights.\n\n The pipeline is set in evaluation mode (`model.eval()`) by default.\n\n If you get the error message below, you need to finetune the weights for your downstream task:\n\n ```\n Some weights of UNet2DConditionModel were not initialized from the model checkpoint at runwayml/stable-diffusion-v1-5 and are newly initialized because the shapes did not match:\n - conv_in.weight: found shape torch.Size([320, 4, 3, 3]) in the checkpoint and torch.Size([320, 9, 3, 3]) in the model instantiated\n You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n ```\n\n Parameters:\n pretrained_model_name_or_path (`str` or `os.PathLike`, *optional*):\n Can be either:\n\n - A string, the *repo id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline\n hosted on the Hub.\n - A path to a *directory* (for example `./my_pipeline_directory/`) containing pipeline weights\n saved using\n [`~DiffusionPipeline.save_pretrained`].\n torch_dtype (`str` or `torch.dtype`, *optional*):\n Override the default `torch.dtype` and load the model with another dtype. If \"auto\" is passed, the\n dtype is automatically derived from the model's weights.\n custom_pipeline (`str`, *optional*):\n\n <Tip warning={true}>\n\n 🧪 This is an experimental feature and may change in the future.\n\n </Tip>\n\n Can be either:\n\n - A string, the *repo id* (for example `hf-internal-testing/diffusers-dummy-pipeline`) of a custom\n pipeline hosted on the Hub. The repository must contain a file called pipeline.py that defines\n the custom pipeline.\n - A string, the *file name* of a community pipeline hosted on GitHub under\n [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file\n names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`\n instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the\n current main branch of GitHub.\n - A path to a directory (`./my_pipeline_directory/`) containing a custom pipeline. 
The directory\n must contain a file called `pipeline.py` that defines the custom pipeline.\n\n For more information on how to load and create custom pipelines, please have a look at [Loading and\n Adding Custom\n Pipelines](https://huggingface.co/docs/diffusers/using-diffusers/custom_pipeline_overview)\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n cache_dir (`Union[str, os.PathLike]`, *optional*):\n Path to a directory where a downloaded pretrained model configuration is cached if the standard cache\n is not used.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n custom_revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id similar to\n `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a\n custom pipeline from GitHub, otherwise it defaults to `\"main\"` when loading from the Hub.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you’re downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n device_map (`str` or `Dict[str, Union[int, str, torch.device]]`, *optional*):\n A map that specifies where each submodule should go. It doesn’t need to be defined for each\n parameter/buffer name; once a given module name is inside, every submodule of it will be sent to the\n same device.\n\n Set `device_map=\"auto\"` to have 🤗 Accelerate automatically compute the most optimized `device_map`. For\n more information about each option see [designing a device\n map](https://hf.co/docs/accelerate/main/en/usage_guides/big_modeling#designing-a-device-map).\n max_memory (`Dict`, *optional*):\n A dictionary device identifier for the maximum memory. 
Will default to the maximum memory available for\n each GPU and the available CPU RAM if unset.\n offload_folder (`str` or `os.PathLike`, *optional*):\n The path to offload weights if device_map contains the value `\"disk\"`.\n offload_state_dict (`bool`, *optional*):\n If `True`, temporarily offloads the CPU state dict to the hard drive to avoid running out of CPU RAM if\n the weight of the CPU state dict + the biggest shard of the checkpoint does not fit. Defaults to `True`\n when there is some disk offload.\n low_cpu_mem_usage (`bool`, *optional*, defaults to `True` if torch version >= 1.9.0 else `False`):\n Speed up model loading only loading the pretrained weights and not initializing the weights. This also\n tries to not use more than 1x model size in CPU memory (including peak memory) while loading the model.\n Only supported for PyTorch >= 1.9.0. If you are using an older version of PyTorch, setting this\n argument to `True` will raise an error.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the safetensors weights are downloaded if they're available **and** if the\n safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors\n weights. If set to `False`, safetensors weights are not loaded.\n use_onnx (`bool`, *optional*, defaults to `None`):\n If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights\n will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is\n `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending\n with `.onnx` and `.pb`.\n kwargs (remaining dictionary of keyword arguments, *optional*):\n Can be used to overwrite load and saveable variables (the pipeline components of the specific pipeline\n class). The overwritten components are passed directly to the pipelines `__init__` method. See example\n below for more information.\n variant (`str`, *optional*):\n Load weights from a specified variant filename such as `\"fp16\"` or `\"ema\"`. 
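A hedged sketch of `variant` in practice, again using the repo id from the docstring examples: the variant string selects differently named checkpoint files inside each component folder.

```py
import torch
from diffusers import DiffusionPipeline

# Sketch: variant="fp16" selects checkpoint files named like
# "diffusion_pytorch_model.fp16.safetensors"; it is commonly paired with
# torch_dtype=torch.float16 so the loaded modules match that precision.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", variant="fp16", torch_dtype=torch.float16
)
```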
This is ignored when\n loading `from_flax`.\n\n <Tip>\n\n To use private or [gated](https://huggingface.co/docs/hub/models-gated#gated-models) models, log-in with\n `huggingface-cli login`.\n\n </Tip>\n\n Examples:\n\n ```py\n >>> from diffusers import DiffusionPipeline\n\n >>> # Download pipeline from huggingface.co and cache.\n >>> pipeline = DiffusionPipeline.from_pretrained(\"CompVis/ldm-text2im-large-256\")\n\n >>> # Download pipeline that requires an authorization token\n >>> # For more information on access tokens, please refer to this section\n >>> # of the documentation](https://huggingface.co/docs/hub/security-tokens)\n >>> pipeline = DiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n\n >>> # Use a different scheduler\n >>> from diffusers import LMSDiscreteScheduler\n\n >>> scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)\n >>> pipeline.scheduler = scheduler\n ```\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n resume_download = kwargs.pop(\"resume_download\", False)\n force_download = kwargs.pop(\"force_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n from_flax = kwargs.pop(\"from_flax\", False)\n torch_dtype = kwargs.pop(\"torch_dtype\", None)\n custom_pipeline = kwargs.pop(\"custom_pipeline\", None)\n custom_revision = kwargs.pop(\"custom_revision\", None)\n provider = kwargs.pop(\"provider\", None)\n sess_options = kwargs.pop(\"sess_options\", None)\n device_map = kwargs.pop(\"device_map\", None)\n max_memory = kwargs.pop(\"max_memory\", None)\n offload_folder = kwargs.pop(\"offload_folder\", None)\n offload_state_dict = kwargs.pop(\"offload_state_dict\", False)\n low_cpu_mem_usage = kwargs.pop(\"low_cpu_mem_usage\", _LOW_CPU_MEM_USAGE_DEFAULT)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n use_onnx = kwargs.pop(\"use_onnx\", None)\n load_connected_pipeline = kwargs.pop(\"load_connected_pipeline\", False)\n\n # 1. Download the checkpoints and configs\n # use snapshot download here to get it working from from_pretrained\n if not os.path.isdir(pretrained_model_name_or_path):\n if pretrained_model_name_or_path.count(\"/\") > 1:\n raise ValueError(\n f'The provided pretrained_model_name_or_path \"{pretrained_model_name_or_path}\"'\n \" is neither a valid local path nor a valid repo id. Please check the parameter.\"\n )\n cached_folder = cls.download(\n pretrained_model_name_or_path,\n cache_dir=cache_dir,\n resume_download=resume_download,\n force_download=force_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n from_flax=from_flax,\n use_safetensors=use_safetensors,\n use_onnx=use_onnx,\n custom_pipeline=custom_pipeline,\n custom_revision=custom_revision,\n variant=variant,\n load_connected_pipeline=load_connected_pipeline,\n **kwargs,\n )\n else:\n cached_folder = pretrained_model_name_or_path\n\n config_dict = cls.load_config(cached_folder)\n\n # pop out \"_ignore_files\" as it is only needed for download\n config_dict.pop(\"_ignore_files\", None)\n\n # 2. 
Define which model components should load variants\n # We retrieve the information by matching whether variant\n # model checkpoints exist in the subfolders\n model_variants = {}\n if variant is not None:\n for folder in os.listdir(cached_folder):\n folder_path = os.path.join(cached_folder, folder)\n is_folder = os.path.isdir(folder_path) and folder in config_dict\n variant_exists = is_folder and any(\n p.split(\".\")[1].startswith(variant) for p in os.listdir(folder_path)\n )\n if variant_exists:\n model_variants[folder] = variant\n\n # 3. Load the pipeline class, if using custom module then load it from the hub\n # if we load from explicit class, let's use it\n custom_class_name = None\n if os.path.isfile(os.path.join(cached_folder, f\"{custom_pipeline}.py\")):\n custom_pipeline = os.path.join(cached_folder, f\"{custom_pipeline}.py\")\n elif isinstance(config_dict[\"_class_name\"], (list, tuple)) and os.path.isfile(\n os.path.join(cached_folder, f\"{config_dict['_class_name'][0]}.py\")\n ):\n custom_pipeline = os.path.join(cached_folder, f\"{config_dict['_class_name'][0]}.py\")\n custom_class_name = config_dict[\"_class_name\"][1]\n\n pipeline_class = _get_pipeline_class(\n cls,\n config_dict,\n load_connected_pipeline=load_connected_pipeline,\n custom_pipeline=custom_pipeline,\n class_name=custom_class_name,\n cache_dir=cache_dir,\n revision=custom_revision,\n )\n\n # DEPRECATED: To be removed in 1.0.0\n if pipeline_class.__name__ == \"StableDiffusionInpaintPipeline\" and version.parse(\n version.parse(config_dict[\"_diffusers_version\"]).base_version\n ) <= version.parse(\"0.5.1\"):\n from diffusers import StableDiffusionInpaintPipeline, StableDiffusionInpaintPipelineLegacy\n\n pipeline_class = StableDiffusionInpaintPipelineLegacy\n\n deprecation_message = (\n \"You are using a legacy checkpoint for inpainting with Stable Diffusion, therefore we are loading the\"\n f\" {StableDiffusionInpaintPipelineLegacy} class instead of {StableDiffusionInpaintPipeline}. For\"\n \" better inpainting results, we strongly suggest using Stable Diffusion's official inpainting\"\n \" checkpoint: https://huggingface.co/runwayml/stable-diffusion-inpainting instead or adapting your\"\n f\" checkpoint {pretrained_model_name_or_path} to the format of\"\n \" https://huggingface.co/runwayml/stable-diffusion-inpainting. Note that we do not actively maintain\"\n \" the {StableDiffusionInpaintPipelineLegacy} class and will likely remove it in version 1.0.0.\"\n )\n deprecate(\"StableDiffusionInpaintPipelineLegacy\", \"1.0.0\", deprecation_message, standard_warn=False)\n\n # 4. 
Define expected modules given pipeline signature\n # and define non-None initialized modules (=`init_kwargs`)\n\n # some modules can be passed directly to the init\n # in this case they are already instantiated in `kwargs`\n # extract them here\n expected_modules, optional_kwargs = cls._get_signature_keys(pipeline_class)\n passed_class_obj = {k: kwargs.pop(k) for k in expected_modules if k in kwargs}\n passed_pipe_kwargs = {k: kwargs.pop(k) for k in optional_kwargs if k in kwargs}\n\n init_dict, unused_kwargs, _ = pipeline_class.extract_init_dict(config_dict, **kwargs)\n\n # define init kwargs and make sure that optional component modules are filtered out\n init_kwargs = {\n k: init_dict.pop(k)\n for k in optional_kwargs\n if k in init_dict and k not in pipeline_class._optional_components\n }\n init_kwargs = {**init_kwargs, **passed_pipe_kwargs}\n\n # remove `null` components\n def load_module(name, value):\n if value[0] is None:\n return False\n if name in passed_class_obj and passed_class_obj[name] is None:\n return False\n return True\n\n init_dict = {k: v for k, v in init_dict.items() if load_module(k, v)}\n\n # Special case: safety_checker must be loaded separately when using `from_flax`\n if from_flax and \"safety_checker\" in init_dict and \"safety_checker\" not in passed_class_obj:\n raise NotImplementedError(\n \"The safety checker cannot be automatically loaded when loading weights `from_flax`.\"\n \" Please, pass `safety_checker=None` to `from_pretrained`, and load the safety checker\"\n \" separately if you need it.\"\n )\n\n # 5. Throw nice warnings / errors for fast accelerate loading\n if len(unused_kwargs) > 0:\n logger.warning(\n f\"Keyword arguments {unused_kwargs} are not expected by {pipeline_class.__name__} and will be ignored.\"\n )\n\n if low_cpu_mem_usage and not is_accelerate_available():\n low_cpu_mem_usage = False\n logger.warning(\n \"Cannot initialize model with low cpu memory usage because `accelerate` was not found in the\"\n \" environment. Defaulting to `low_cpu_mem_usage=False`. It is strongly recommended to install\"\n \" `accelerate` for faster and less memory-intense model loading. You can do so with: \\n```\\npip\"\n \" install accelerate\\n```\\n.\"\n )\n\n if device_map is not None and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Loading and dispatching requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `device_map=None`.\"\n )\n\n if low_cpu_mem_usage is True and not is_torch_version(\">=\", \"1.9.0\"):\n raise NotImplementedError(\n \"Low memory initialization requires torch >= 1.9.0. Please either update your PyTorch version or set\"\n \" `low_cpu_mem_usage=False`.\"\n )\n\n if low_cpu_mem_usage is False and device_map is not None:\n raise ValueError(\n f\"You cannot set `low_cpu_mem_usage` to False while using device_map={device_map} for loading and\"\n \" dispatching. Please make sure to set `low_cpu_mem_usage=True`.\"\n )\n\n # import it here to avoid circular import\n from diffusers import pipelines\n\n # 6. 
Load each module in the pipeline\n for name, (library_name, class_name) in logging.tqdm(init_dict.items(), desc=\"Loading pipeline components...\"):\n # 6.1 - now that JAX/Flax is an official framework of the library, we might load from Flax names\n class_name = class_name[4:] if class_name.startswith(\"Flax\") else class_name\n\n # 6.2 Define all importable classes\n is_pipeline_module = hasattr(pipelines, library_name)\n importable_classes = ALL_IMPORTABLE_CLASSES\n loaded_sub_model = None\n\n # 6.3 Use passed sub model or load class_name from library_name\n if name in passed_class_obj:\n # if the model is in a pipeline module, then we load it from the pipeline\n # check that passed_class_obj has correct parent class\n maybe_raise_or_warn(\n library_name, library, class_name, importable_classes, passed_class_obj, name, is_pipeline_module\n )\n\n loaded_sub_model = passed_class_obj[name]\n else:\n # load sub model\n loaded_sub_model = load_sub_model(\n library_name=library_name,\n class_name=class_name,\n importable_classes=importable_classes,\n pipelines=pipelines,\n is_pipeline_module=is_pipeline_module,\n pipeline_class=pipeline_class,\n torch_dtype=torch_dtype,\n provider=provider,\n sess_options=sess_options,\n device_map=device_map,\n max_memory=max_memory,\n offload_folder=offload_folder,\n offload_state_dict=offload_state_dict,\n model_variants=model_variants,\n name=name,\n from_flax=from_flax,\n variant=variant,\n low_cpu_mem_usage=low_cpu_mem_usage,\n cached_folder=cached_folder,\n revision=revision,\n )\n logger.info(\n f\"Loaded {name} as {class_name} from `{name}` subfolder of {pretrained_model_name_or_path}.\"\n )\n\n init_kwargs[name] = loaded_sub_model # UNet(...), # DiffusionSchedule(...)\n\n if pipeline_class._load_connected_pipes and os.path.isfile(os.path.join(cached_folder, \"README.md\")):\n modelcard = ModelCard.load(os.path.join(cached_folder, \"README.md\"))\n connected_pipes = {prefix: getattr(modelcard.data, prefix, [None])[0] for prefix in CONNECTED_PIPES_KEYS}\n load_kwargs = {\n \"cache_dir\": cache_dir,\n \"resume_download\": resume_download,\n \"force_download\": force_download,\n \"proxies\": proxies,\n \"local_files_only\": local_files_only,\n \"use_auth_token\": use_auth_token,\n \"revision\": revision,\n \"torch_dtype\": torch_dtype,\n \"custom_pipeline\": custom_pipeline,\n \"custom_revision\": custom_revision,\n \"provider\": provider,\n \"sess_options\": sess_options,\n \"device_map\": device_map,\n \"max_memory\": max_memory,\n \"offload_folder\": offload_folder,\n \"offload_state_dict\": offload_state_dict,\n \"low_cpu_mem_usage\": low_cpu_mem_usage,\n \"variant\": variant,\n \"use_safetensors\": use_safetensors,\n }\n\n def get_connected_passed_kwargs(prefix):\n connected_passed_class_obj = {\n k.replace(f\"{prefix}_\", \"\"): w for k, w in passed_class_obj.items() if k.split(\"_\")[0] == prefix\n }\n connected_passed_pipe_kwargs = {\n k.replace(f\"{prefix}_\", \"\"): w for k, w in passed_pipe_kwargs.items() if k.split(\"_\")[0] == prefix\n }\n\n connected_passed_kwargs = {**connected_passed_class_obj, **connected_passed_pipe_kwargs}\n return connected_passed_kwargs\n\n connected_pipes = {\n prefix: DiffusionPipeline.from_pretrained(\n repo_id, **load_kwargs.copy(), **get_connected_passed_kwargs(prefix)\n )\n for prefix, repo_id in connected_pipes.items()\n if repo_id is not None\n }\n\n for prefix, connected_pipe in connected_pipes.items():\n # add connected pipes to `init_kwargs` with <prefix>_<component_name>, e.g. 
\"prior_text_encoder\"\n init_kwargs.update(\n {\"_\".join([prefix, name]): component for name, component in connected_pipe.components.items()}\n )\n\n # 7. Potentially add passed objects if expected\n missing_modules = set(expected_modules) - set(init_kwargs.keys())\n passed_modules = list(passed_class_obj.keys())\n optional_modules = pipeline_class._optional_components\n if len(missing_modules) > 0 and missing_modules <= set(passed_modules + optional_modules):\n for module in missing_modules:\n init_kwargs[module] = passed_class_obj.get(module, None)\n elif len(missing_modules) > 0:\n passed_modules = set(list(init_kwargs.keys()) + list(passed_class_obj.keys())) - optional_kwargs\n raise ValueError(\n f\"Pipeline {pipeline_class} expected {expected_modules}, but only {passed_modules} were passed.\"\n )\n\n # 8. Instantiate the pipeline\n model = pipeline_class(**init_kwargs)\n\n # 9. Save where the model was instantiated from\n model.register_to_config(_name_or_path=pretrained_model_name_or_path)\n return model\n\n @property\n def name_or_path(self) -> str:\n return getattr(self.config, \"_name_or_path\", None)\n\n @property\n def _execution_device(self):\n r\"\"\"\n Returns the device on which the pipeline's models will be executed. After calling\n [`~DiffusionPipeline.enable_sequential_cpu_offload`] the execution device can only be inferred from\n Accelerate's module hooks.\n \"\"\"\n for name, model in self.components.items():\n if not isinstance(model, torch.nn.Module) or name in self._exclude_from_cpu_offload:\n continue\n\n if not hasattr(model, \"_hf_hook\"):\n return self.device\n for module in model.modules():\n if (\n hasattr(module, \"_hf_hook\")\n and hasattr(module._hf_hook, \"execution_device\")\n and module._hf_hook.execution_device is not None\n ):\n return torch.device(module._hf_hook.execution_device)\n return self.device\n\n def enable_model_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = \"cuda\"):\n r\"\"\"\n Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared\n to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`\n method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with\n `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.\n\n Arguments:\n gpu_id (`int`, *optional*):\n The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.\n device (`torch.Device` or `str`, *optional*, defaults to \"cuda\"):\n The PyTorch device type of the accelerator that shall be used in inference. If not specified, it will\n default to \"cuda\".\n \"\"\"\n if self.model_cpu_offload_seq is None:\n raise ValueError(\n \"Model CPU offload cannot be enabled because no `model_cpu_offload_seq` class attribute is set.\"\n )\n\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.17.0.dev0\"):\n from accelerate import cpu_offload_with_hook\n else:\n raise ImportError(\"`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.\")\n\n torch_device = torch.device(device)\n device_index = torch_device.index\n\n if gpu_id is not None and device_index is not None:\n raise ValueError(\n f\"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}\"\n f\"Cannot pass both. 
Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}\"\n )\n\n # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0\n self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, \"_offload_gpu_id\", 0)\n\n device_type = torch_device.type\n device = torch.device(f\"{device_type}:{self._offload_gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n device_mod = getattr(torch, self.device.type, None)\n if hasattr(device_mod, \"empty_cache\") and device_mod.is_available():\n device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n all_model_components = {k: v for k, v in self.components.items() if isinstance(v, torch.nn.Module)}\n\n self._all_hooks = []\n hook = None\n for model_str in self.model_cpu_offload_seq.split(\"->\"):\n model = all_model_components.pop(model_str, None)\n if not isinstance(model, torch.nn.Module):\n continue\n\n _, hook = cpu_offload_with_hook(model, device, prev_module_hook=hook)\n self._all_hooks.append(hook)\n\n # CPU offload models that are not in the seq chain unless they are explicitly excluded\n # these models will stay on CPU until maybe_free_model_hooks is called\n # some models cannot be in the seq chain because they are iteratively called, such as controlnet\n for name, model in all_model_components.items():\n if not isinstance(model, torch.nn.Module):\n continue\n\n if name in self._exclude_from_cpu_offload:\n model.to(device)\n else:\n _, hook = cpu_offload_with_hook(model, device)\n self._all_hooks.append(hook)\n\n def maybe_free_model_hooks(self):\n r\"\"\"\n Function that offloads all components, removes all model hooks that were added when using\n `enable_model_cpu_offload` and then applies them again. In case the model has not been offloaded this function\n is a no-op. Make sure to add this function to the end of the `__call__` function of your pipeline so that it\n functions correctly when applying enable_model_cpu_offload.\n \"\"\"\n if not hasattr(self, \"_all_hooks\") or len(self._all_hooks) == 0:\n # `enable_model_cpu_offload` has not been called, so silently do nothing\n return\n\n for hook in self._all_hooks:\n # offload model and remove hook from model\n hook.offload()\n hook.remove()\n\n # make sure the model is in the same state as before calling it\n self.enable_model_cpu_offload()\n\n def enable_sequential_cpu_offload(self, gpu_id: Optional[int] = None, device: Union[torch.device, str] = \"cuda\"):\n r\"\"\"\n Offloads all models to CPU using 🤗 Accelerate, significantly reducing memory usage. When called, the state\n dicts of all `torch.nn.Module` components (except those in `self._exclude_from_cpu_offload`) are saved to CPU\n and then moved to `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward`\n method called. Offloading happens on a submodule basis. Memory savings are higher than with\n `enable_model_cpu_offload`, but performance is lower.\n\n Arguments:\n gpu_id (`int`, *optional*):\n The ID of the accelerator that shall be used in inference. If not specified, it will default to 0.\n device (`torch.Device` or `str`, *optional*, defaults to \"cuda\"):\n The PyTorch device type of the accelerator that shall be used in inference. 
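A minimal sketch of the hook chaining used in `enable_model_cpu_offload` above, with two toy modules standing in for pipeline components; it assumes a CUDA device and accelerate >= 0.17.

```py
import torch
from accelerate import cpu_offload_with_hook

# Sketch: each hook offloads the previous model back to CPU when the next
# model in the chain starts its forward pass, so only one model occupies
# GPU memory at a time.
model_a, model_b = torch.nn.Linear(8, 8), torch.nn.Linear(8, 8)
hook = None
for model in (model_a, model_b):
    _, hook = cpu_offload_with_hook(model, torch.device("cuda:0"), prev_module_hook=hook)
```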
If not specified, it will\n default to \"cuda\".\n \"\"\"\n if is_accelerate_available() and is_accelerate_version(\">=\", \"0.14.0\"):\n from accelerate import cpu_offload\n else:\n raise ImportError(\"`enable_sequential_cpu_offload` requires `accelerate v0.14.0` or higher\")\n\n torch_device = torch.device(device)\n device_index = torch_device.index\n\n if gpu_id is not None and device_index is not None:\n raise ValueError(\n f\"You have passed both `gpu_id`={gpu_id} and an index as part of the passed device `device`={device}\"\n f\"Cannot pass both. Please make sure to either not define `gpu_id` or not pass the index as part of the device: `device`={torch_device.type}\"\n )\n\n # _offload_gpu_id should be set to passed gpu_id (or id in passed `device`) or default to previously set id or default to 0\n self._offload_gpu_id = gpu_id or torch_device.index or getattr(self, \"_offload_gpu_id\", 0)\n\n device_type = torch_device.type\n device = torch.device(f\"{device_type}:{self._offload_gpu_id}\")\n\n if self.device.type != \"cpu\":\n self.to(\"cpu\", silence_dtype_warnings=True)\n device_mod = getattr(torch, self.device.type, None)\n if hasattr(device_mod, \"empty_cache\") and device_mod.is_available():\n device_mod.empty_cache() # otherwise we don't see the memory savings (but they probably exist)\n\n for name, model in self.components.items():\n if not isinstance(model, torch.nn.Module):\n continue\n\n if name in self._exclude_from_cpu_offload:\n model.to(device)\n else:\n # make sure to offload buffers if not all high level weights\n # are of type nn.Module\n offload_buffers = len(model._parameters) > 0\n cpu_offload(model, device, offload_buffers=offload_buffers)\n\n @classmethod\n def download(cls, pretrained_model_name, **kwargs) -> Union[str, os.PathLike]:\n r\"\"\"\n Download and cache a PyTorch diffusion pipeline from pretrained pipeline weights.\n\n Parameters:\n pretrained_model_name (`str` or `os.PathLike`, *optional*):\n A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained pipeline\n hosted on the Hub.\n custom_pipeline (`str`, *optional*):\n Can be either:\n\n - A string, the *repository id* (for example `CompVis/ldm-text2im-large-256`) of a pretrained\n pipeline hosted on the Hub. The repository must contain a file called `pipeline.py` that defines\n the custom pipeline.\n\n - A string, the *file name* of a community pipeline hosted on GitHub under\n [Community](https://github.com/huggingface/diffusers/tree/main/examples/community). Valid file\n names must match the file name and not the pipeline script (`clip_guided_stable_diffusion`\n instead of `clip_guided_stable_diffusion.py`). Community pipelines are always loaded from the\n current `main` branch of GitHub.\n\n - A path to a *directory* (`./my_pipeline_directory/`) containing a custom pipeline. 
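A usage sketch for sequential offloading, with the repo id reused from the docstring examples: submodules are streamed to the accelerator per forward call, giving the lowest memory use at the cost of speed.

```py
import torch
from diffusers import DiffusionPipeline

# Sketch: after enabling sequential offload, do not call pipe.to("cuda");
# the `to` method above raises a ValueError in that case.
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16
)
pipe.enable_sequential_cpu_offload()
```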
The directory\n must contain a file called `pipeline.py` that defines the custom pipeline.\n\n <Tip warning={true}>\n\n 🧪 This is an experimental feature and may change in the future.\n\n </Tip>\n\n For more information on how to load and create custom pipelines, take a look at [How to contribute a\n community pipeline](https://huggingface.co/docs/diffusers/main/en/using-diffusers/contribute_pipeline).\n\n force_download (`bool`, *optional*, defaults to `False`):\n Whether or not to force the (re-)download of the model weights and configuration files, overriding the\n cached versions if they exist.\n resume_download (`bool`, *optional*, defaults to `False`):\n Whether or not to resume downloading the model weights and configuration files. If set to `False`, any\n incompletely downloaded files are deleted.\n proxies (`Dict[str, str]`, *optional*):\n A dictionary of proxy servers to use by protocol or endpoint, for example, `{'http': 'foo.bar:3128',\n 'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.\n output_loading_info(`bool`, *optional*, defaults to `False`):\n Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.\n local_files_only (`bool`, *optional*, defaults to `False`):\n Whether to only load local model weights and configuration files or not. If set to `True`, the model\n won't be downloaded from the Hub.\n use_auth_token (`str` or *bool*, *optional*):\n The token to use as HTTP bearer authorization for remote files. If `True`, the token generated from\n `diffusers-cli login` (stored in `~/.huggingface`) is used.\n revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, a commit id, or any identifier\n allowed by Git.\n custom_revision (`str`, *optional*, defaults to `\"main\"`):\n The specific model version to use. It can be a branch name, a tag name, or a commit id similar to\n `revision` when loading a custom pipeline from the Hub. It can be a 🤗 Diffusers version when loading a\n custom pipeline from GitHub, otherwise it defaults to `\"main\"` when loading from the Hub.\n mirror (`str`, *optional*):\n Mirror source to resolve accessibility issues if you're downloading a model in China. We do not\n guarantee the timeliness or safety of the source, and you should refer to the mirror site for more\n information.\n variant (`str`, *optional*):\n Load weights from a specified variant filename such as `\"fp16\"` or `\"ema\"`. This is ignored when\n loading `from_flax`.\n use_safetensors (`bool`, *optional*, defaults to `None`):\n If set to `None`, the safetensors weights are downloaded if they're available **and** if the\n safetensors library is installed. If set to `True`, the model is forcibly loaded from safetensors\n weights. If set to `False`, safetensors weights are not loaded.\n use_onnx (`bool`, *optional*, defaults to `False`):\n If set to `True`, ONNX weights will always be downloaded if present. If set to `False`, ONNX weights\n will never be downloaded. By default `use_onnx` defaults to the `_is_onnx` class attribute which is\n `False` for non-ONNX pipelines and `True` for ONNX pipelines. ONNX weights include both files ending\n with `.onnx` and `.pb`.\n trust_remote_code (`bool`, *optional*, defaults to `False`):\n Whether or not to allow for custom pipelines and components defined on the Hub in their own files. 
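A hedged sketch of the `trust_remote_code` opt-in with a placeholder repo id: when `model_index.json` references custom `.py` files in the repository, the download refuses to proceed unless you explicitly opt in, because that code later executes on your machine.

```py
from diffusers import DiffusionPipeline

# Sketch with a placeholder repo id (not a real repository): only pass
# trust_remote_code=True for repositories whose code you have read.
local_path = DiffusionPipeline.download(
    "your-org/your-custom-pipeline",  # placeholder repo id
    trust_remote_code=True,
)
```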
This\n option should only be set to `True` for repositories you trust and in which you have read the code, as\n it will execute code present on the Hub on your local machine.\n\n Returns:\n `os.PathLike`:\n A path to the downloaded pipeline.\n\n <Tip>\n\n To use private or [gated models](https://huggingface.co/docs/hub/models-gated#gated-models), log-in with\n `huggingface-cli login`.\n\n </Tip>\n\n \"\"\"\n cache_dir = kwargs.pop(\"cache_dir\", DIFFUSERS_CACHE)\n resume_download = kwargs.pop(\"resume_download\", False)\n force_download = kwargs.pop(\"force_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n local_files_only = kwargs.pop(\"local_files_only\", HF_HUB_OFFLINE)\n use_auth_token = kwargs.pop(\"use_auth_token\", None)\n revision = kwargs.pop(\"revision\", None)\n from_flax = kwargs.pop(\"from_flax\", False)\n custom_pipeline = kwargs.pop(\"custom_pipeline\", None)\n custom_revision = kwargs.pop(\"custom_revision\", None)\n variant = kwargs.pop(\"variant\", None)\n use_safetensors = kwargs.pop(\"use_safetensors\", None)\n use_onnx = kwargs.pop(\"use_onnx\", None)\n load_connected_pipeline = kwargs.pop(\"load_connected_pipeline\", False)\n trust_remote_code = kwargs.pop(\"trust_remote_code\", False)\n\n allow_pickle = False\n if use_safetensors is None:\n use_safetensors = True\n allow_pickle = True\n\n allow_patterns = None\n ignore_patterns = None\n\n model_info_call_error: Optional[Exception] = None\n if not local_files_only:\n try:\n info = model_info(\n pretrained_model_name,\n use_auth_token=use_auth_token,\n revision=revision,\n )\n except HTTPError as e:\n logger.warn(f\"Couldn't connect to the Hub: {e}.\\nWill try to load from local cache.\")\n local_files_only = True\n model_info_call_error = e # save error to reraise it if model is not cached locally\n\n if not local_files_only:\n config_file = hf_hub_download(\n pretrained_model_name,\n cls.config_name,\n cache_dir=cache_dir,\n revision=revision,\n proxies=proxies,\n force_download=force_download,\n resume_download=resume_download,\n use_auth_token=use_auth_token,\n )\n\n config_dict = cls._dict_from_json_file(config_file)\n ignore_filenames = config_dict.pop(\"_ignore_files\", [])\n\n # retrieve all folder_names that contain relevant files\n folder_names = [k for k, v in config_dict.items() if isinstance(v, list) and k != \"_class_name\"]\n\n filenames = {sibling.rfilename for sibling in info.siblings}\n model_filenames, variant_filenames = variant_compatible_siblings(filenames, variant=variant)\n\n diffusers_module = importlib.import_module(__name__.split(\".\")[0])\n pipelines = getattr(diffusers_module, \"pipelines\")\n\n # optionally create a custom component <> custom file mapping\n custom_components = {}\n for component in folder_names:\n module_candidate = config_dict[component][0]\n\n if module_candidate is None or not isinstance(module_candidate, str):\n continue\n\n candidate_file = os.path.join(component, module_candidate + \".py\")\n\n if candidate_file in filenames:\n custom_components[component] = module_candidate\n elif module_candidate not in LOADABLE_CLASSES and not hasattr(pipelines, module_candidate):\n raise ValueError(\n f\"{candidate_file} as defined in `model_index.json` does not exist in {pretrained_model_name} and is not a module in 'diffusers/pipelines'.\"\n )\n\n if len(variant_filenames) == 0 and variant is not None:\n deprecation_message = (\n f\"You are trying to load the model files of the `variant={variant}`, but no such modeling files are available.\"\n f\"The default model 
files: {model_filenames} will be loaded instead. Make sure to not load from `variant={variant}`\"\n \"if such variant modeling files are not available. Doing so will lead to an error in v0.24.0 as defaulting to non-variant\"\n \"modeling files is deprecated.\"\n )\n deprecate(\"no variant default\", \"0.24.0\", deprecation_message, standard_warn=False)\n\n # remove ignored filenames\n model_filenames = set(model_filenames) - set(ignore_filenames)\n variant_filenames = set(variant_filenames) - set(ignore_filenames)\n\n # if the whole pipeline is cached we don't have to ping the Hub\n if revision in DEPRECATED_REVISION_ARGS and version.parse(\n version.parse(__version__).base_version\n ) >= version.parse(\"0.22.0\"):\n warn_deprecated_model_variant(\n pretrained_model_name, use_auth_token, variant, revision, model_filenames\n )\n\n model_folder_names = {os.path.split(f)[0] for f in model_filenames if os.path.split(f)[0] in folder_names}\n\n custom_class_name = None\n if custom_pipeline is None and isinstance(config_dict[\"_class_name\"], (list, tuple)):\n custom_pipeline = config_dict[\"_class_name\"][0]\n custom_class_name = config_dict[\"_class_name\"][1]\n\n # all filenames compatible with variant will be added\n allow_patterns = list(model_filenames)\n\n # allow all patterns from non-model folders\n # this enables downloading schedulers, tokenizers, ...\n allow_patterns += [f\"{k}/*\" for k in folder_names if k not in model_folder_names]\n # add custom component files\n allow_patterns += [f\"{k}/{f}.py\" for k, f in custom_components.items()]\n # add custom pipeline file\n allow_patterns += [f\"{custom_pipeline}.py\"] if f\"{custom_pipeline}.py\" in filenames else []\n # also allow downloading config.json files with the model\n allow_patterns += [os.path.join(k, \"config.json\") for k in model_folder_names]\n\n allow_patterns += [\n SCHEDULER_CONFIG_NAME,\n CONFIG_NAME,\n cls.config_name,\n CUSTOM_PIPELINE_FILE_NAME,\n ]\n\n load_pipe_from_hub = custom_pipeline is not None and f\"{custom_pipeline}.py\" in filenames\n load_components_from_hub = len(custom_components) > 0\n\n if load_pipe_from_hub and not trust_remote_code:\n raise ValueError(\n f\"The repository for {pretrained_model_name} contains custom code in {custom_pipeline}.py which must be executed to correctly \"\n f\"load the model. You can inspect the repository content at https://hf.co/{pretrained_model_name}/blob/main/{custom_pipeline}.py.\\n\"\n f\"Please pass the argument `trust_remote_code=True` to allow custom code to be run.\"\n )\n\n if load_components_from_hub and not trust_remote_code:\n raise ValueError(\n f\"The repository for {pretrained_model_name} contains custom code in {'.py, '.join([os.path.join(k, v) for k,v in custom_components.items()])} which must be executed to correctly \"\n f\"load the model. 
You can inspect the repository content at {', '.join([f'https://hf.co/{pretrained_model_name}/{k}/{v}.py' for k,v in custom_components.items()])}.\\n\"\n f\"Please pass the argument `trust_remote_code=True` to allow custom code to be run.\"\n )\n\n # retrieve passed components that should not be downloaded\n pipeline_class = _get_pipeline_class(\n cls,\n config_dict,\n load_connected_pipeline=load_connected_pipeline,\n custom_pipeline=custom_pipeline,\n repo_id=pretrained_model_name if load_pipe_from_hub else None,\n hub_revision=revision,\n class_name=custom_class_name,\n cache_dir=cache_dir,\n revision=custom_revision,\n )\n expected_components, _ = cls._get_signature_keys(pipeline_class)\n passed_components = [k for k in expected_components if k in kwargs]\n\n if (\n use_safetensors\n and not allow_pickle\n and not is_safetensors_compatible(\n model_filenames, variant=variant, passed_components=passed_components\n )\n ):\n raise EnvironmentError(\n f\"Could not find the necessary `safetensors` weights in {model_filenames} (variant={variant})\"\n )\n if from_flax:\n ignore_patterns = [\"*.bin\", \"*.safetensors\", \"*.onnx\", \"*.pb\"]\n elif use_safetensors and is_safetensors_compatible(\n model_filenames, variant=variant, passed_components=passed_components\n ):\n ignore_patterns = [\"*.bin\", \"*.msgpack\"]\n\n use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx\n if not use_onnx:\n ignore_patterns += [\"*.onnx\", \"*.pb\"]\n\n safetensors_variant_filenames = {f for f in variant_filenames if f.endswith(\".safetensors\")}\n safetensors_model_filenames = {f for f in model_filenames if f.endswith(\".safetensors\")}\n if (\n len(safetensors_variant_filenames) > 0\n and safetensors_model_filenames != safetensors_variant_filenames\n ):\n logger.warn(\n f\"\\nA mixture of {variant} and non-{variant} filenames will be loaded.\\nLoaded {variant} filenames:\\n[{', '.join(safetensors_variant_filenames)}]\\nLoaded non-{variant} filenames:\\n[{', '.join(safetensors_model_filenames - safetensors_variant_filenames)}\\nIf this behavior is not expected, please check your folder structure.\"\n )\n else:\n ignore_patterns = [\"*.safetensors\", \"*.msgpack\"]\n\n use_onnx = use_onnx if use_onnx is not None else pipeline_class._is_onnx\n if not use_onnx:\n ignore_patterns += [\"*.onnx\", \"*.pb\"]\n\n bin_variant_filenames = {f for f in variant_filenames if f.endswith(\".bin\")}\n bin_model_filenames = {f for f in model_filenames if f.endswith(\".bin\")}\n if len(bin_variant_filenames) > 0 and bin_model_filenames != bin_variant_filenames:\n logger.warn(\n f\"\\nA mixture of {variant} and non-{variant} filenames will be loaded.\\nLoaded {variant} filenames:\\n[{', '.join(bin_variant_filenames)}]\\nLoaded non-{variant} filenames:\\n[{', '.join(bin_model_filenames - bin_variant_filenames)}\\nIf this behavior is not expected, please check your folder structure.\"\n )\n\n # Don't download any objects that are passed\n allow_patterns = [\n p for p in allow_patterns if not (len(p.split(\"/\")) == 2 and p.split(\"/\")[0] in passed_components)\n ]\n\n if pipeline_class._load_connected_pipes:\n allow_patterns.append(\"README.md\")\n\n # Don't download index files of forbidden patterns either\n ignore_patterns = ignore_patterns + [f\"{i}.index.*json\" for i in ignore_patterns]\n\n re_ignore_pattern = [re.compile(fnmatch.translate(p)) for p in ignore_patterns]\n re_allow_pattern = [re.compile(fnmatch.translate(p)) for p in allow_patterns]\n\n expected_files = [f for f in filenames if not 
any(p.match(f) for p in re_ignore_pattern)]\n expected_files = [f for f in expected_files if any(p.match(f) for p in re_allow_pattern)]\n\n snapshot_folder = Path(config_file).parent\n pipeline_is_cached = all((snapshot_folder / f).is_file() for f in expected_files)\n\n if pipeline_is_cached and not force_download:\n # if the pipeline is cached, we can directly return it\n # else call snapshot_download\n return snapshot_folder\n\n user_agent = {\"pipeline_class\": cls.__name__}\n if custom_pipeline is not None and not custom_pipeline.endswith(\".py\"):\n user_agent[\"custom_pipeline\"] = custom_pipeline\n\n # download all allow_patterns - ignore_patterns\n try:\n cached_folder = snapshot_download(\n pretrained_model_name,\n cache_dir=cache_dir,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n use_auth_token=use_auth_token,\n revision=revision,\n allow_patterns=allow_patterns,\n ignore_patterns=ignore_patterns,\n user_agent=user_agent,\n )\n\n # retrieve pipeline class from local file\n cls_name = cls.load_config(os.path.join(cached_folder, \"model_index.json\")).get(\"_class_name\", None)\n cls_name = cls_name[4:] if isinstance(cls_name, str) and cls_name.startswith(\"Flax\") else cls_name\n\n diffusers_module = importlib.import_module(__name__.split(\".\")[0])\n pipeline_class = getattr(diffusers_module, cls_name, None) if isinstance(cls_name, str) else None\n\n if pipeline_class is not None and pipeline_class._load_connected_pipes:\n modelcard = ModelCard.load(os.path.join(cached_folder, \"README.md\"))\n connected_pipes = sum([getattr(modelcard.data, k, []) for k in CONNECTED_PIPES_KEYS], [])\n for connected_pipe_repo_id in connected_pipes:\n download_kwargs = {\n \"cache_dir\": cache_dir,\n \"resume_download\": resume_download,\n \"force_download\": force_download,\n \"proxies\": proxies,\n \"local_files_only\": local_files_only,\n \"use_auth_token\": use_auth_token,\n \"variant\": variant,\n \"use_safetensors\": use_safetensors,\n }\n DiffusionPipeline.download(connected_pipe_repo_id, **download_kwargs)\n\n return cached_folder\n\n except FileNotFoundError:\n # Means we tried to load pipeline with `local_files_only=True` but the files have not been found in local cache.\n # This can happen in two cases:\n # 1. If the user passed `local_files_only=True` => we raise the error directly\n # 2. If we forced `local_files_only=True` when `model_info` failed => we raise the initial error\n if model_info_call_error is None:\n # 1. user passed `local_files_only=True`\n raise\n else:\n # 2. we forced `local_files_only=True` when `model_info` failed\n raise EnvironmentError(\n f\"Cannot load model {pretrained_model_name}: model is not cached locally and an error occurred\"\n \" while trying to fetch metadata from the Hub. 
Please check out the root cause in the stacktrace\"\n \" above.\"\n ) from model_info_call_error\n\n @classmethod\n def _get_signature_keys(cls, obj):\n parameters = inspect.signature(obj.__init__).parameters\n required_parameters = {k: v for k, v in parameters.items() if v.default == inspect._empty}\n optional_parameters = set({k for k, v in parameters.items() if v.default != inspect._empty})\n expected_modules = set(required_parameters.keys()) - {\"self\"}\n\n optional_names = list(optional_parameters)\n for name in optional_names:\n if name in cls._optional_components:\n expected_modules.add(name)\n optional_parameters.remove(name)\n\n return expected_modules, optional_parameters\n\n @property\n def components(self) -> Dict[str, Any]:\n r\"\"\"\n The `self.components` property can be useful to run different pipelines with the same weights and\n configurations without reallocating additional memory.\n\n Returns (`dict`):\n A dictionary containing all the modules needed to initialize the pipeline.\n\n Examples:\n\n ```py\n >>> from diffusers import (\n ... StableDiffusionPipeline,\n ... StableDiffusionImg2ImgPipeline,\n ... StableDiffusionInpaintPipeline,\n ... )\n\n >>> text2img = StableDiffusionPipeline.from_pretrained(\"runwayml/stable-diffusion-v1-5\")\n >>> img2img = StableDiffusionImg2ImgPipeline(**text2img.components)\n >>> inpaint = StableDiffusionInpaintPipeline(**text2img.components)\n ```\n \"\"\"\n expected_modules, optional_parameters = self._get_signature_keys(self)\n components = {\n k: getattr(self, k) for k in self.config.keys() if not k.startswith(\"_\") and k not in optional_parameters\n }\n\n if set(components.keys()) != expected_modules:\n raise ValueError(\n f\"{self} has been incorrectly initialized or {self.__class__} is incorrectly implemented. Expected\"\n f\" {expected_modules} to be defined, but {components.keys()} are defined.\"\n )\n\n return components\n\n @staticmethod\n def numpy_to_pil(images):\n \"\"\"\n Convert a NumPy image or a batch of images to a PIL image.\n \"\"\"\n return numpy_to_pil(images)\n\n def progress_bar(self, iterable=None, total=None):\n if not hasattr(self, \"_progress_bar_config\"):\n self._progress_bar_config = {}\n elif not isinstance(self._progress_bar_config, dict):\n raise ValueError(\n f\"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}.\"\n )\n\n if iterable is not None:\n return tqdm(iterable, **self._progress_bar_config)\n elif total is not None:\n return tqdm(total=total, **self._progress_bar_config)\n else:\n raise ValueError(\"Either `total` or `iterable` has to be defined.\")\n\n def set_progress_bar_config(self, **kwargs):\n self._progress_bar_config = kwargs\n\n def enable_xformers_memory_efficient_attention(self, attention_op: Optional[Callable] = None):\n r\"\"\"\n Enable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/). When this\n option is enabled, you should observe lower GPU memory usage and a potential speed up during inference. 
Speed\n up during training is not guaranteed.\n\n <Tip warning={true}>\n\n ⚠️ When memory efficient attention and sliced attention are both enabled, memory efficient attention takes\n precedent.\n\n </Tip>\n\n Parameters:\n attention_op (`Callable`, *optional*):\n Override the default `None` operator for use as `op` argument to the\n [`memory_efficient_attention()`](https://facebookresearch.github.io/xformers/components/ops.html#xformers.ops.memory_efficient_attention)\n function of xFormers.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import DiffusionPipeline\n >>> from xformers.ops import MemoryEfficientAttentionFlashAttentionOp\n\n >>> pipe = DiffusionPipeline.from_pretrained(\"stabilityai/stable-diffusion-2-1\", torch_dtype=torch.float16)\n >>> pipe = pipe.to(\"cuda\")\n >>> pipe.enable_xformers_memory_efficient_attention(attention_op=MemoryEfficientAttentionFlashAttentionOp)\n >>> # Workaround for not accepting attention shape using VAE for Flash Attention\n >>> pipe.vae.enable_xformers_memory_efficient_attention(attention_op=None)\n ```\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(True, attention_op)\n\n def disable_xformers_memory_efficient_attention(self):\n r\"\"\"\n Disable memory efficient attention from [xFormers](https://facebookresearch.github.io/xformers/).\n \"\"\"\n self.set_use_memory_efficient_attention_xformers(False)\n\n def set_use_memory_efficient_attention_xformers(\n self, valid: bool, attention_op: Optional[Callable] = None\n ) -> None:\n # Recursively walk through all the children.\n # Any children which exposes the set_use_memory_efficient_attention_xformers method\n # gets the message\n def fn_recursive_set_mem_eff(module: torch.nn.Module):\n if hasattr(module, \"set_use_memory_efficient_attention_xformers\"):\n module.set_use_memory_efficient_attention_xformers(valid, attention_op)\n\n for child in module.children():\n fn_recursive_set_mem_eff(child)\n\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module)]\n\n for module in modules:\n fn_recursive_set_mem_eff(module)\n\n def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = \"auto\"):\n r\"\"\"\n Enable sliced attention computation. When this option is enabled, the attention module splits the input tensor\n in slices to compute attention in several steps. For more than one attention head, the computation is performed\n sequentially over each head. This is useful to save some memory in exchange for a small speed decrease.\n\n <Tip warning={true}>\n\n ⚠️ Don't enable attention slicing if you're already using `scaled_dot_product_attention` (SDPA) from PyTorch\n 2.0 or xFormers. These attention computations are already very memory efficient so you won't need to enable\n this function. If you enable attention slicing with SDPA or xFormers, it can lead to serious slow downs!\n\n </Tip>\n\n Args:\n slice_size (`str` or `int`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n\n Examples:\n\n ```py\n >>> import torch\n >>> from diffusers import StableDiffusionPipeline\n\n >>> pipe = StableDiffusionPipeline.from_pretrained(\n ... \"runwayml/stable-diffusion-v1-5\",\n ... torch_dtype=torch.float16,\n ... use_safetensors=True,\n ... )\n\n >>> prompt = \"a photo of an astronaut riding a horse on mars\"\n >>> pipe.enable_attention_slicing()\n >>> image = pipe(prompt).images[0]\n ```\n \"\"\"\n self.set_attention_slice(slice_size)\n\n def disable_attention_slicing(self):\n r\"\"\"\n Disable sliced attention computation. If `enable_attention_slicing` was previously called, attention is\n computed in one step.\n \"\"\"\n # set slice_size = `None` to disable `attention slicing`\n self.enable_attention_slicing(None)\n\n def set_attention_slice(self, slice_size: Optional[int]):\n module_names, _ = self._get_signature_keys(self)\n modules = [getattr(self, n, None) for n in module_names]\n modules = [m for m in modules if isinstance(m, torch.nn.Module) and hasattr(m, \"set_attention_slice\")]\n\n for module in modules:\n module.set_attention_slice(slice_size)" } ]
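As a side note on the caching logic in the snippet above: `expected_files` is computed by compiling glob-style allow/ignore patterns into regular expressions via `fnmatch.translate`. A minimal, standalone sketch of that filtering step, assuming hypothetical filenames and patterns (the real values come from `model_info` and the safetensors/onnx branches shown above):

```py
import fnmatch
import re

# Hypothetical repo listing and patterns, for illustration only.
filenames = [
    "model_index.json",
    "unet/diffusion_pytorch_model.safetensors",
    "unet/diffusion_pytorch_model.bin",
    "vae/diffusion_pytorch_model.safetensors",
]
allow_patterns = ["*.json", "unet/*", "vae/*"]
ignore_patterns = ["*.bin", "*.msgpack"]

# fnmatch.translate turns a shell glob into an anchored regex;
# note that its `*` also matches `/`, so "*.bin" catches nested files.
re_allow = [re.compile(fnmatch.translate(p)) for p in allow_patterns]
re_ignore = [re.compile(fnmatch.translate(p)) for p in ignore_patterns]

expected_files = [
    f for f in filenames
    if any(p.match(f) for p in re_allow) and not any(p.match(f) for p in re_ignore)
]
print(expected_files)
# ['model_index.json', 'unet/diffusion_pytorch_model.safetensors',
#  'vae/diffusion_pytorch_model.safetensors']
```

If all `expected_files` already exist under the cached snapshot folder, the download is skipped entirely, which is exactly the `pipeline_is_cached` shortcut in the snippet.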
from typing import List, Optional, Tuple, Union from ...utils import logging from ...utils.torch_utils import randn_tensor from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline import torch
20,873
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DanceDiffusionPipeline(DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. Example: ```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) # To display in Google Colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """ if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. Make sure it's greater than or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. 
It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
# Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. logger = logging.get_logger(__name__) # pylint: disable=invalid-name class DanceDiffusionPipeline(DiffusionPipeline): r""" Pipeline for audio generation. This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods implemented for all pipelines (downloading, saving, running on a particular device, etc.). Parameters: unet ([`UNet1DModel`]): A `UNet1DModel` to denoise the encoded audio. scheduler ([`SchedulerMixin`]): A scheduler to be used in combination with `unet` to denoise the encoded audio latents. Can be one of [`IPNDMScheduler`]. """ model_cpu_offload_seq = "unet" def __init__(self, unet, scheduler): super().__init__() self.register_modules(unet=unet, scheduler=scheduler) @torch.no_grad() def __call__( self, batch_size: int = 1, num_inference_steps: int = 100, generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None, audio_length_in_s: Optional[float] = None, return_dict: bool = True, ) -> Union[AudioPipelineOutput, Tuple]: r""" The call function to the pipeline for generation. Args: batch_size (`int`, *optional*, defaults to 1): The number of audio samples to generate. num_inference_steps (`int`, *optional*, defaults to 100): The number of denoising steps. More denoising steps usually lead to a higher-quality audio sample at the expense of slower inference. generator (`torch.Generator`, *optional*): A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation deterministic. audio_length_in_s (`float`, *optional*, defaults to `self.unet.config.sample_size/self.unet.config.sample_rate`): The length of the generated audio sample in seconds. return_dict (`bool`, *optional*, defaults to `True`): Whether or not to return a [`~pipelines.AudioPipelineOutput`] instead of a plain tuple. Example: ```py from diffusers import DiffusionPipeline from scipy.io.wavfile import write model_id = "harmonai/maestro-150k" pipe = DiffusionPipeline.from_pretrained(model_id) pipe = pipe.to("cuda") audios = pipe(audio_length_in_s=4.0).audios # To save locally for i, audio in enumerate(audios): write(f"maestro_test_{i}.wav", pipe.unet.sample_rate, audio.transpose()) # To display in Google Colab import IPython.display as ipd for audio in audios: display(ipd.Audio(audio, rate=pipe.unet.sample_rate)) ``` Returns: [`~pipelines.AudioPipelineOutput`] or `tuple`: If `return_dict` is `True`, [`~pipelines.AudioPipelineOutput`] is returned, otherwise a `tuple` is returned where the first element is a list with the generated audio. """ if audio_length_in_s is None: audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate sample_size = audio_length_in_s * self.unet.config.sample_rate down_scale_factor = 2 ** len(self.unet.up_blocks) if sample_size < 3 * down_scale_factor: raise ValueError( f"{audio_length_in_s} is too small. 
Make sure it's greater than or equal to" f" {3 * down_scale_factor / self.unet.config.sample_rate}." ) original_sample_size = int(sample_size) if sample_size % down_scale_factor != 0: sample_size = ( (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1 ) * down_scale_factor logger.info( f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled" f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising" " process." ) sample_size = int(sample_size) dtype = next(self.unet.parameters()).dtype shape = (batch_size, self.unet.config.in_channels, sample_size) if isinstance(generator, list) and len(generator) != batch_size: raise ValueError( f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" f" size of {batch_size}. Make sure the batch size matches the length of the generators." )
audio = randn_tensor(shape, generator=generator, device=self._execution_device, dtype=dtype)
1
2023-11-18 01:40:55+00:00
24k
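The `__call__` body in the record above rounds the requested audio length up to a multiple of `2 ** len(unet.up_blocks)` (each up block doubles the sequence length) and trims the result back after denoising. A worked sketch of that arithmetic under assumed values; the real numbers come from `unet.config`:

```py
# Assumed illustrative values, not taken from a real checkpoint.
sample_rate = 22050
num_up_blocks = 3
audio_length_in_s = 1.0

sample_size = audio_length_in_s * sample_rate  # 22050.0 samples requested
down_scale_factor = 2 ** num_up_blocks         # 8
original_sample_size = int(sample_size)        # 22050, kept for trimming later

if sample_size % down_scale_factor != 0:
    # Round up to the next multiple of down_scale_factor: 22050 -> 22056.
    sample_size = (sample_size // down_scale_factor + 1) * down_scale_factor

print(int(sample_size), original_sample_size)  # 22056 22050
```

After denoising, the generated waveform is cut back to `original_sample_size`, so the caller receives exactly the requested duration.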
wjun0830/CGDETR
cg_detr/train.py
[ { "identifier": "BaseOptions", "path": "cg_detr/config.py", "snippet": "class BaseOptions(object):\n saved_option_filename = \"opt.json\"\n ckpt_filename = \"model.ckpt\"\n tensorboard_log_dir = \"tensorboard_log\"\n train_log_filename = \"train.log.txt\"\n eval_log_filename = \"eval.log.txt\"\n\n def __init__(self):\n self.parser = None\n self.initialized = False\n self.opt = None\n\n def initialize(self):\n self.initialized = True\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--dset_name\", type=str, choices=[\"hl\", 'tvsum', 'charadesSTA', 'tacos', 'nlq','youtube_uni'])\n parser.add_argument(\"--dset_domain\", type=str, \n help=\"Domain to train for tvsum dataset. (Only used for tvsum and youtube-hl)\")\n \n parser.add_argument(\"--eval_split_name\", type=str, default=\"val\",\n help=\"should match keys in video_duration_idx_path, must set for VCMR\")\n parser.add_argument(\"--debug\", action=\"store_true\",\n help=\"debug (fast) mode, break all loops, do not load all data into memory.\")\n parser.add_argument(\"--data_ratio\", type=float, default=1.0,\n help=\"how many training and eval data to use. 1.0: use all, 0.1: use 10%.\"\n \"Use small portion for debug purposes. Note this is different from --debug, \"\n \"which works by breaking the loops, typically they are not used together.\")\n parser.add_argument(\"--results_root\", type=str, default=\"results\")\n parser.add_argument(\"--exp_id\", type=str, default=None, help=\"id of this run, required at training\")\n parser.add_argument(\"--seed\", type=int, default=2018, help=\"random seed\")\n parser.add_argument(\"--device\", type=int, default=0, help=\"0 cuda, -1 cpu\")\n parser.add_argument(\"--num_workers\", type=int, default=0,\n help=\"num subprocesses used to load the data, 0: use main process\")\n parser.add_argument(\"--no_pin_memory\", action=\"store_true\",\n help=\"Don't use pin_memory=True for dataloader. 
\"\n \"ref: https://discuss.pytorch.org/t/should-we-set-non-blocking-to-true/38234/4\")\n\n # training config\n parser.add_argument(\"--lr\", type=float, default=1e-4, help=\"learning rate\")\n parser.add_argument(\"--lr_drop\", type=int, default=400, help=\"drop learning rate to 1/10 every lr_drop epochs\")\n parser.add_argument(\"--wd\", type=float, default=1e-4, help=\"weight decay\")\n parser.add_argument(\"--n_epoch\", type=int, default=200, help=\"number of epochs to run\")\n parser.add_argument(\"--max_es_cnt\", type=int, default=200,\n help=\"number of epochs to early stop, use -1 to disable early stop\")\n parser.add_argument(\"--bsz\", type=int, default=32, help=\"mini-batch size\")\n parser.add_argument(\"--eval_bsz\", type=int, default=100,\n help=\"mini-batch size at inference, for query\")\n parser.add_argument(\"--eval_epoch\", type=int, default=5,\n help=\"inference epoch\")\n parser.add_argument(\"--grad_clip\", type=float, default=0.1, help=\"perform gradient clip, -1: disable\")\n parser.add_argument(\"--eval_untrained\", action=\"store_true\", help=\"Evaluate on un-trained model\")\n parser.add_argument(\"--resume\", type=str, default=None,\n help=\"checkpoint path to resume or evaluate, without --resume_all this only load weights\")\n parser.add_argument(\"--resume_all\", action=\"store_true\",\n help=\"if --resume_all, load optimizer/scheduler/epoch as well\")\n parser.add_argument(\"--start_epoch\", type=int, default=None,\n help=\"if None, will be set automatically when using --resume_all\")\n\n # Data config\n parser.add_argument(\"--max_q_l\", type=int, default=-1)\n parser.add_argument(\"--max_v_l\", type=int, default=-1)\n parser.add_argument(\"--clip_length\", type=float, default=2)\n parser.add_argument(\"--max_windows\", type=int, default=5)\n\n parser.add_argument(\"--train_path\", type=str, default=None)\n parser.add_argument(\"--eval_path\", type=str, default=None,\n help=\"Evaluating during training, for Dev set. If None, will only do training, \")\n parser.add_argument(\"--no_norm_vfeat\", action=\"store_true\", help=\"Do not do normalize video feat\")\n parser.add_argument(\"--no_norm_tfeat\", action=\"store_true\", help=\"Do not do normalize text feat\")\n parser.add_argument(\"--v_feat_dirs\", type=str, nargs=\"+\",\n help=\"video feature dirs. If more than one, will concat their features. 
\"\n \"Note that sub ctx features are also accepted here.\")\n parser.add_argument(\"--t_feat_dir\", type=str, help=\"text/query feature dir\")\n parser.add_argument(\"--a_feat_dir\", type=str, help=\"audio feature dir\")\n parser.add_argument(\"--v_feat_dim\", type=int, help=\"video feature dim\")\n parser.add_argument(\"--t_feat_dim\", type=int, help=\"text/query feature dim\")\n parser.add_argument(\"--a_feat_dim\", type=int, help=\"audio feature dim\")\n parser.add_argument(\"--ctx_mode\", type=str, default=\"video_tef\")\n\n # Model config\n parser.add_argument('--position_embedding', default='sine', type=str, choices=('sine', 'learned'),\n help=\"Type of positional embedding to use on top of the image features\")\n # * Transformer\n parser.add_argument('--enc_layers', default=3, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dec_layers', default=3, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--t2v_layers', default=2, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--sent_layers', default=1, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--moment_layers', default=1, type=int,\n help=\"Number of decoding layers in the transformer\")\n parser.add_argument('--dummy_layers', default=2, type=int,\n help=\"Number of encoding layers in the transformer\")\n parser.add_argument('--dim_feedforward', default=1024, type=int,\n help=\"Intermediate size of the feedforward layers in the transformer blocks\")\n parser.add_argument('--hidden_dim', default=256, type=int,\n help=\"Size of the embeddings (dimension of the transformer)\")\n parser.add_argument('--input_dropout', default=0.5, type=float,\n help=\"Dropout applied in input\")\n parser.add_argument('--dropout', default=0.1, type=float,\n help=\"Dropout applied in the transformer\")\n parser.add_argument(\"--txt_drop_ratio\", default=0, type=float,\n help=\"drop txt_drop_ratio tokens from text input. 0.1=10%\")\n parser.add_argument(\"--use_txt_pos\", action=\"store_true\", help=\"use position_embedding for text as well.\")\n parser.add_argument('--nheads', default=8, type=int,\n help=\"Number of attention heads inside the transformer's attentions\")\n parser.add_argument('--num_queries', default=10, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--num_dummies', default=45, type=int,\n help=\"Number of dummy tokens\")\n parser.add_argument('--total_prompts', default=10, type=int,\n help=\"Number of query slots\")\n parser.add_argument('--num_prompts', default=1, type=int,\n help=\"Number of dummy tokens\")\n parser.add_argument('--pre_norm', action='store_true')\n # other model configs\n parser.add_argument(\"--n_input_proj\", type=int, default=2, help=\"#layers to encoder input\")\n parser.add_argument(\"--contrastive_hdim\", type=int, default=64, help=\"dim for contrastive embeddings\")\n parser.add_argument(\"--temperature\", type=float, default=0.07, help=\"temperature nce contrastive_align_loss\")\n # Loss\n\n parser.add_argument(\"--saliency_margin\", type=float, default=0.2)\n parser.add_argument('--no_aux_loss', dest='aux_loss', action='store_false',\n help=\"Disables auxiliary decoding losses (loss at each layer)\")\n parser.add_argument(\"--span_loss_type\", default=\"l1\", type=str, choices=['l1', 'ce'],\n help=\"l1: (center-x, width) regression. 
ce: (st_idx, ed_idx) classification.\")\n parser.add_argument(\"--contrastive_align_loss\", action=\"store_true\",\n help=\"Disable contrastive_align_loss between matched query spans and the text.\")\n # * Matcher\n parser.add_argument('--set_cost_span', default=10, type=float,\n help=\"L1 span coefficient in the matching cost\")\n parser.add_argument('--set_cost_giou', default=1, type=float,\n help=\"giou span coefficient in the matching cost\")\n parser.add_argument('--set_cost_class', default=4, type=float,\n help=\"Class coefficient in the matching cost\")\n\n # * Loss coefficients\n parser.add_argument(\"--lw_saliency\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_wattn\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_ms_align\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument(\"--lw_distill\", type=float, default=1.,\n help=\"weight for saliency loss, set to 0 will ignore\")\n parser.add_argument('--span_loss_coef', default=10, type=float)\n parser.add_argument('--giou_loss_coef', default=1, type=float)\n parser.add_argument('--label_loss_coef', default=4, type=float)\n parser.add_argument('--eos_coef', default=0.1, type=float,\n help=\"Relative classification weight of the no-object class\")\n parser.add_argument(\"--contrastive_align_loss_coef\", default=0.0, type=float)\n\n parser.add_argument(\"--no_sort_results\", action=\"store_true\",\n help=\"do not sort results, use this for moment query visualization\")\n parser.add_argument(\"--max_before_nms\", type=int, default=10)\n parser.add_argument(\"--max_after_nms\", type=int, default=10)\n parser.add_argument(\"--conf_thd\", type=float, default=0.0, help=\"only keep windows with conf >= conf_thd\")\n parser.add_argument(\"--nms_thd\", type=float, default=-1,\n help=\"additionally use non-maximum suppression \"\n \"(or non-minimum suppression for distance)\"\n \"to post-processing the predictions. \"\n \"-1: do not use nms. 
[0, 1]\")\n self.parser = parser\n\n def display_save(self, opt):\n args = vars(opt)\n # Display settings\n print(dict_to_markdown(vars(opt), max_str_len=120))\n # Save settings\n if not isinstance(self, TestOptions):\n option_file_path = os.path.join(opt.results_dir, self.saved_option_filename) # not yaml file indeed\n save_json(args, option_file_path, save_pretty=True)\n\n def parse(self, a_feat_dir=None):\n if not self.initialized:\n self.initialize()\n opt = self.parser.parse_args()\n\n if opt.debug:\n opt.results_root = os.path.sep.join(opt.results_root.split(os.path.sep)[:-1] + [\"debug_results\", ])\n opt.num_workers = 0\n\n if isinstance(self, TestOptions):\n # modify model_dir to absolute path\n # opt.model_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), \"results\", opt.model_dir)\n opt.model_dir = os.path.dirname(opt.resume)\n if a_feat_dir is not None:\n opt.a_feat_dir = a_feat_dir\n saved_options = load_json(os.path.join(opt.model_dir, self.saved_option_filename))\n for arg in saved_options: # use saved options to overwrite all BaseOptions args.\n if arg not in [\"results_root\", \"num_workers\", \"nms_thd\", \"debug\", # \"max_before_nms\", \"max_after_nms\"\n \"max_pred_l\", \"min_pred_l\",\n \"resume\", \"resume_all\", \"no_sort_results\"]:\n setattr(opt, arg, saved_options[arg])\n # opt.no_core_driver = True\n if opt.eval_results_dir is not None:\n opt.results_dir = opt.eval_results_dir\n else:\n if opt.exp_id is None:\n raise ValueError(\"--exp_id is required for at a training option!\")\n\n ctx_str = opt.ctx_mode + \"_sub\" if any([\"sub_ctx\" in p for p in opt.v_feat_dirs]) else opt.ctx_mode\n opt.results_dir = os.path.join(opt.results_root,\n \"-\".join([opt.dset_name, ctx_str, opt.exp_id,\n str(opt.enc_layers) + str(opt.dec_layers) + str(opt.t2v_layers) + str(opt.moment_layers) + str(opt.dummy_layers) + str(opt.sent_layers),\n 'ndum_' + str(opt.num_dummies), 'nprom_' + str(opt.num_prompts) + '_' + str(opt.total_prompts)]))\n mkdirp(opt.results_dir)\n save_fns = ['cg_detr/model.py', 'cg_detr/transformer.py']\n for save_fn in save_fns:\n shutil.copyfile(save_fn, os.path.join(opt.results_dir, os.path.basename(save_fn)))\n\n # save a copy of current code\n code_dir = os.path.dirname(os.path.realpath(__file__))\n code_zip_filename = os.path.join(opt.results_dir, \"code.zip\")\n make_zipfile(code_dir, code_zip_filename,\n enclosing_dir=\"code\",\n exclude_dirs_substring=\"results\",\n exclude_dirs=[\"results\", \"debug_results\", \"__pycache__\"],\n exclude_extensions=[\".pyc\", \".ipynb\", \".swap\"], )\n\n self.display_save(opt)\n\n opt.ckpt_filepath = os.path.join(opt.results_dir, self.ckpt_filename)\n opt.train_log_filepath = os.path.join(opt.results_dir, self.train_log_filename)\n opt.eval_log_filepath = os.path.join(opt.results_dir, self.eval_log_filename)\n opt.tensorboard_log_dir = os.path.join(opt.results_dir, self.tensorboard_log_dir)\n opt.device = torch.device(\"cuda\" if opt.device >= 0 else \"cpu\")\n opt.pin_memory = not opt.no_pin_memory\n\n opt.use_tef = \"tef\" in opt.ctx_mode\n opt.use_video = \"video\" in opt.ctx_mode\n if not opt.use_video:\n opt.v_feat_dim = 0\n if opt.use_tef:\n opt.v_feat_dim += 2\n\n self.opt = opt\n return opt" }, { "identifier": "StartEndDataset", "path": "cg_detr/start_end_dataset.py", "snippet": "class StartEndDataset(Dataset):\n Q_FEAT_TYPES = [\"pooler_output\", \"last_hidden_state\"]\n \"\"\"One line in data loaded from data_path.\"\n {\n \"qid\": 7803,\n \"query\": \"Man in gray top walks from outside to 
inside.\",\n \"duration\": 150,\n \"vid\": \"RoripwjYFp8_360.0_510.0\",\n \"relevant_clip_ids\": [13, 14, 15, 16, 17],\n \"relevant_windows\": [[26, 36]]\n }\n \"\"\"\n\n def __init__(self, dset_name, data_path, v_feat_dirs, q_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=32, max_v_l=75, data_ratio=1.0, ctx_mode=\"video\",\n normalize_v=True, normalize_t=True, load_labels=True,\n clip_len=2, max_windows=5, span_loss_type=\"l1\", txt_drop_ratio=0,\n dset_domain=None):\n self.dset_name = dset_name\n self.data_path = data_path\n self.data_ratio = data_ratio\n self.v_feat_dirs = v_feat_dirs \\\n if isinstance(v_feat_dirs, list) else [v_feat_dirs]\n self.q_feat_dir = q_feat_dir\n self.q_feat_type = q_feat_type\n if max_v_l == -1:\n max_v_l = 100000000\n if max_q_l == -1:\n max_q_l = 100\n self.max_q_l = max_q_l\n self.max_v_l = max_v_l\n self.ctx_mode = ctx_mode\n self.use_tef = \"tef\" in ctx_mode\n self.use_video = \"video\" in ctx_mode\n self.normalize_t = normalize_t\n self.normalize_v = normalize_v\n self.load_labels = load_labels\n self.clip_len = clip_len\n self.max_windows = max_windows # maximum number of windows to use as labels\n self.span_loss_type = span_loss_type\n self.txt_drop_ratio = txt_drop_ratio\n if \"val\" in data_path or \"test\" in data_path:\n assert txt_drop_ratio == 0\n\n\n # checks\n assert q_feat_type in self.Q_FEAT_TYPES\n\n # data\n self.data = self.load_data()\n \n # load specific domain data for tvsum dataset\n if self.dset_name in ['tvsum', 'tvsum_sfc']:\n target_domain = dset_domain\n assert target_domain in [\"BK\", \"BT\", \"DS\", \"FM\", \"GA\", \"MS\", \"PK\", \"PR\", \"VT\", \"VU\"]\n\n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data\n \n # load specific domain data for youtube-hl dataset\n if self.dset_name == 'youtube_uni':\n target_domain = dset_domain\n assert target_domain in [\"dog\", \"gymnastics\", \"parkour\", \"skating\", \"skiing\", \"surfing\"]\n \n new_data = []\n for d in self.data:\n if target_domain == d['domain']:\n new_data.append(d)\n self.data = new_data \n \n self.use_glove = False\n self.use_glove = 'vgg' in self.v_feat_dirs[0]\n\n if self.dset_name == 'charadesSTA' and self.use_glove:\n self.vocab = vocab.pretrained_aliases['glove.6B.300d']()\n self.vocab.itos.extend(['<unk>'])\n self.vocab.stoi['<unk>'] = self.vocab.vectors.shape[0]\n self.vocab.vectors = torch.cat(\n (self.vocab.vectors, torch.zeros(1, self.vocab.dim)), dim=0)\n self.embedding = nn.Embedding.from_pretrained(self.vocab.vectors)\n \n\n def load_data(self):\n datalist = load_jsonl(self.data_path)\n if self.data_ratio != 1:\n n_examples = int(len(datalist) * self.data_ratio)\n datalist = datalist[:n_examples]\n logger.info(\"Using {}% of the data: {} examples\"\n .format(self.data_ratio * 100, n_examples))\n return datalist\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, index):\n meta = self.data[index]\n\n model_inputs = dict()\n\n if self.use_glove:\n model_inputs[\"query_feat\"] = self.get_query(meta[\"query\"])\n else:\n model_inputs[\"query_feat\"] = self._get_query_feat_by_qid(meta[\"qid\"]) # (Dq, ) or (Lq, Dq)\n \n if self.use_video:\n model_inputs[\"video_feat\"] = self._get_video_feat_by_vid(meta[\"vid\"]) # (Lv, Dv)\n ctx_l = len(model_inputs[\"video_feat\"])\n else:\n ctx_l = self.max_v_l\n\n\n if self.use_tef:\n tef_st = torch.arange(0, ctx_l, 1.0) / ctx_l\n tef_ed = tef_st + 1.0 / ctx_l\n tef = torch.stack([tef_st, tef_ed], dim=1) # (Lv, 2)\n if 
self.use_video:\n model_inputs[\"video_feat\"] = torch.cat(\n [model_inputs[\"video_feat\"], tef], dim=1) # (Lv, Dv+2)\n else:\n model_inputs[\"video_feat\"] = tef\n\n\n if self.dset_name in ['tvsum']:\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_tvsum(meta_label, ctx_l)\n if len(model_inputs[\"saliency_all_labels\"]) != len(model_inputs[\"video_feat\"]):\n model_inputs[\"video_feat\"] = model_inputs[\"video_feat\"][:len(model_inputs[\"saliency_all_labels\"])]\n\n elif self.dset_name == 'youtube_uni':\n model_inputs[\"span_labels\"] = torch.tensor([[0., 0.]])\n meta_label = meta['label']\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all_youtube(meta_label, ctx_l)\n else:\n if \"relevant_windows\" in meta: ## For Qvhighlights test set\n model_inputs[\"span_labels\"] = self.get_span_labels(meta[\"relevant_windows\"], ctx_l) # (#windows, 2)\n if self.dset_name in ['charadesSTA', 'tacos', 'activitynet']: ## charades, tacos, nlq\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n elif self.dset_name in ['nlq']:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l, 2) # only one gt\n elif \"subs_train\" not in self.data_path:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\"saliency_all_labels\"] = \\\n self.get_saliency_labels_all(meta[\"relevant_clip_ids\"], meta[\"saliency_scores\"], ctx_l)\n else:\n model_inputs[\"saliency_pos_labels\"], model_inputs[\"saliency_neg_labels\"], model_inputs[\n \"saliency_all_labels\"] = \\\n self.get_saliency_labels_sub_as_query(meta[\"relevant_windows\"][0], meta[\"duration\"], ctx_l) # only one gt\n\n if 'qvhighlight' in self.data_path:\n model_inputs[\"relevant_clip_ids\"] = meta[\"relevant_clip_ids\"]\n model_inputs[\"vid\"] = meta[\"vid\"]\n model_inputs[\"qid\"] = meta[\"qid\"]\n return dict(meta=meta, model_inputs=model_inputs)\n\n def get_query(self, query):\n word_inds = torch.LongTensor(\n [self.vocab.stoi.get(w.lower(), 400000) for w in query.split()])\n return self.embedding(word_inds)\n\n def get_saliency_labels_sub_as_query(self, gt_window, duration, ctx_l, max_n=2):\n clip_len = duration / ctx_l\n gt_st = int(gt_window[0] / clip_len)\n gt_ed = max(0, min(int(gt_window[1] / clip_len), ctx_l) - 1)\n if gt_st > gt_ed:\n gt_st = gt_ed\n\n if gt_st != gt_ed:\n pos_clip_indices = random.sample(range(gt_st, gt_ed + 1), k=max_n)\n else:\n if self.dset_name == 'nlq':\n pos_clip_indices = [gt_st] * 2\n else:\n pos_clip_indices = [gt_st, gt_st]\n\n neg_pool = list(range(0, gt_st)) + list(range(gt_ed+1, ctx_l))\n try:\n neg_clip_indices = random.sample(neg_pool, k=max_n)\n except:\n neg_clip_indices = pos_clip_indices\n\n # For charades_sta\n score_array = np.zeros(ctx_l)\n score_array[gt_st:gt_ed + 1] = 1\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n\n def get_saliency_labels(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n 
\"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices\n\n def get_saliency_labels_all(self, rel_clip_ids, scores, ctx_l, max_n=1, add_easy_negative=True):\n \"\"\"Sum the scores from the three annotations, then take the two clips with the\n maximum scores as positive, and two with the minimum scores as negative.\n Args:\n rel_clip_ids: list(int), list of relevant clip ids\n scores: list([anno1_score, anno2_score, anno3_score]),\n ctx_l: int\n max_n: int, #clips to use as positive and negative, for easy and hard negative, respectively.\n add_easy_negative: bool, if True, sample eay negative outside the relevant_clip_ids.\n \"\"\"\n # indices inside rel_clip_ids\n scores = np.array(scores) # (#rel_clips, 3)\n agg_scores = np.sum(scores, 1) # (#rel_clips, )\n sort_indices = np.argsort(agg_scores) # increasing\n\n # score_array = [min(agg_scores[idx], ctx_l-1) for idx in range(ctx_l)]\n score_array = np.zeros(ctx_l)\n for idx in range(len(rel_clip_ids)):\n if rel_clip_ids[idx] >= ctx_l:\n score_array_new = np.zeros(ctx_l + 1)\n score_array_new[:ctx_l] = score_array\n score_array = score_array_new\n score_array[rel_clip_ids[idx]] = agg_scores[idx]\n\n # indices in the whole video\n # the min(_, ctx_l-1) here is incorrect, but should not cause\n # much troubles since this should be rarely used.\n hard_pos_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(rel_clip_ids[idx], ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)) - set(rel_clip_ids))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n 
neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_tvsum(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n agg_scores = np.sum(labels - np.ones_like(labels), axis=-1)[:ctx_l] # start from 1, so minus 1\n score_array = agg_scores / 80 * 12\n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n\n def get_saliency_labels_all_youtube(self, labels, ctx_l, max_n=1, add_easy_negative=False):\n \n # Youtube-hl only have binary score\n agg_scores = np.array(labels)[:, 0] # (L, 1) --> (L, )\n score_array = agg_scores * 1\n \n sort_indices = np.argsort(agg_scores) # increasing\n\n hard_pos_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[-max_n:]]\n hard_neg_clip_indices = [min(idx, ctx_l-1) for idx in sort_indices[:max_n]]\n easy_pos_clip_indices = []\n easy_neg_clip_indices = []\n if add_easy_negative:\n easy_neg_pool = list(set(range(ctx_l)))\n if len(easy_neg_pool) >= max_n:\n easy_pos_clip_indices = random.sample(rel_clip_ids, k=max_n)\n easy_neg_clip_indices = random.sample(easy_neg_pool, k=max_n)\n else: # copy the hard ones\n easy_pos_clip_indices = hard_pos_clip_indices\n easy_neg_clip_indices = hard_neg_clip_indices\n\n pos_clip_indices = hard_pos_clip_indices + easy_pos_clip_indices\n neg_clip_indices = hard_neg_clip_indices + easy_neg_clip_indices\n\n return pos_clip_indices, neg_clip_indices, score_array\n \n \n def get_span_labels(self, windows, ctx_l):\n \"\"\"\n windows: list([st, ed]) in seconds. E.g. 
[[26, 36]], corresponding st_ed clip_indices [[13, 17]] (inclusive)\n Note a maximum of `self.max_windows` windows are used.\n returns Tensor of shape (#windows, 2), each row is [center, width] normalized by video length\n \"\"\"\n if len(windows) > self.max_windows:\n random.shuffle(windows)\n windows = windows[:self.max_windows]\n if self.span_loss_type == \"l1\":\n windows = torch.Tensor(windows) / (ctx_l * self.clip_len) # normalized windows in xx\n windows = span_xx_to_cxw(windows) # normalized windows in cxw\n elif self.span_loss_type == \"ce\":\n windows = torch.Tensor([\n [int(w[0] / self.clip_len), min(int(w[1] / self.clip_len), ctx_l) - 1]\n for w in windows]).long() # inclusive\n else:\n raise NotImplementedError\n return windows\n\n def _get_query_feat_by_qid(self, qid):\n if self.dset_name == 'tvsum':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid))) # 'token', 'text'\n return torch.from_numpy(q_feat['token'])\n # youtube-hl\n elif self.dset_name == 'youtube_uni':\n q_feat = np.load(join(self.q_feat_dir, \"{}.npz\".format(qid)))\n return torch.from_numpy(q_feat['last_hidden_state'])\n \n elif self.dset_name in ['tacos', 'nlq']:\n q_feat_path = join(self.q_feat_dir, f\"{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n else:\n # QVhighlight dataset\n q_feat_path = join(self.q_feat_dir, f\"qid{qid}.npz\")\n q_feat = np.load(q_feat_path)[self.q_feat_type].astype(np.float32)\n if self.q_feat_type == \"last_hidden_state\":\n q_feat = q_feat[:self.max_q_l]\n if self.normalize_t:\n q_feat = l2_normalize_np_array(q_feat)\n if self.txt_drop_ratio > 0:\n q_feat = self.random_drop_rows(q_feat)\n return torch.from_numpy(q_feat) # (D, ) or (Lq, D)\n\n def random_drop_rows(self, embeddings):\n \"\"\"randomly mask num_drop rows in embeddings to be zero.\n Args:\n embeddings: np.ndarray (L, D)\n \"\"\"\n num_drop_rows = round(len(embeddings) * self.txt_drop_ratio)\n if num_drop_rows > 0:\n row_indices = np.random.choice(\n len(embeddings), size=num_drop_rows, replace=False)\n embeddings[row_indices] = 0\n return embeddings\n\n def _get_video_feat_by_vid(self, vid):\n if self.dset_name == 'tvsum':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n _feat_path = join(_feat_dir, f\"{vid}_rgb.npy\")\n _feat_rgb = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n\n _feat_path = join(_feat_dir, f\"{vid}_opt.npy\")\n _feat_opt = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n _feat = np.concatenate([_feat_rgb, _feat_opt], axis=-1)\n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n elif self.dset_name == 'youtube_uni':\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n # Only single npz files per directory\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n \n # _feat = _feat_rgb\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n 
v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list] # TODO do we need to cut the length over the min_len?\n v_feat = np.concatenate(v_feat_list, axis=1)\n\n else:\n v_feat_list = []\n for _feat_dir in self.v_feat_dirs:\n try:\n _feat_path = join(_feat_dir, f\"{vid}.npz\")\n _feat = np.load(_feat_path)[\"features\"][:self.max_v_l].astype(np.float32)\n except:\n _feat_path = join(_feat_dir, f\"{vid}.npy\")\n _feat = np.load(_feat_path)[:self.max_v_l].astype(np.float32)\n if self.normalize_v:\n _feat = l2_normalize_np_array(_feat)\n v_feat_list.append(_feat)\n # some features are slightly longer than the others\n min_len = min([len(e) for e in v_feat_list])\n v_feat_list = [e[:min_len] for e in v_feat_list]\n v_feat = np.concatenate(v_feat_list, axis=1)\n return torch.from_numpy(v_feat) # (Lv, D)" }, { "identifier": "start_end_collate", "path": "cg_detr/start_end_dataset.py", "snippet": "def start_end_collate(batch):\n batch_meta = [e[\"meta\"] for e in batch] # seems no need to collate ?\n\n model_inputs_keys = batch[0][\"model_inputs\"].keys()\n batched_data = dict()\n for k in model_inputs_keys:\n if k == \"span_labels\":\n batched_data[k] = [dict(spans=e[\"model_inputs\"][\"span_labels\"]) for e in batch]\n continue\n if k in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n batched_data[k] = torch.LongTensor([e[\"model_inputs\"][k] for e in batch])\n continue\n if k == \"saliency_all_labels\":\n pad_data, mask_data = pad_sequences_1d([e[\"model_inputs\"][k] for e in batch], dtype=np.float32, fixed_length=None)\n batched_data[k] = torch.tensor(pad_data, dtype=torch.float32)\n continue\n if k == 'qid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n if k == 'vid':\n batched_data[k] = [e[\"model_inputs\"][k] for e in batch]\n continue\n batched_data[k] = pad_sequences_1d(\n [e[\"model_inputs\"][k] for e in batch], dtype=torch.float32, fixed_length=None)\n return batch_meta, batched_data" }, { "identifier": "prepare_batch_inputs", "path": "cg_detr/start_end_dataset.py", "snippet": "def prepare_batch_inputs(batched_model_inputs, device, non_blocking=False):\n model_inputs = dict(\n src_txt=batched_model_inputs[\"query_feat\"][0].to(device, non_blocking=non_blocking),\n src_txt_mask=batched_model_inputs[\"query_feat\"][1].to(device, non_blocking=non_blocking),\n src_vid=batched_model_inputs[\"video_feat\"][0].to(device, non_blocking=non_blocking),\n src_vid_mask=batched_model_inputs[\"video_feat\"][1].to(device, non_blocking=non_blocking),\n vid=batched_model_inputs[\"vid\"],\n qid=batched_model_inputs[\"qid\"],\n )\n targets = {}\n\n if \"span_labels\" in batched_model_inputs:\n targets[\"span_labels\"] = [\n dict(spans=e[\"spans\"].to(device, non_blocking=non_blocking))\n for e in batched_model_inputs[\"span_labels\"]\n ]\n if \"saliency_pos_labels\" in batched_model_inputs:\n for name in [\"saliency_pos_labels\", \"saliency_neg_labels\"]:\n targets[name] = batched_model_inputs[name].to(device, non_blocking=non_blocking)\n\n if \"saliency_all_labels\" in batched_model_inputs:\n targets[\"saliency_all_labels\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets[\"relevant_clips\"] = batched_model_inputs[\"saliency_all_labels\"].to(device, non_blocking=non_blocking)\n targets = None if len(targets) == 0 else targets\n return model_inputs, targets" }, { "identifier": "eval_epoch", "path": 
"cg_detr/inference.py", "snippet": "def eval_epoch(model, eval_dataset, opt, save_submission_filename, epoch_i=None, criterion=None, tb_writer=None):\n logger.info(\"Generate submissions\")\n model.eval()\n if criterion is not None and eval_dataset.load_labels:\n criterion.eval()\n else:\n criterion = None\n\n if opt.dset_name == 'tacos':\n shuffle = True\n else:\n shuffle = False\n\n eval_loader = DataLoader(\n eval_dataset,\n collate_fn=start_end_collate,\n batch_size=opt.eval_bsz,\n num_workers=opt.num_workers,\n shuffle=shuffle,\n pin_memory=opt.pin_memory\n )\n\n\n # tvsum \n if opt.dset_name in ['tvsum', 'youtube_uni']:\n metrics, eval_loss_meters = compute_hl_results(model, eval_loader, opt, epoch_i, criterion, tb_writer)\n \n # to match original save format\n submission = [\n {\"brief\": metrics}\n ]\n submission_path = os.path.join(opt.results_dir, \"latest_metric.jsonl\")\n save_jsonl(submission, submission_path)\n\n return submission[0], submission[0], eval_loss_meters, [submission_path]\n\n else:\n submission, eval_loss_meters = get_eval_res(model, eval_loader, opt, epoch_i, criterion, tb_writer)\n\n if opt.dset_name in ['charadesSTA', 'tacos', 'nlq']:\n new_submission = []\n for s in submission:\n s.pop('pred_saliency_scores', None)\n new_submission.append(s)\n submission = new_submission\n\n if opt.no_sort_results:\n save_submission_filename = save_submission_filename.replace(\".jsonl\", \"_unsorted.jsonl\")\n metrics, metrics_nms, latest_file_paths = eval_epoch_post_processing(\n submission, opt, eval_dataset.data, save_submission_filename)\n return metrics, metrics_nms, eval_loss_meters, latest_file_paths" }, { "identifier": "start_inference", "path": "cg_detr/inference.py", "snippet": "def start_inference(train_opt=None, split=None, splitfile=None):\n if train_opt is not None:\n opt = TestOptions().parse(train_opt.a_feat_dir)\n else:\n opt = TestOptions().parse()\n if split is not None:\n opt.eval_split_name = split\n if splitfile is not None:\n opt.eval_path = splitfile\n\n print(opt.eval_split_name)\n print(opt.eval_path)\n logger.info(\"Setup config, data and model...\")\n\n\n cudnn.benchmark = True\n cudnn.deterministic = False\n\n assert opt.eval_path is not None\n if opt.eval_split_name == 'val':\n loadlabel = True\n else:\n loadlabel = False\n\n eval_dataset = StartEndDataset(\n dset_name=opt.dset_name,\n data_path=opt.eval_path,\n v_feat_dirs=opt.v_feat_dirs,\n q_feat_dir=opt.t_feat_dir,\n q_feat_type=\"last_hidden_state\",\n max_q_l=opt.max_q_l,\n max_v_l=opt.max_v_l,\n ctx_mode=opt.ctx_mode,\n data_ratio=opt.data_ratio,\n normalize_v=not opt.no_norm_vfeat,\n normalize_t=not opt.no_norm_tfeat,\n clip_len=opt.clip_length,\n max_windows=opt.max_windows,\n load_labels=loadlabel, # opt.eval_split_name == \"val\",\n span_loss_type=opt.span_loss_type,\n txt_drop_ratio=0,\n dset_domain=opt.dset_domain,\n )\n\n\n\n model, criterion, _, _ = setup_model(opt)\n\n save_submission_filename = \"hl_{}_submission.jsonl\".format(\n opt.eval_split_name)\n # save_submission_filename = \"inference_{}_{}_{}_preds.jsonl\".format(\n # opt.dset_name, opt.eval_split_name, opt.eval_id)\n logger.info(\"Starting inference...\")\n with torch.no_grad():\n metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \\\n eval_epoch(model, eval_dataset, opt, save_submission_filename, criterion=criterion)\n if opt.eval_split_name == 'val':\n logger.info(\"metrics_no_nms {}\".format(pprint.pformat(metrics_no_nms[\"brief\"], indent=4)))\n if metrics_nms is not None:\n 
logger.info(\"metrics_nms {}\".format(pprint.pformat(metrics_nms[\"brief\"], indent=4)))" }, { "identifier": "setup_model", "path": "cg_detr/inference.py", "snippet": "def setup_model(opt):\n \"\"\"setup model/optimizer/scheduler and load checkpoints when needed\"\"\"\n logger.info(\"setup model/optimizer/scheduler\")\n model, criterion = build_model(opt)\n if opt.device.type == \"cuda\":\n logger.info(\"CUDA enabled.\")\n model.to(opt.device)\n criterion.to(opt.device)\n\n param_dicts = [{\"params\": [p for n, p in model.named_parameters() if p.requires_grad]}]\n optimizer = torch.optim.AdamW(param_dicts, lr=opt.lr, weight_decay=opt.wd)\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, opt.lr_drop)\n\n if opt.resume is not None:\n logger.info(f\"Load checkpoint from {opt.resume}\")\n checkpoint = torch.load(opt.resume, map_location=\"cpu\")\n from collections import OrderedDict\n new_state_dict = OrderedDict()\n if 'pt' in opt.resume[:-4]:\n if 'asr' in opt.resume[:25]:\n model.load_state_dict(checkpoint[\"model\"])\n else:\n for k, v in checkpoint[\"model\"].items():\n name = k[7:] # remove `module.`\n new_state_dict[name] = v\n # model.load_state_dict(checkpoint[\"model\"])\n model.load_state_dict(new_state_dict)\n else:\n model.load_state_dict(checkpoint[\"model\"])\n if opt.resume_all:\n optimizer.load_state_dict(checkpoint['optimizer'])\n lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])\n opt.start_epoch = checkpoint['epoch'] + 1\n logger.info(f\"Loaded model saved at epoch {checkpoint['epoch']} from checkpoint: {opt.resume}\")\n else:\n logger.warning(\"If you intend to evaluate the model, please specify --resume with ckpt path\")\n\n return model, criterion, optimizer, lr_scheduler" }, { "identifier": "AverageMeter", "path": "utils/basic_utils.py", "snippet": "class AverageMeter(object):\n \"\"\"Computes and stores the average and current/max/min value\"\"\"\n def __init__(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n self.max = -1e10\n self.min = 1e10\n\n def update(self, val, n=1):\n self.max = max(val, self.max)\n self.min = min(val, self.min)\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count" }, { "identifier": "dict_to_markdown", "path": "utils/basic_utils.py", "snippet": "def dict_to_markdown(d, max_str_len=120):\n # convert list into its str representation\n d = {k: v.__repr__() if isinstance(v, list) else v for k, v in d.items()}\n # truncate string that is longer than max_str_len\n if max_str_len is not None:\n d = {k: v[-max_str_len:] if isinstance(v, str) else v for k, v in d.items()}\n return pd.DataFrame(d, index=[0]).transpose().to_markdown()" }, { "identifier": "count_parameters", "path": "utils/model_utils.py", "snippet": "def count_parameters(model, verbose=True):\n \"\"\"Count number of parameters in PyTorch model,\n References: https://discuss.pytorch.org/t/how-do-i-check-the-number-of-parameters-of-a-model/4325/7.\n\n from utils.utils import count_parameters\n count_parameters(model)\n import sys\n sys.exit(1)\n \"\"\"\n n_all = sum(p.numel() for p in model.parameters())\n n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)\n if verbose:\n print(\"Parameter Count: all {:,d}; trainable {:,d}\".format(n_all, n_trainable))\n return n_all, n_trainable" } ]
import os import time import json import pprint import random import numpy as np import torch import torch.nn as nn import torch.backends.cudnn as cudnn import logging import sys from tqdm import tqdm, trange from collections import defaultdict from torch.utils.data import DataLoader from torch.utils.tensorboard import SummaryWriter from cg_detr.config import BaseOptions from cg_detr.start_end_dataset import \ StartEndDataset, start_end_collate, prepare_batch_inputs from cg_detr.inference import eval_epoch, start_inference, setup_model from utils.basic_utils import AverageMeter, dict_to_markdown from utils.model_utils import count_parameters
14,870
                es_cnt = 0
                prev_best_score = stop_score
                checkpoint = {
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "lr_scheduler": lr_scheduler.state_dict(),
                    "epoch": epoch_i,
                    "opt": opt
                }
                torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"))

                best_file_paths = [e.replace("latest", "best") for e in latest_file_paths]
                for src, tgt in zip(latest_file_paths, best_file_paths):
                    os.renames(src, tgt)
                logger.info("The checkpoint file has been updated.")
            else:
                es_cnt += 1
                if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt:  # early stop
                    with open(opt.train_log_filepath, "a") as f:
                        f.write(f"Early Stop at epoch {epoch_i}")
                    logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n")
                    break

            # save ckpt
            checkpoint = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch_i,
                "opt": opt
            }
            torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt"))

        save_interval = 10 if "subs_train" in opt.train_path else 50  # smaller for pretrain
        if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0:  # additional copies
            checkpoint = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch_i,
                "opt": opt
            }
            torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt"))

        if opt.debug:
            break

    tb_writer.close()


def start_training():
    logger.info("Setup config, data and model...")
    opt = BaseOptions().parse()
    set_seed(opt.seed)
    if opt.debug:  # keep the model run deterministically
        # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config.
        # Enable this only when input size is fixed.
        cudnn.benchmark = False
        cudnn.deterministic = True

    dataset_config = dict(
        dset_name=opt.dset_name,
        data_path=opt.train_path,
        v_feat_dirs=opt.v_feat_dirs,
        q_feat_dir=opt.t_feat_dir,
        q_feat_type="last_hidden_state",
        max_q_l=opt.max_q_l,
        max_v_l=opt.max_v_l,
        ctx_mode=opt.ctx_mode,
        data_ratio=opt.data_ratio,
        normalize_v=not opt.no_norm_vfeat,
        normalize_t=not opt.no_norm_tfeat,
        clip_len=opt.clip_length,
        max_windows=opt.max_windows,
        span_loss_type=opt.span_loss_type,
        txt_drop_ratio=opt.txt_drop_ratio,
        dset_domain=opt.dset_domain,
    )
    dataset_config["data_path"] = opt.train_path
    train_dataset = StartEndDataset(**dataset_config)

    if opt.eval_path is not None:
        dataset_config["data_path"] = opt.eval_path
        dataset_config["txt_drop_ratio"] = 0
        dataset_config["q_feat_dir"] = opt.t_feat_dir.replace("sub_features", "text_features")  # for pretraining
        # dataset_config["load_labels"] = False  # uncomment to calculate eval loss
        eval_dataset = StartEndDataset(**dataset_config)
    else:
        eval_dataset = None

    model, criterion, optimizer, lr_scheduler = setup_model(opt)
    logger.info(f"Model {model}")
    count_parameters(model)
    logger.info("Start Training...")

    # For tvsum dataset, use train_hl function
    if opt.dset_name in ['tvsum', 'youtube_uni']:
        train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)
    else:
        train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)

    return opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"), opt.eval_split_name, opt.eval_path, opt.debug, opt


if __name__ == '__main__':
    best_ckpt_path, eval_split_name, eval_path, debug, opt = start_training()
    if not debug:
        input_args = ["--resume", best_ckpt_path,
                      "--eval_split_name", eval_split_name,
                      "--eval_path", eval_path]
        sys.argv[1:] = input_args
        logger.info("\n\n\nFINISHED TRAINING!!!")
        logger.info("Evaluating model at {}".format(best_ckpt_path))
        logger.info("Input args {}".format(sys.argv[1:]))
logger = logging.getLogger(__name__)
logging.basicConfig(format="%(asctime)s.%(msecs)03d:%(levelname)s:%(name)s - %(message)s",
                    datefmt="%Y-%m-%d %H:%M:%S",
                    level=logging.INFO)


def set_seed(seed, use_cuda=True):
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if use_cuda:
        torch.cuda.manual_seed_all(seed)


def train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer):
    logger.info(f"[Epoch {epoch_i+1}]")
    model.train()
    criterion.train()

    # init meters
    time_meters = defaultdict(AverageMeter)
    loss_meters = defaultdict(AverageMeter)

    num_training_examples = len(train_loader)
    timer_dataloading = time.time()
    for batch_idx, batch in tqdm(enumerate(train_loader),
                                 desc="Training Iteration",
                                 total=num_training_examples):
        time_meters["dataloading_time"].update(time.time() - timer_dataloading)

        timer_start = time.time()
        model_inputs, targets = prepare_batch_inputs(batch[1], opt.device, non_blocking=opt.pin_memory)
        time_meters["prepare_inputs_time"].update(time.time() - timer_start)

        timer_start = time.time()
        outputs = model(**model_inputs, targets=targets)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict
        losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict)
        time_meters["model_forward_time"].update(time.time() - timer_start)

        timer_start = time.time()
        optimizer.zero_grad()
        losses.backward()
        if opt.grad_clip > 0:
            nn.utils.clip_grad_norm_(model.parameters(), opt.grad_clip)
        optimizer.step()
        time_meters["model_backward_time"].update(time.time() - timer_start)

        loss_dict["loss_overall"] = float(losses)  # for logging only
        for k, v in loss_dict.items():
            loss_meters[k].update(float(v) * weight_dict[k] if k in weight_dict else float(v))

        timer_dataloading = time.time()
        if opt.debug and batch_idx == 3:
            break

    # print/add logs
    tb_writer.add_scalar("Train/lr", float(optimizer.param_groups[0]["lr"]), epoch_i+1)
    for k, v in loss_meters.items():
        tb_writer.add_scalar("Train/{}".format(k), v.avg, epoch_i+1)

    to_write = opt.train_log_txt_formatter.format(
        time_str=time.strftime("%Y_%m_%d_%H_%M_%S"),
        epoch=epoch_i+1,
        loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in loss_meters.items()]))
    with open(opt.train_log_filepath, "a") as f:
        f.write(to_write)

    logger.info("Epoch time stats:")
    for name, meter in time_meters.items():
        d = {k: f"{getattr(meter, k):.4f}" for k in ["max", "min", "avg"]}
        logger.info(f"{name} ==> {d}")


def train(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt):
    if opt.device.type == "cuda":
        logger.info("CUDA enabled.")
        model.to(opt.device)

    tb_writer = SummaryWriter(opt.tensorboard_log_dir)
    tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None))
    opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n"
    opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n"

    train_loader = DataLoader(
        train_dataset,
        collate_fn=start_end_collate,
        batch_size=opt.bsz,
        num_workers=opt.num_workers,
        shuffle=True,
        pin_memory=opt.pin_memory
    )

    prev_best_score = 0.
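    # es_cnt counts consecutive evaluations without improvement over prev_best_score;
    # training early-stops once it exceeds opt.max_es_cnt (see the else branch below).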
    es_cnt = 0
    # start_epoch = 0
    if opt.start_epoch is None:
        start_epoch = -1 if opt.eval_untrained else 0
    else:
        start_epoch = opt.start_epoch

    save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name)
    for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"):
        if epoch_i > -1:
            train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer)
            lr_scheduler.step()
        eval_epoch_interval = opt.eval_epoch
        if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0:
            with torch.no_grad():
                metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \
                    eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer)

            # log
            to_write = opt.eval_log_txt_formatter.format(
                time_str=time.strftime("%Y_%m_%d_%H_%M_%S"),
                epoch=epoch_i,
                loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]),
                eval_metrics_str=json.dumps(metrics_no_nms))

            with open(opt.eval_log_filepath, "a") as f:
                f.write(to_write)
            logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4)))
            if metrics_nms is not None:
                logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4)))

            metrics = metrics_no_nms
            for k, v in metrics["brief"].items():
                tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1)

            if opt.dset_name in ['hl']:
                stop_score = metrics["brief"]["MR-full-mAP"]
            else:
                stop_score = (metrics["brief"]["MR-full-R1@0.5"] + metrics["brief"]["MR-full-R1@0.7"]) / 2

            if stop_score > prev_best_score:
                es_cnt = 0
                prev_best_score = stop_score
                checkpoint = {
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "lr_scheduler": lr_scheduler.state_dict(),
                    "epoch": epoch_i,
                    "opt": opt
                }
                torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"))

                best_file_paths = [e.replace("latest", "best") for e in latest_file_paths]
                for src, tgt in zip(latest_file_paths, best_file_paths):
                    os.renames(src, tgt)
                logger.info("The checkpoint file has been updated.")
            else:
                es_cnt += 1
                if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt:  # early stop
                    with open(opt.train_log_filepath, "a") as f:
                        f.write(f"Early Stop at epoch {epoch_i}")
                    logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n")
                    break

            # save ckpt
            checkpoint = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch_i,
                "opt": opt
            }
            torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt"))

        # save_interval = 10 if "subs_train" in opt.train_path else 50  # smaller for pretrain
        # if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0:  # additional copies
        #     checkpoint = {
        #         "model": model.state_dict(),
        #         "optimizer": optimizer.state_dict(),
        #         "epoch": epoch_i,
        #         "opt": opt
        #     }
        #     torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt"))

        if opt.debug:
            break

    tb_writer.close()


def train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, val_dataset, opt):
    if opt.device.type == "cuda":
        logger.info("CUDA enabled.")
        model.to(opt.device)

    tb_writer = SummaryWriter(opt.tensorboard_log_dir)
    tb_writer.add_text("hyperparameters", dict_to_markdown(vars(opt), max_str_len=None))
    opt.train_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str}\n"
    opt.eval_log_txt_formatter = "{time_str} [Epoch] {epoch:03d} [Loss] {loss_str} [Metrics] {eval_metrics_str}\n"

    train_loader = DataLoader(
        train_dataset,
        collate_fn=start_end_collate,
        batch_size=opt.bsz,
        num_workers=opt.num_workers,
        shuffle=True,
        pin_memory=opt.pin_memory
    )

    prev_best_score = 0.
    es_cnt = 0
    # start_epoch = 0
    if opt.start_epoch is None:
        start_epoch = -1 if opt.eval_untrained else 0
    else:
        start_epoch = opt.start_epoch

    save_submission_filename = "latest_{}_{}_preds.jsonl".format(opt.dset_name, opt.eval_split_name)
    for epoch_i in trange(start_epoch, opt.n_epoch, desc="Epoch"):
        if epoch_i > -1:
            train_epoch(model, criterion, train_loader, optimizer, opt, epoch_i, tb_writer)
            lr_scheduler.step()
        eval_epoch_interval = 5
        if opt.eval_path is not None and (epoch_i + 1) % eval_epoch_interval == 0:
            with torch.no_grad():
                metrics_no_nms, metrics_nms, eval_loss_meters, latest_file_paths = \
                    eval_epoch(model, val_dataset, opt, save_submission_filename, epoch_i, criterion, tb_writer)

            # log
            to_write = opt.eval_log_txt_formatter.format(
                time_str=time.strftime("%Y_%m_%d_%H_%M_%S"),
                epoch=epoch_i,
                loss_str=" ".join(["{} {:.4f}".format(k, v.avg) for k, v in eval_loss_meters.items()]),
                eval_metrics_str=json.dumps(metrics_no_nms))

            with open(opt.eval_log_filepath, "a") as f:
                f.write(to_write)
            logger.info("metrics_no_nms {}".format(pprint.pformat(metrics_no_nms["brief"], indent=4)))
            if metrics_nms is not None:
                logger.info("metrics_nms {}".format(pprint.pformat(metrics_nms["brief"], indent=4)))

            metrics = metrics_no_nms
            for k, v in metrics["brief"].items():
                tb_writer.add_scalar(f"Eval/{k}", float(v), epoch_i+1)

            # stop_score = metrics["brief"]["MR-full-mAP"]
            stop_score = metrics["brief"]["mAP"]
            if stop_score > prev_best_score:
                es_cnt = 0
                prev_best_score = stop_score
                checkpoint = {
                    "model": model.state_dict(),
                    "optimizer": optimizer.state_dict(),
                    "lr_scheduler": lr_scheduler.state_dict(),
                    "epoch": epoch_i,
                    "opt": opt
                }
                torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"))

                best_file_paths = [e.replace("latest", "best") for e in latest_file_paths]
                for src, tgt in zip(latest_file_paths, best_file_paths):
                    os.renames(src, tgt)
                logger.info("The checkpoint file has been updated.")
            else:
                es_cnt += 1
                if opt.max_es_cnt != -1 and es_cnt > opt.max_es_cnt:  # early stop
                    with open(opt.train_log_filepath, "a") as f:
                        f.write(f"Early Stop at epoch {epoch_i}")
                    logger.info(f"\n>>>>> Early stop at epoch {epoch_i} {prev_best_score}\n")
                    break

            # save ckpt
            checkpoint = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "lr_scheduler": lr_scheduler.state_dict(),
                "epoch": epoch_i,
                "opt": opt
            }
            torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", "_latest.ckpt"))

        save_interval = 10 if "subs_train" in opt.train_path else 50  # smaller for pretrain
        if (epoch_i + 1) % save_interval == 0 or (epoch_i + 1) % opt.lr_drop == 0:  # additional copies
            checkpoint = {
                "model": model.state_dict(),
                "optimizer": optimizer.state_dict(),
                "epoch": epoch_i,
                "opt": opt
            }
            torch.save(checkpoint, opt.ckpt_filepath.replace(".ckpt", f"_e{epoch_i:04d}.ckpt"))

        if opt.debug:
            break

    tb_writer.close()


def start_training():
    logger.info("Setup config, data and model...")
    opt = BaseOptions().parse()
    set_seed(opt.seed)
    if opt.debug:  # keep the model run deterministically
        # 'cudnn.benchmark = True' enabled auto finding the best algorithm for a specific input/net config.
        # Enable this only when input size is fixed.
        cudnn.benchmark = False
        cudnn.deterministic = True

    dataset_config = dict(
        dset_name=opt.dset_name,
        data_path=opt.train_path,
        v_feat_dirs=opt.v_feat_dirs,
        q_feat_dir=opt.t_feat_dir,
        q_feat_type="last_hidden_state",
        max_q_l=opt.max_q_l,
        max_v_l=opt.max_v_l,
        ctx_mode=opt.ctx_mode,
        data_ratio=opt.data_ratio,
        normalize_v=not opt.no_norm_vfeat,
        normalize_t=not opt.no_norm_tfeat,
        clip_len=opt.clip_length,
        max_windows=opt.max_windows,
        span_loss_type=opt.span_loss_type,
        txt_drop_ratio=opt.txt_drop_ratio,
        dset_domain=opt.dset_domain,
    )
    dataset_config["data_path"] = opt.train_path
    train_dataset = StartEndDataset(**dataset_config)

    if opt.eval_path is not None:
        dataset_config["data_path"] = opt.eval_path
        dataset_config["txt_drop_ratio"] = 0
        dataset_config["q_feat_dir"] = opt.t_feat_dir.replace("sub_features", "text_features")  # for pretraining
        # dataset_config["load_labels"] = False  # uncomment to calculate eval loss
        eval_dataset = StartEndDataset(**dataset_config)
    else:
        eval_dataset = None

    model, criterion, optimizer, lr_scheduler = setup_model(opt)
    logger.info(f"Model {model}")
    count_parameters(model)
    logger.info("Start Training...")

    # For tvsum dataset, use train_hl function
    if opt.dset_name in ['tvsum', 'youtube_uni']:
        train_hl(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)
    else:
        train(model, criterion, optimizer, lr_scheduler, train_dataset, eval_dataset, opt)

    return opt.ckpt_filepath.replace(".ckpt", "_best.ckpt"), opt.eval_split_name, opt.eval_path, opt.debug, opt


if __name__ == '__main__':
    best_ckpt_path, eval_split_name, eval_path, debug, opt = start_training()
    if not debug:
        input_args = ["--resume", best_ckpt_path,
                      "--eval_split_name", eval_split_name,
                      "--eval_path", eval_path]
        sys.argv[1:] = input_args
        logger.info("\n\n\nFINISHED TRAINING!!!")
        logger.info("Evaluating model at {}".format(best_ckpt_path))
        logger.info("Input args {}".format(sys.argv[1:]))
start_inference(opt)
5
2023-11-10 12:45:25+00:00
24k
ej0cl6/TextEE
TextEE/models/OneIE/E2Etrainer.py
[ { "identifier": "BasicTrainer", "path": "TextEE/models/trainer.py", "snippet": "class BasicTrainer(object):\n def __init__(self, config, type_set=None):\n self.config = config\n self.type_set = type_set\n \n @classmethod\n def add_extra_info_fn(cls, instances, raw_data, config):\n for instance in instances:\n instance[\"extra_info\"] = None\n return instances\n \n def load_model(self, checkpoint=None):\n pass\n \n def train(self, train_data, dev_data, **kwargs):\n pass\n \n def predict(self, data, **kwargs):\n pass" }, { "identifier": "OneIEE2EModel", "path": "TextEE/models/OneIE/E2Emodel.py", "snippet": "class OneIEE2EModel(nn.Module):\n def __init__(self,\n config,\n vocabs,\n valid_patterns=None):\n super().__init__()\n\n # vocabularies\n self.vocabs = vocabs\n self.entity_label_stoi = vocabs['entity_label']\n self.trigger_label_stoi = vocabs['trigger_label']\n self.mention_type_stoi = vocabs['mention_type']\n self.entity_type_stoi = vocabs['entity_type']\n self.event_type_stoi = vocabs['event_type']\n self.relation_type_stoi = vocabs['relation_type']\n self.role_type_stoi = vocabs['role_type']\n self.entity_label_itos = {i:s for s, i in self.entity_label_stoi.items()}\n self.trigger_label_itos = {i:s for s, i in self.trigger_label_stoi.items()}\n self.entity_type_itos = {i: s for s, i in self.entity_type_stoi.items()}\n self.event_type_itos = {i: s for s, i in self.event_type_stoi.items()}\n self.relation_type_itos = {i: s for s, i in self.relation_type_stoi.items()}\n self.role_type_itos = {i: s for s, i in self.role_type_stoi.items()}\n self.entity_label_num = len(self.entity_label_stoi)\n self.trigger_label_num = len(self.trigger_label_stoi)\n self.mention_type_num = len(self.mention_type_stoi)\n self.entity_type_num = len(self.entity_type_stoi)\n self.event_type_num = len(self.event_type_stoi)\n self.relation_type_num = len(self.relation_type_stoi)\n self.role_type_num = len(self.role_type_stoi)\n self.valid_relation_entity = None\n self.valid_event_role = None\n self.valid_role_entity = None\n if valid_patterns:\n self.valid_event_role = valid_patterns['event_role']\n self.valid_relation_entity = valid_patterns['relation_entity']\n self.valid_role_entity = valid_patterns['role_entity']\n self.relation_directional = config.relation_directional\n self.symmetric_relations = config.symmetric_relations\n self.symmetric_relation_idxs = {self.relation_type_stoi[r]\n for r in self.symmetric_relations}\n\n # BERT encoder\n self.pretrained_model_name = config.pretrained_model_name\n self.cache_dir = config.cache_dir\n if self.pretrained_model_name.startswith('bert-'):\n self.bert = BertModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = BertConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir)\n elif self.pretrained_model_name.startswith('roberta-'):\n self.bert = RobertaModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = RobertaConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir)\n elif self.pretrained_model_name.startswith('xlm-'):\n self.bert = XLMRobertaModel.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir,\n output_hidden_states=True)\n self.bert_config = XLMRobertaConfig.from_pretrained(self.pretrained_model_name,\n cache_dir=self.cache_dir) \n else:\n raise ValueError\n self.bert_dim = self.bert_config.hidden_size\n self.extra_bert = config.extra_bert\n 
self.use_extra_bert = config.use_extra_bert\n if self.use_extra_bert:\n self.bert_dim *= 2\n self.bert_dropout = nn.Dropout(p=config.bert_dropout)\n self.multi_piece = config.multi_piece_strategy\n # local classifiers\n self.use_entity_type = config.use_entity_type\n self.binary_dim = self.bert_dim * 2\n linear_bias = config.linear_bias\n linear_dropout = config.linear_dropout\n entity_hidden_num = config.entity_hidden_num\n mention_hidden_num = config.mention_hidden_num\n event_hidden_num = config.event_hidden_num\n relation_hidden_num = config.relation_hidden_num\n role_hidden_num = config.role_hidden_num\n role_input_dim = self.binary_dim + (self.entity_type_num if self.use_entity_type else 0)\n self.entity_label_ffn = nn.Linear(self.bert_dim, self.entity_label_num,\n bias=linear_bias)\n self.trigger_label_ffn = nn.Linear(self.bert_dim, self.trigger_label_num,\n bias=linear_bias)\n self.entity_type_ffn = Linears([self.bert_dim, entity_hidden_num,\n self.entity_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.mention_type_ffn = Linears([self.bert_dim, mention_hidden_num,\n self.mention_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.event_type_ffn = Linears([self.bert_dim, event_hidden_num,\n self.event_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.relation_type_ffn = Linears([self.binary_dim, relation_hidden_num,\n self.relation_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n self.role_type_ffn = Linears([role_input_dim, role_hidden_num,\n self.role_type_num],\n dropout_prob=linear_dropout,\n bias=linear_bias,\n activation=config.linear_activation)\n # global features\n self.use_global_features = config.use_global_features\n self.global_features = config.global_features\n self.global_feature_maps = generate_global_feature_maps(vocabs, valid_patterns)\n self.global_feature_num = sum(len(m) for k, m in self.global_feature_maps.items()\n if k in self.global_features or\n not self.global_features)\n self.global_feature_weights = nn.Parameter(\n torch.zeros(self.global_feature_num).fill_(-0.0001))\n # decoder\n self.beam_size = config.beam_size\n self.beta_v = config.beta_v\n self.beta_e = config.beta_e\n # loss functions\n self.entity_criteria = torch.nn.CrossEntropyLoss()\n self.event_criteria = torch.nn.CrossEntropyLoss()\n self.mention_criteria = torch.nn.CrossEntropyLoss()\n self.relation_criteria = torch.nn.CrossEntropyLoss()\n self.role_criteria = torch.nn.CrossEntropyLoss()\n # others\n self.entity_crf = CRF(self.entity_label_stoi, bioes=False)\n self.trigger_crf = CRF(self.trigger_label_stoi, bioes=False)\n self.pad_vector = nn.Parameter(torch.randn(1, 1, self.bert_dim))\n\n def encode(self, piece_idxs, attention_masks, token_lens):\n \"\"\"Encode input sequences with BERT\n :param piece_idxs (LongTensor): word pieces indices\n :param attention_masks (FloatTensor): attention mask\n :param token_lens (list): token lengths\n \"\"\"\n batch_size, _ = piece_idxs.size()\n all_bert_outputs = self.bert(piece_idxs, attention_mask=attention_masks)\n bert_outputs = all_bert_outputs[0]\n\n if self.use_extra_bert:\n extra_bert_outputs = all_bert_outputs[2][self.extra_bert]\n bert_outputs = torch.cat([bert_outputs, extra_bert_outputs], dim=2)\n\n if self.multi_piece == 'first':\n # select the first piece for multi-piece words\n offsets = 
token_lens_to_offsets(token_lens)\n offsets = piece_idxs.new(offsets)\n # + 1 because the first vector is for [CLS]\n offsets = offsets.unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n bert_outputs = torch.gather(bert_outputs, 1, offsets)\n elif self.multi_piece == 'average':\n # average all pieces for multi-piece words\n idxs, masks, token_num, token_len = token_lens_to_idxs(token_lens)\n idxs = piece_idxs.new(idxs).unsqueeze(-1).expand(batch_size, -1, self.bert_dim) + 1\n masks = bert_outputs.new(masks).unsqueeze(-1)\n bert_outputs = torch.gather(bert_outputs, 1, idxs) * masks\n bert_outputs = bert_outputs.view(batch_size, token_num, token_len, self.bert_dim)\n bert_outputs = bert_outputs.sum(2)\n else:\n raise ValueError('Unknown multi-piece token handling strategy: {}'\n .format(self.multi_piece))\n bert_outputs = self.bert_dropout(bert_outputs)\n return bert_outputs\n\n def scores(self, bert_outputs, graphs, entity_types_onehot=None,\n predict=False, gold_tri=False, gold_ent=False):\n (\n entity_idxs, entity_masks, entity_num, entity_len,\n trigger_idxs, trigger_masks, trigger_num, trigger_len,\n ) = graphs_to_node_idxs(graphs)\n\n batch_size, _, bert_dim = bert_outputs.size()\n\n entity_idxs = bert_outputs.new_tensor(entity_idxs, dtype=torch.long)\n trigger_idxs = bert_outputs.new_tensor(trigger_idxs, dtype=torch.long)\n entity_masks = bert_outputs.new_tensor(entity_masks)\n trigger_masks = bert_outputs.new_tensor(trigger_masks)\n\n # entity type scores\n entity_idxs = entity_idxs.unsqueeze(-1).expand(-1, -1, bert_dim)\n entity_masks = entity_masks.unsqueeze(-1).expand(-1, -1, bert_dim)\n entity_words = torch.gather(bert_outputs, 1, entity_idxs)\n entity_words = entity_words * entity_masks\n entity_words = entity_words.view(batch_size, entity_num, entity_len, bert_dim)\n entity_reprs = entity_words.sum(2)\n entity_type_scores = self.entity_type_ffn(entity_reprs)\n\n # mention type scores\n mention_type_scores = self.mention_type_ffn(entity_reprs)\n\n # trigger type scores\n trigger_idxs = trigger_idxs.unsqueeze(-1).expand(-1, -1, bert_dim)\n trigger_masks = trigger_masks.unsqueeze(-1).expand(-1, -1, bert_dim)\n trigger_words = torch.gather(bert_outputs, 1, trigger_idxs)\n trigger_words = trigger_words * trigger_masks\n trigger_words = trigger_words.view(batch_size, trigger_num, trigger_len, bert_dim)\n trigger_reprs = trigger_words.sum(2)\n event_type_scores = self.event_type_ffn(trigger_reprs)\n \n # Add for gold entity given case:\n # The idea is to make the gold entities' score become very high\n if gold_ent:\n for graph, entity_type_score in zip(graphs, entity_type_scores):\n for ent, score in zip(graph.entities, entity_type_score):\n score[ent[2]] = 10000 \n # Add for gold trigger given case:\n # The idea is to make the gold triggers' score become very high\n if gold_tri:\n for graph, event_type_score in zip(graphs, event_type_scores):\n for trig, score in zip(graph.triggers, event_type_score):\n score[trig[2]] = 10000\n\n # relation type score\n ee_idxs = generate_pairwise_idxs(entity_num, entity_num)\n ee_idxs = entity_idxs.new(ee_idxs)\n ee_idxs = ee_idxs.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, bert_dim)\n ee_reprs = torch.cat([entity_reprs, entity_reprs], dim=1)\n ee_reprs = torch.gather(ee_reprs, 1, ee_idxs)\n ee_reprs = ee_reprs.view(batch_size, -1, 2 * bert_dim)\n relation_type_scores = self.relation_type_ffn(ee_reprs)\n\n # role type score\n te_idxs = generate_pairwise_idxs(trigger_num, entity_num)\n te_idxs = entity_idxs.new(te_idxs)\n te_idxs = 
te_idxs.unsqueeze(0).unsqueeze(-1).expand(batch_size, -1, bert_dim)\n te_reprs = torch.cat([trigger_reprs, entity_reprs], dim=1)\n te_reprs = torch.gather(te_reprs, 1, te_idxs)\n te_reprs = te_reprs.view(batch_size, -1, 2 * bert_dim)\n\n if self.use_entity_type:\n if predict:\n entity_type_scores_softmax = entity_type_scores.softmax(dim=2)\n entity_type_scores_softmax = entity_type_scores_softmax.repeat(1, trigger_num, 1)\n te_reprs = torch.cat([te_reprs, entity_type_scores_softmax], dim=2)\n else:\n entity_types_onehot = entity_types_onehot.repeat(1, trigger_num, 1)\n te_reprs = torch.cat([te_reprs, entity_types_onehot], dim=2)\n role_type_scores = self.role_type_ffn(te_reprs)\n\n return (entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores)\n\n def forward(self, batch):\n # encoding\n bert_outputs = self.encode(batch.piece_idxs,\n batch.attention_masks,\n batch.token_lens)\n batch_size, _, _ = bert_outputs.size()\n # entity type indices -> one hot\n entity_types = batch.entity_type_idxs.view(batch_size, -1)\n entity_types = torch.clamp(entity_types, min=0)\n entity_types_onehot = bert_outputs.new_zeros(*entity_types.size(),\n self.entity_type_num)\n entity_types_onehot.scatter_(2, entity_types.unsqueeze(-1), 1)\n # identification\n entity_label_scores = self.entity_label_ffn(bert_outputs)\n trigger_label_scores = self.trigger_label_ffn(bert_outputs)\n\n entity_label_scores = self.entity_crf.pad_logits(entity_label_scores)\n entity_label_loglik = self.entity_crf.loglik(entity_label_scores,\n batch.entity_label_idxs,\n batch.token_nums)\n trigger_label_scores = self.trigger_crf.pad_logits(trigger_label_scores)\n trigger_label_loglik = self.trigger_crf.loglik(trigger_label_scores,\n batch.trigger_label_idxs,\n batch.token_nums)\n # classification\n scores = self.scores(bert_outputs, batch.graphs, entity_types_onehot)\n (\n entity_type_scores, mention_type_scores, event_type_scores,\n relation_type_scores, role_type_scores\n ) = scores\n entity_type_scores = entity_type_scores.view(-1, self.entity_type_num)\n event_type_scores = event_type_scores.view(-1, self.event_type_num)\n relation_type_scores = relation_type_scores.view(-1, self.relation_type_num)\n role_type_scores = role_type_scores.view(-1, self.role_type_num)\n mention_type_scores = mention_type_scores.view(-1, self.mention_type_num)\n classification_loss = self.entity_criteria(entity_type_scores,\n batch.entity_type_idxs) + \\\n self.event_criteria(event_type_scores,\n batch.event_type_idxs) + \\\n self.role_criteria(role_type_scores,\n batch.role_type_idxs) + \\\n self.relation_criteria(relation_type_scores,\n batch.relation_type_idxs) + \\\n self.mention_criteria(mention_type_scores,\n batch.mention_type_idxs)\n\n loss = classification_loss - entity_label_loglik.mean() - trigger_label_loglik.mean()\n\n # global features\n if self.use_global_features:\n gold_scores = self.compute_graph_scores(batch.graphs, scores)\n top_graphs = self.generate_locally_top_graphs(batch.graphs, scores)\n top_scores = self.compute_graph_scores(top_graphs, scores)\n global_loss = (top_scores - gold_scores).clamp(min=0)\n loss = loss + global_loss.mean()\n return loss\n\n def predict(self, batch, gold_tri=False, gold_ent=False):\n self.eval()\n with torch.no_grad():\n bert_outputs = self.encode(batch.piece_idxs,\n batch.attention_masks,\n batch.token_lens)\n batch_size, _, _ = bert_outputs.size()\n\n # identification\n entity_label_scores = self.entity_label_ffn(bert_outputs)\n entity_label_scores = 
self.entity_crf.pad_logits(entity_label_scores)\n trigger_label_scores = self.trigger_label_ffn(bert_outputs)\n trigger_label_scores = self.trigger_crf.pad_logits(trigger_label_scores)\n _, entity_label_preds = self.entity_crf.viterbi_decode(entity_label_scores,\n batch.token_nums)\n _, trigger_label_preds = self.trigger_crf.viterbi_decode(trigger_label_scores,\n batch.token_nums)\n entities = tag_paths_to_spans(entity_label_preds,\n batch.token_nums,\n self.entity_label_stoi)\n triggers = tag_paths_to_spans(trigger_label_preds,\n batch.token_nums,\n self.trigger_label_stoi)\n \n # Add for gold trigger/ gold entity given case.\n if gold_tri:\n triggers = [[list(trigger) for trigger in graph.triggers] for graph in batch.graphs]\n if gold_ent:\n entities = [[list(entity) for entity in graph.entities] for graph in batch.graphs]\n\n node_graphs = [Graph(e, t, [], [], self.vocabs)\n for e, t in zip(entities, triggers)]\n scores = self.scores(bert_outputs, node_graphs, predict=True, gold_tri=gold_tri, gold_ent=gold_ent)\n max_entity_num = max(max(len(seq_entities) for seq_entities in entities), 1)\n\n batch_graphs = []\n # Decode each sentence in the batch\n for i in range(batch_size):\n seq_entities, seq_triggers = entities[i], triggers[i]\n spans = sorted([(*i, True) for i in seq_entities] +\n [(*i, False) for i in seq_triggers],\n key=lambda x: (x[0], x[1], not x[-1]))\n entity_num, trigger_num = len(seq_entities), len(seq_triggers)\n if entity_num == 0 and trigger_num == 0:\n # skip decoding\n batch_graphs.append(Graph.empty_graph(self.vocabs))\n continue\n graph = self.decode(spans,\n entity_type_scores=scores[0][i],\n mention_type_scores=scores[1][i],\n event_type_scores=scores[2][i],\n relation_type_scores=scores[3][i],\n role_type_scores=scores[4][i],\n entity_num=max_entity_num)\n batch_graphs.append(graph)\n\n self.train()\n return batch_graphs\n\n def compute_graph_scores(self, graphs, scores):\n (\n entity_type_scores, _mention_type_scores,\n trigger_type_scores, relation_type_scores,\n role_type_scores\n ) = scores\n label_idxs = graphs_to_label_idxs(graphs)\n label_idxs = [entity_type_scores.new_tensor(idx,\n dtype=torch.long if i % 2 == 0\n else torch.float)\n for i, idx in enumerate(label_idxs)]\n (\n entity_idxs, entity_mask, trigger_idxs, trigger_mask,\n relation_idxs, relation_mask, role_idxs, role_mask\n ) = label_idxs\n # Entity score\n entity_idxs = entity_idxs.unsqueeze(-1)\n entity_scores = torch.gather(entity_type_scores, 2, entity_idxs)\n entity_scores = entity_scores.squeeze(-1) * entity_mask\n entity_score = entity_scores.sum(1)\n # Trigger score\n trigger_idxs = trigger_idxs.unsqueeze(-1)\n trigger_scores = torch.gather(trigger_type_scores, 2, trigger_idxs)\n trigger_scores = trigger_scores.squeeze(-1) * trigger_mask\n trigger_score = trigger_scores.sum(1)\n # Relation score\n relation_idxs = relation_idxs.unsqueeze(-1)\n relation_scores = torch.gather(relation_type_scores, 2, relation_idxs)\n relation_scores = relation_scores.squeeze(-1) * relation_mask\n relation_score = relation_scores.sum(1)\n # Role score\n role_idxs = role_idxs.unsqueeze(-1)\n role_scores = torch.gather(role_type_scores, 2, role_idxs)\n role_scores = role_scores.squeeze(-1) * role_mask\n role_score = role_scores.sum(1)\n\n score = entity_score + trigger_score + role_score + relation_score\n\n global_vectors = [generate_global_feature_vector(g, self.global_feature_maps, features=self.global_features)\n for g in graphs]\n global_vectors = entity_scores.new_tensor(global_vectors)\n global_weights 
= self.global_feature_weights.unsqueeze(0).expand_as(global_vectors)\n global_score = (global_vectors * global_weights).sum(1)\n score = score + global_score\n\n return score\n\n def generate_locally_top_graphs(self, graphs, scores):\n (\n entity_type_scores, _mention_type_scores,\n trigger_type_scores, relation_type_scores,\n role_type_scores\n ) = scores\n max_entity_num = max(max([g.entity_num for g in graphs]), 1)\n top_graphs = []\n for graph_idx, graph in enumerate(graphs):\n entity_num = graph.entity_num\n trigger_num = graph.trigger_num\n _, top_entities = entity_type_scores[graph_idx].max(1)\n top_entities = top_entities.tolist()[:entity_num]\n top_entities = [(i, j, k) for (i, j, _), k in\n zip(graph.entities, top_entities)]\n _, top_triggers = trigger_type_scores[graph_idx].max(1)\n top_triggers = top_triggers.tolist()[:trigger_num]\n top_triggers = [(i, j, k) for (i, j, _), k in\n zip(graph.triggers, top_triggers)]\n \n top_relation_scores, top_relation_labels = relation_type_scores[graph_idx].max(1)\n top_relation_scores = top_relation_scores.tolist()\n top_relation_labels = top_relation_labels.tolist()\n top_relations = [(i, j) for i, j in zip(top_relation_scores, top_relation_labels)]\n top_relation_list = []\n for i in range(entity_num):\n for j in range(entity_num):\n if i < j:\n score_1, label_1 = top_relations[i * max_entity_num + j]\n score_2, label_2 = top_relations[j * max_entity_num + i]\n if score_1 > score_2 and label_1 != 0:\n top_relation_list.append((i, j, label_1))\n if score_2 > score_1 and label_2 != 0: \n top_relation_list.append((j, i, label_2))\n\n _, top_roles = role_type_scores[graph_idx].max(1)\n top_roles = top_roles.tolist()\n top_roles = [(i, j, top_roles[i * max_entity_num + j])\n for i in range(trigger_num) for j in range(entity_num)\n if top_roles[i * max_entity_num + j] != 0]\n top_graphs.append(Graph(\n entities=top_entities,\n triggers=top_triggers,\n # relations=top_relations,\n relations=top_relation_list,\n roles=top_roles,\n vocabs=graph.vocabs\n ))\n return top_graphs\n\n def trim_beam_set(self, beam_set, beam_size):\n if len(beam_set) > beam_size:\n beam_set.sort(key=lambda x: self.compute_graph_score(x), reverse=True)\n beam_set = beam_set[:beam_size]\n return beam_set\n\n def compute_graph_score(self, graph):\n score = graph.graph_local_score\n if self.use_global_features:\n global_vector = generate_global_feature_vector(graph,\n self.global_feature_maps,\n features=self.global_features)\n global_vector = self.global_feature_weights.new_tensor(global_vector)\n global_score = global_vector.dot(self.global_feature_weights).item()\n score = score + global_score\n return score\n\n def decode(self,\n spans,\n entity_type_scores,\n mention_type_scores,\n event_type_scores,\n relation_type_scores,\n role_type_scores,\n entity_num):\n beam_set = [Graph.empty_graph(self.vocabs)]\n entity_idx, trigger_idx = 0, 0\n\n for start, end, _, is_entity_node in spans:\n # 1. 
node step\n if is_entity_node:\n node_scores = entity_type_scores[entity_idx].tolist()\n else:\n node_scores = event_type_scores[trigger_idx].tolist()\n node_scores_norm = normalize_score(node_scores)\n node_scores = [(s, i, n) for i, (s, n) in enumerate(zip(node_scores,\n node_scores_norm))]\n node_scores.sort(key=lambda x: x[0], reverse=True)\n top_node_scores = node_scores[:self.beta_v]\n\n beam_set_ = []\n for graph in beam_set:\n for score, label, score_norm in top_node_scores:\n graph_ = graph.copy()\n if is_entity_node:\n graph_.add_entity(start, end, label, score, score_norm)\n else:\n graph_.add_trigger(start, end, label, score, score_norm)\n beam_set_.append(graph_)\n beam_set = beam_set_\n\n # 2. edge step\n if is_entity_node:\n # add a new entity: new relations, new argument roles\n for i in range(entity_idx):\n # add relation edges\n edge_scores_1 = relation_type_scores[i * entity_num + entity_idx].tolist()\n edge_scores_2 = relation_type_scores[entity_idx * entity_num + i].tolist()\n edge_scores_norm_1 = normalize_score(edge_scores_1)\n edge_scores_norm_2 = normalize_score(edge_scores_2)\n\n if self.relation_directional:\n edge_scores = [(max(s1, s2), n2 if s1 < s2 else n1, i, s1 < s2)\n for i, (s1, s2, n1, n2)\n in enumerate(zip(edge_scores_1, edge_scores_2,\n edge_scores_norm_1,\n edge_scores_norm_2))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n else:\n edge_scores = [(max(s1, s2), n2 if s1 < n2 else n1, i, False)\n for i, (s1, s2, n1, n2)\n in enumerate(zip(edge_scores_1, edge_scores_2,\n edge_scores_norm_1,\n edge_scores_norm_2))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, score_norm, label, inverse in top_edge_scores:\n rel_cur_ent = label * 1000 + graph.entities[-1][-1]\n rel_pre_ent = label * 1000 + graph.entities[i][-1]\n if self.valid_relation_entity is not None and self.valid_relation_entity is not None:\n if label == 0 or (rel_pre_ent in self.valid_relation_entity and\n rel_cur_ent in self.valid_relation_entity):\n graph_ = graph.copy()\n if self.relation_directional and inverse:\n graph_.add_relation(entity_idx, i, label, score, score_norm)\n else:\n graph_.add_relation(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n else:\n if label == 0:\n graph_ = graph.copy()\n if self.relation_directional and inverse:\n graph_.add_relation(entity_idx, i, label, score, score_norm)\n else:\n graph_.add_relation(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_relation(i, entity_idx, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 200:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n\n for i in range(trigger_idx):\n # add argument role edges\n edge_scores = role_type_scores[i * entity_num + entity_idx].tolist()\n edge_scores_norm = normalize_score(edge_scores)\n edge_scores = [(s, i, n) for i, (s, n) in enumerate(zip(edge_scores, edge_scores_norm))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, label, score_norm in top_edge_scores:\n role_entity = label * 1000 + 
graph.entities[-1][-1]\n event_role = graph.triggers[i][-1] * 1000 + label\n if (self.valid_event_role is not None) and (self.valid_role_entity is not None):\n if label == 0 or (event_role in self.valid_event_role and\n role_entity in self.valid_role_entity):\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n else:\n if label == 0 :\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_role(i, entity_idx, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 100:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n beam_set = self.trim_beam_set(beam_set_, self.beam_size)\n\n else:\n # add a new trigger: new argument roles\n for i in range(entity_idx):\n edge_scores = role_type_scores[trigger_idx * entity_num + i].tolist()\n edge_scores_norm = normalize_score(edge_scores)\n edge_scores = [(s, i, n) for i, (s, n) in enumerate(zip(edge_scores,\n edge_scores_norm))]\n null_score = edge_scores[0][0]\n edge_scores.sort(key=lambda x: x[0], reverse=True)\n top_edge_scores = edge_scores[:self.beta_e]\n\n beam_set_ = []\n for graph in beam_set:\n has_valid_edge = False\n for score, label, score_norm in top_edge_scores:\n event_role = graph.triggers[-1][-1] * 1000 + label\n role_entity = label * 1000 + graph.entities[i][-1]\n if self.valid_event_role is not None and self.valid_role_entity is not None:\n if label == 0 or (event_role in self.valid_event_role\n and role_entity in self.valid_role_entity):\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n else:\n if label == 0:\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, label, score, score_norm)\n beam_set_.append(graph_)\n has_valid_edge = True\n if not has_valid_edge:\n graph_ = graph.copy()\n graph_.add_role(trigger_idx, i, 0, null_score)\n beam_set_.append(graph_)\n beam_set = beam_set_\n if len(beam_set) > 100:\n beam_set = self.trim_beam_set(beam_set, self.beam_size)\n\n beam_set = self.trim_beam_set(beam_set_, self.beam_size)\n\n if is_entity_node:\n entity_idx += 1\n else:\n trigger_idx += 1\n beam_set.sort(key=lambda x: self.compute_graph_score(x), reverse=True)\n graph = beam_set[0]\n\n # predict mention types\n _, mention_types = mention_type_scores.max(dim=1)\n mention_types = mention_types[:entity_idx]\n mention_list = [(i, j, l.item()) for (i, j, k), l\n in zip(graph.entities, mention_types)]\n graph.mentions = mention_list\n\n return graph" }, { "identifier": "IEDataset", "path": "TextEE/models/OneIE/data.py", "snippet": "class IEDataset(Dataset):\n def __init__(self, raw_data, tokenizer, max_length=128, gpu=False, ignore_title=False,\n relation_mask_self=True, relation_directional=False,\n coref=False, symmetric_relations=None, test=False):\n self.raw_data = raw_data\n self.data = []\n self.gpu = gpu\n self.max_length = max_length\n self.ignore_title = ignore_title\n self.relation_mask_self = relation_mask_self\n self.relation_directional = relation_directional\n self.coref = coref\n if symmetric_relations is None:\n self.symmetric_relations = set()\n else:\n self.symmetric_relations = symmetric_relations\n self.tokenizer = tokenizer\n self.test = test\n self.load_data()\n\n def __len__(self):\n return len(self.data)\n\n def __getitem__(self, item):\n return self.data[item]\n\n 
@property\n def entity_type_set(self):\n type_set = set()\n for inst in self.data:\n for entity in inst['entity_mentions']:\n type_set.add(entity.get('entity_type', \"UNK\"))\n return type_set\n\n @property\n def event_type_set(self):\n type_set = set()\n for inst in self.data:\n for event in inst['event_mentions']:\n type_set.add(event['event_type'])\n return type_set\n\n @property\n def relation_type_set(self):\n type_set = set()\n for inst in self.data:\n for relation in inst.get('relation_mentions', []):\n type_set.add(relation['relation_type'])\n return type_set\n\n @property\n def role_type_set(self):\n type_set = set()\n for inst in self.data:\n for event in inst['event_mentions']:\n for arg in event['arguments']:\n type_set.add(arg['role'])\n return type_set\n\n def load_data(self):\n overlength_num = 0\n for inst in self.raw_data:\n \n ## added\n pieces = [self.tokenizer.tokenize(t, is_split_into_words=True) for t in inst['tokens']]\n token_lens = [len(x) for x in pieces]\n if 0 in token_lens:\n raise ValueError\n pieces = [p for ps in pieces for p in ps]\n inst['pieces'] = pieces\n inst['token_lens'] = token_lens\n \n inst['entity_mentions'] = inst['extra_info']['entity_mentions']\n inst['relation_mentions'] = inst['extra_info']['relation_mentions']\n inst['event_mentions'] = inst['extra_info']['event_mentions']\n ##\n\n if not self.test:\n if self.max_length != -1 and len(pieces) > self.max_length - 2:\n overlength_num += 1\n continue\n else:\n if len(pieces) > self.max_length - 2:\n # add token_lens until over-length\n piece_counter = 0\n for max_token_include, token_len in enumerate(inst['token_lens']):\n if piece_counter + token_len >= self.max_length - 2:\n logger.info('overlength during testing...')\n break\n else:\n piece_counter += token_len\n inst['pieces'] = inst['pieces'][:piece_counter]\n inst['token_lens'] = inst['token_lens'][:max_token_include]\n inst['tokens'] = inst['tokens'][:max_token_include]\n self.data.append(inst)\n\n if overlength_num:\n logger.info('Discarded {} overlength instances'.format(overlength_num))\n logger.info('Loaded {} OneIE instances from {} E2E instances'.format(len(self), len(self.raw_data)))\n\n def numberize(self, tokenizer, vocabs):\n \"\"\"Numberize word pieces, labels, etcs.\n :param tokenizer: Bert tokenizer.\n :param vocabs (dict): a dict of vocabularies.\n \"\"\"\n entity_type_stoi = vocabs.get('entity_type', None)\n event_type_stoi = vocabs.get('event_type', None)\n relation_type_stoi = vocabs.get('relation_type', None)\n role_type_stoi = vocabs.get('role_type', None)\n mention_type_stoi = vocabs.get('mention_type', None)\n entity_label_stoi = vocabs.get('entity_label', None)\n trigger_label_stoi = vocabs.get('trigger_label', None)\n\n data = []\n for inst in self.data:\n doc_id = inst['doc_id']\n wnd_id = inst['wnd_id']\n tokens = inst['tokens']\n pieces = inst['pieces']\n sent_id = inst['wnd_id']\n entities = inst['entity_mentions']\n token_num = len(tokens)\n entities, entity_id_map = remove_overlap_entities(entities, token_num)\n entities.sort(key=lambda x: x['start'])\n events = inst['event_mentions']\n events.sort(key=lambda x: x['trigger']['start'])\n events = [eve for eve in events if eve['trigger']['end']<= token_num]\n relations = inst.get('relation_mentions', [])\n token_lens = inst['token_lens']\n\n # Pad word pieces with special tokens\n piece_idxs = tokenizer.encode(pieces,\n add_special_tokens=True,\n max_length=self.max_length,\n truncation=True)\n pad_num = self.max_length - len(piece_idxs)\n attn_mask = [1] * 
len(piece_idxs) + [0] * pad_num\n #piece_idxs = piece_idxs + [0] * pad_num\n pad_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.pad_token)\n piece_idxs = piece_idxs + [pad_id] * pad_num\n \n # Entity\n # - entity_labels and entity_label_idxs are used for identification\n # - entity_types and entity_type_idxs are used for classification\n # - entity_list is used for graph representation\n entity_labels = get_entity_labels(entities, token_num)\n entity_label_idxs = [entity_label_stoi[l] for l in entity_labels]\n entity_types = [e.get('entity_type', \"UNK\") for e in entities]\n entity_type_idxs = [entity_type_stoi[l] for l in entity_types]\n entity_list = [(e['start'], e['end'], entity_type_stoi[e.get('entity_type', \"UNK\")])\n for e in entities]\n # entity_num = len(entity_list)\n mention_types = [e.get('mention_type', \"UNK\") for e in entities]\n mention_type_idxs = [mention_type_stoi[l] for l in mention_types]\n mention_list = [(i, j, l) for (i, j, k), l\n in zip(entity_list, mention_type_idxs)]\n\n # Trigger\n # - trigger_labels and trigger_label_idxs are used for identification\n # - event_types and event_type_idxs are used for classification\n # - trigger_list is used for graph representation\n trigger_labels = get_trigger_labels(events, token_num)\n if self.test:\n trigger_label_idxs = []\n for l in trigger_labels:\n if l in trigger_label_stoi.keys():\n trigger_label_idxs.append(trigger_label_stoi[l])\n else:\n trigger_label_idxs.append(trigger_label_stoi['O']) # we need this when xl test\n event_type_idxs = [event_type_stoi[e['event_type']] for e in events \n if (e['trigger']['end'] <= token_num) and (e['event_type'] in event_type_stoi.keys())]\n trigger_list = [(e['trigger']['start'], e['trigger']['end'],\n event_type_stoi[e['event_type']])\n for e in events if e['event_type'] in event_type_stoi.keys()]\n else:\n trigger_label_idxs = [trigger_label_stoi[l]\n for l in trigger_labels]\n event_type_idxs = [event_type_stoi[e['event_type']] for e in events]\n trigger_list = [(e['trigger']['start'], e['trigger']['end'],\n event_type_stoi[e['event_type']])\n for e in events]\n\n # Relation\n relation_types = get_relation_types(entities, relations,\n entity_id_map,\n self.test,\n relation_type_stoi,\n directional=self.relation_directional,\n symmetric=self.symmetric_relations)\n relation_type_idxs = [[relation_type_stoi[l] for l in ls]\n for ls in relation_types]\n if self.relation_mask_self:\n for i in range(len(relation_type_idxs)):\n relation_type_idxs[i][i] = -100\n relation_list = get_relation_list(entities, relations,\n entity_id_map, relation_type_stoi, self.test,\n directional=self.relation_directional,\n symmetric=self.symmetric_relations)\n #relation_type_idxs = []\n #relation_list = []\n\n # Argument role\n role_types = get_role_types(entities, events, entity_id_map, role_type_stoi, self.test)\n role_type_idxs = [[role_type_stoi[l] for l in ls]\n for ls in role_types]\n role_list = get_role_list(entities, events,\n entity_id_map, role_type_stoi, self.test)\n\n # Graph\n graph = Graph(\n entities=entity_list,\n triggers=trigger_list,\n relations=relation_list,\n roles=role_list,\n mentions=mention_list,\n vocabs=vocabs,\n )\n \n instance = Instance(\n doc_id=doc_id,\n wnd_id=wnd_id,\n sent_id=sent_id,\n tokens=tokens,\n pieces=pieces,\n piece_idxs=piece_idxs,\n token_lens=token_lens,\n attention_mask=attn_mask,\n entity_label_idxs=entity_label_idxs,\n trigger_label_idxs=trigger_label_idxs,\n entity_type_idxs=entity_type_idxs,\n event_type_idxs=event_type_idxs,\n 
relation_type_idxs=relation_type_idxs,\n mention_type_idxs=mention_type_idxs,\n role_type_idxs=role_type_idxs,\n graph=graph,\n entity_num=len(entities),\n trigger_num=len(events),\n )\n data.append(instance)\n self.data = data\n\n def collate_fn(self, batch):\n batch_piece_idxs = []\n batch_tokens = []\n batch_entity_labels, batch_trigger_labels = [], []\n batch_entity_types, batch_event_types = [], []\n batch_relation_types, batch_role_types = [], []\n batch_mention_types = []\n batch_graphs = []\n batch_token_lens = []\n batch_attention_masks = []\n\n sent_ids = [inst.sent_id for inst in batch]\n token_nums = [len(inst.tokens) for inst in batch]\n max_token_num = max(token_nums)\n\n max_entity_num = max([inst.entity_num for inst in batch] + [1])\n max_trigger_num = max([inst.trigger_num for inst in batch] + [1])\n \n doc_ids = [inst.doc_id for inst in batch]\n wnd_ids = [inst.wnd_id for inst in batch]\n\n for inst in batch:\n token_num = len(inst.tokens)\n batch_piece_idxs.append(inst.piece_idxs)\n batch_attention_masks.append(inst.attention_mask)\n batch_token_lens.append(inst.token_lens)\n batch_graphs.append(inst.graph)\n batch_tokens.append(inst.tokens)\n # for identification\n batch_entity_labels.append(inst.entity_label_idxs +\n [0] * (max_token_num - token_num))\n batch_trigger_labels.append(inst.trigger_label_idxs +\n [0] * (max_token_num - token_num))\n # for classification\n batch_entity_types.extend(inst.entity_type_idxs +\n [-100] * (max_entity_num - inst.entity_num))\n batch_event_types.extend(inst.event_type_idxs +\n [-100] * (max_trigger_num - inst.trigger_num))\n batch_mention_types.extend(inst.mention_type_idxs +\n [-100] * (max_entity_num - inst.entity_num))\n for l in inst.relation_type_idxs:\n batch_relation_types.extend(\n l + [-100] * (max_entity_num - inst.entity_num))\n batch_relation_types.extend(\n [-100] * max_entity_num * (max_entity_num - inst.entity_num))\n for l in inst.role_type_idxs:\n batch_role_types.extend(\n l + [-100] * (max_entity_num - inst.entity_num))\n batch_role_types.extend(\n [-100] * max_entity_num * (max_trigger_num - inst.trigger_num))\n\n if self.gpu:\n batch_piece_idxs = torch.cuda.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.cuda.FloatTensor(\n batch_attention_masks)\n\n batch_entity_labels = torch.cuda.LongTensor(batch_entity_labels)\n batch_trigger_labels = torch.cuda.LongTensor(batch_trigger_labels)\n batch_entity_types = torch.cuda.LongTensor(batch_entity_types)\n batch_mention_types = torch.cuda.LongTensor(batch_mention_types)\n batch_event_types = torch.cuda.LongTensor(batch_event_types)\n batch_relation_types = torch.cuda.LongTensor(batch_relation_types)\n batch_role_types = torch.cuda.LongTensor(batch_role_types)\n\n token_nums = torch.cuda.LongTensor(token_nums)\n else:\n batch_piece_idxs = torch.LongTensor(batch_piece_idxs)\n batch_attention_masks = torch.FloatTensor(batch_attention_masks)\n\n batch_entity_labels = torch.LongTensor(batch_entity_labels)\n batch_trigger_labels = torch.LongTensor(batch_trigger_labels)\n batch_entity_types = torch.LongTensor(batch_entity_types)\n batch_mention_types = torch.LongTensor(batch_mention_types)\n batch_event_types = torch.LongTensor(batch_event_types)\n batch_relation_types = torch.LongTensor(batch_relation_types)\n batch_role_types = torch.LongTensor(batch_role_types)\n\n token_nums = torch.LongTensor(token_nums)\n\n return Batch(\n doc_ids=doc_ids,\n wnd_ids=wnd_ids,\n sent_ids=sent_ids,\n tokens=[inst.tokens for inst in batch],\n piece_idxs=batch_piece_idxs,\n 
token_lens=batch_token_lens,\n attention_masks=batch_attention_masks,\n entity_label_idxs=batch_entity_labels,\n trigger_label_idxs=batch_trigger_labels,\n entity_type_idxs=batch_entity_types,\n mention_type_idxs=batch_mention_types,\n event_type_idxs=batch_event_types,\n relation_type_idxs=batch_relation_types,\n role_type_idxs=batch_role_types,\n graphs=batch_graphs,\n token_nums=token_nums,\n )" }, { "identifier": "generate_vocabs", "path": "TextEE/models/OneIE/util.py", "snippet": "def generate_vocabs(datasets, coref=False,\n relation_directional=False,\n symmetric_relations=None):\n \"\"\"Generate vocabularies from a list of data sets\n :param datasets (list): A list of data sets\n :return (dict): A dictionary of vocabs\n \"\"\"\n entity_type_set = set()\n event_type_set = set()\n relation_type_set = set()\n role_type_set = set()\n for dataset in datasets:\n entity_type_set.update(dataset.entity_type_set)\n event_type_set.update(dataset.event_type_set)\n relation_type_set.update(dataset.relation_type_set)\n role_type_set.update(dataset.role_type_set)\n\n # add inverse relation types for non-symmetric relations\n if relation_directional:\n if symmetric_relations is None:\n symmetric_relations = []\n relation_type_set_ = set()\n for relation_type in relation_type_set:\n relation_type_set_.add(relation_type)\n if relation_directional and relation_type not in symmetric_relations:\n relation_type_set_.add(relation_type + '_inv')\n\n # entity and trigger labels\n prefix = ['B', 'I']\n entity_label_stoi = {'O': 0}\n trigger_label_stoi = {'O': 0}\n for t in entity_type_set:\n for p in prefix:\n entity_label_stoi['{}-{}'.format(p, t)] = len(entity_label_stoi)\n for t in event_type_set:\n for p in prefix:\n trigger_label_stoi['{}-{}'.format(p, t)] = len(trigger_label_stoi)\n\n entity_type_stoi = {k: i for i, k in enumerate(entity_type_set, 1)}\n entity_type_stoi['O'] = 0\n\n event_type_stoi = {k: i for i, k in enumerate(event_type_set, 1)}\n event_type_stoi['O'] = 0\n\n relation_type_stoi = {k: i for i, k in enumerate(relation_type_set, 1)}\n relation_type_stoi['O'] = 0\n if coref:\n relation_type_stoi['COREF'] = len(relation_type_stoi)\n\n role_type_stoi = {k: i for i, k in enumerate(role_type_set, 1)}\n role_type_stoi['O'] = 0\n\n mention_type_stoi = {'NAM': 0, 'NOM': 1, 'PRO': 2, 'UNK': 3}\n\n return {\n 'entity_type': entity_type_stoi,\n 'event_type': event_type_stoi,\n 'relation_type': relation_type_stoi,\n 'role_type': role_type_stoi,\n 'mention_type': mention_type_stoi,\n 'entity_label': entity_label_stoi,\n 'trigger_label': trigger_label_stoi,\n }" }, { "identifier": "load_valid_patterns", "path": "TextEE/models/OneIE/util.py", "snippet": "def load_valid_patterns(path, vocabs):\n if path is None:\n print('valid pattern path not exists, we do not apply valid pattern for decoding')\n return None\n event_type_vocab = vocabs['event_type']\n entity_type_vocab = vocabs['entity_type']\n relation_type_vocab = vocabs['relation_type']\n role_type_vocab = vocabs['role_type']\n\n # valid event-role\n valid_event_role = set()\n event_role = json.load(\n open(os.path.join(path, 'event_role.json'), 'r', encoding='utf-8'))\n for event, roles in event_role.items():\n if event not in event_type_vocab:\n continue\n event_type_idx = event_type_vocab[event]\n for role in roles:\n if role not in role_type_vocab:\n continue\n role_type_idx = role_type_vocab[role]\n valid_event_role.add(event_type_idx * 1000 + role_type_idx)\n\n # valid relation-entity\n valid_relation_entity = set()\n # relation_entity = 
json.load(\n # open(os.path.join(path, 'relation_entity.json'), 'r', encoding='utf-8'))\n # for relation, entities in relation_entity.items():\n # relation_type_idx = relation_type_vocab[relation]\n # for entity in entities:\n # entity_type_idx = entity_type_vocab[entity]\n # valid_relation_entity.add(\n # relation_type_idx * 1000 + entity_type_idx)\n\n # valid role-entity\n valid_role_entity = set()\n role_entity = json.load(\n open(os.path.join(path, 'role_entity.json'), 'r', encoding='utf-8'))\n for role, entities in role_entity.items():\n if role not in role_type_vocab:\n continue\n role_type_idx = role_type_vocab[role]\n for entity in entities:\n entity_type_idx = entity_type_vocab[entity]\n valid_role_entity.add(role_type_idx * 1000 + entity_type_idx)\n\n return {\n 'event_role': valid_event_role,\n 'relation_entity': valid_relation_entity,\n 'role_entity': valid_role_entity\n }" }, { "identifier": "save_result", "path": "TextEE/models/OneIE/util.py", "snippet": "def save_result(output_file, gold_graphs, pred_graphs, sent_ids, tokens=None):\n with open(output_file, 'w', encoding='utf-8') as w:\n for i, (gold_graph, pred_graph, sent_id) in enumerate(\n zip(gold_graphs, pred_graphs, sent_ids)):\n output = {'sent_id': sent_id,\n 'gold': gold_graph.to_dict(),\n 'pred': pred_graph.to_dict()}\n if tokens:\n output['tokens'] = tokens[i]\n w.write(json.dumps(output) + '\\n')" }, { "identifier": "best_score_by_task", "path": "TextEE/models/OneIE/util.py", "snippet": "def best_score_by_task(log_file, task, max_epoch=1000):\n with open(log_file, 'r', encoding='utf-8') as r:\n config = r.readline()\n\n best_scores = []\n best_dev_score = 0\n for line in r:\n record = json.loads(line)\n dev = record['dev']\n #test = record['test']\n test = record.get('test', None)\n epoch = record['epoch']\n if epoch > max_epoch:\n break\n if dev[task]['f'] > best_dev_score:\n best_dev_score = dev[task]['f']\n best_scores = [dev, test, epoch]\n\n print('Epoch: {}'.format(best_scores[-1]))\n tasks = ['entity', 'mention', 'relation', 'trigger_id', 'trigger',\n 'role_id', 'role']\n for t in tasks:\n print('{}: dev: {:.2f}, test: {:.2f}'.format(t,\n best_scores[0][t][\n 'f'] * 100.0,\n best_scores[1][t][\n 'f'] * 100.0))" }, { "identifier": "score_graphs", "path": "TextEE/models/OneIE/scorer.py", "snippet": "def score_graphs(gold_graphs, pred_graphs,\n relation_directional=False):\n gold_arg_num = pred_arg_num = arg_idn_num = arg_class_num = 0\n gold_trigger_num = pred_trigger_num = trigger_idn_num = trigger_class_num = 0\n gold_ent_num = pred_ent_num = ent_match_num = 0\n gold_rel_num = pred_rel_num = rel_match_num = 0\n gold_men_num = pred_men_num = men_match_num = 0\n\n for gold_graph, pred_graph in zip(gold_graphs, pred_graphs):\n # Entity\n gold_entities = gold_graph.entities\n pred_entities = pred_graph.entities\n gold_ent_num += len(gold_entities)\n pred_ent_num += len(pred_entities)\n ent_match_num += len([entity for entity in pred_entities\n if entity in gold_entities])\n\n # Mention\n gold_mentions = gold_graph.mentions\n pred_mentions = pred_graph.mentions\n gold_men_num += len(gold_mentions)\n pred_men_num += len(pred_mentions)\n men_match_num += len([mention for mention in pred_mentions\n if mention in gold_mentions])\n\n # Relation\n gold_relations = gold_graph.relations\n pred_relations = pred_graph.relations\n gold_rel_num += len(gold_relations)\n pred_rel_num += len(pred_relations)\n for arg1, arg2, rel_type in pred_relations:\n arg1_start, arg1_end, _ = pred_entities[arg1]\n arg2_start, arg2_end, _ 
= pred_entities[arg2]\n for arg1_gold, arg2_gold, rel_type_gold in gold_relations:\n arg1_start_gold, arg1_end_gold, _ = gold_entities[arg1_gold]\n arg2_start_gold, arg2_end_gold, _ = gold_entities[arg2_gold]\n if relation_directional:\n if (arg1_start == arg1_start_gold and\n arg1_end == arg1_end_gold and\n arg2_start == arg2_start_gold and\n arg2_end == arg2_end_gold\n ) and rel_type == rel_type_gold:\n rel_match_num += 1\n break\n else:\n if ((arg1_start == arg1_start_gold and\n arg1_end == arg1_end_gold and\n arg2_start == arg2_start_gold and\n arg2_end == arg2_end_gold) or (\n arg1_start == arg2_start_gold and\n arg1_end == arg2_end_gold and\n arg2_start == arg1_start_gold and\n arg2_end == arg1_end_gold\n )) and rel_type == rel_type_gold:\n rel_match_num += 1\n break\n\n # Trigger\n gold_triggers = gold_graph.triggers\n pred_triggers = pred_graph.triggers\n gold_trigger_num += len(gold_triggers)\n pred_trigger_num += len(pred_triggers)\n for trg_start, trg_end, event_type in pred_triggers:\n matched = [item for item in gold_triggers\n if item[0] == trg_start and item[1] == trg_end]\n if matched:\n trigger_idn_num += 1\n if matched[0][-1] == event_type:\n trigger_class_num += 1\n\n # Argument\n gold_args = convert_arguments(gold_triggers, gold_entities,\n gold_graph.roles)\n pred_args = convert_arguments(pred_triggers, pred_entities,\n pred_graph.roles)\n gold_arg_num += len(gold_args)\n pred_arg_num += len(pred_args)\n for pred_arg in pred_args:\n arg_start, arg_end, event_type, role = pred_arg\n gold_idn = {item for item in gold_args\n if item[0] == arg_start and item[1] == arg_end\n and item[2] == event_type}\n if gold_idn:\n arg_idn_num += 1\n gold_class = {item for item in gold_idn if item[-1] == role}\n if gold_class:\n arg_class_num += 1\n\n entity_prec, entity_rec, entity_f = compute_f1(\n pred_ent_num, gold_ent_num, ent_match_num)\n mention_prec, mention_rec, mention_f = compute_f1(\n pred_men_num, gold_men_num, men_match_num)\n trigger_id_prec, trigger_id_rec, trigger_id_f = compute_f1(\n pred_trigger_num, gold_trigger_num, trigger_idn_num)\n trigger_prec, trigger_rec, trigger_f = compute_f1(\n pred_trigger_num, gold_trigger_num, trigger_class_num)\n relation_prec, relation_rec, relation_f = compute_f1(\n pred_rel_num, gold_rel_num, rel_match_num)\n role_id_prec, role_id_rec, role_id_f = compute_f1(\n pred_arg_num, gold_arg_num, arg_idn_num)\n role_prec, role_rec, role_f = compute_f1(\n pred_arg_num, gold_arg_num, arg_class_num)\n\n print('Entity: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n entity_prec * 100.0, entity_rec * 100.0, entity_f * 100.0))\n print('Mention: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n mention_prec * 100.0, mention_rec * 100.0, mention_f * 100.0))\n print('Trigger identification: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n trigger_id_prec * 100.0, trigger_id_rec * 100.0, trigger_id_f * 100.0))\n print('Trigger: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n trigger_prec * 100.0, trigger_rec * 100.0, trigger_f * 100.0))\n print('Relation: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n relation_prec * 100.0, relation_rec * 100.0, relation_f * 100.0))\n print('Role identification: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n role_id_prec * 100.0, role_id_rec * 100.0, role_id_f * 100.0))\n print('Role: P: {:.2f}, R: {:.2f}, F: {:.2f}'.format(\n role_prec * 100.0, role_rec * 100.0, role_f * 100.0))\n\n scores = {\n 'entity': {'prec': entity_prec, 'rec': entity_rec, 'f': entity_f},\n 'mention': {'prec': mention_prec, 'rec': mention_rec, 'f': mention_f},\n 'trigger': 
{'prec': trigger_prec, 'rec': trigger_rec, 'f': trigger_f},\n 'trigger_id': {'prec': trigger_id_prec, 'rec': trigger_id_rec,\n 'f': trigger_id_f},\n 'role': {'prec': role_prec, 'rec': role_rec, 'f': role_f},\n 'role_id': {'prec': role_id_prec, 'rec': role_id_rec, 'f': role_id_f},\n 'relation': {'prec': relation_prec, 'rec': relation_rec,\n 'f': relation_f}\n }\n return scores" } ]
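Every precision/recall/F1 triple in score_graphs above comes from one micro-averaged computation over raw counts. For reference, a minimal sketch of a compute_f1 with the (pred_num, gold_num, match_num) signature those calls use; the zero-division guards are an assumption, since the helper's body is not excerpted here.

def compute_f1(pred_num: int, gold_num: int, match_num: int):
    """Micro-averaged precision, recall, and F1 from raw counts.

    Mirrors the compute_f1(pred, gold, match) calls in score_graphs above;
    the zero-count guards are assumed, not taken from the source helper.
    """
    precision = match_num / pred_num if pred_num > 0 else 0.0
    recall = match_num / gold_num if gold_num > 0 else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall > 0 else 0.0
    return precision, recall, f1

# Example: 8 predicted triggers, 10 gold triggers, 6 span+type matches.
prec, rec, f1 = compute_f1(8, 10, 6)  # 0.75, 0.6, ~0.667
assert abs(f1 - 2 * prec * rec / (prec + rec)) < 1e-9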
import os, sys, logging, tqdm, pprint, copy
import torch
import numpy as np
import ipdb
from transformers import (BertTokenizer, RobertaTokenizer, XLMRobertaTokenizer,
                          AutoTokenizer, get_linear_schedule_with_warmup)
from torch.utils.data import DataLoader
# Use torch's AdamW only; importing transformers' deprecated AdamW as well
# would be immediately shadowed by this import.
from torch.optim import AdamW
from ..trainer import BasicTrainer
from .E2Emodel import OneIEE2EModel
from .data import IEDataset
from .util import generate_vocabs, load_valid_patterns, save_result, best_score_by_task
from .scorer import score_graphs
from scorer import compute_f1, print_scores
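The trainer below pairs torch's AdamW with transformers' linear warmup schedule over three parameter groups (BERT encoder, generic layers, and CRF/global-feature layers without weight decay). A condensed, runnable sketch of that optimizer setup; the two-layer model and all hyperparameter values are placeholders standing in for the config fields.

import torch
from torch.optim import AdamW
from transformers import get_linear_schedule_with_warmup

model = torch.nn.Sequential(torch.nn.Linear(8, 8), torch.nn.Linear(8, 2))
param_groups = [
    # encoder-style group: small learning rate, its own weight decay
    {"params": model[0].parameters(), "lr": 1e-5, "weight_decay": 1e-5},
    # head-style group: larger learning rate; weight_decay=0 for CRF-like params
    {"params": model[1].parameters(), "lr": 1e-3, "weight_decay": 0},
]
optimizer = AdamW(params=param_groups)
batch_num, warmup_epoch, max_epoch = 100, 5, 50  # placeholder config values
schedule = get_linear_schedule_with_warmup(
    optimizer,
    num_warmup_steps=batch_num * warmup_epoch,
    num_training_steps=batch_num * max_epoch,
)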
16,322
def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"] self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs) self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.cuda(device=self.config.gpu_device) def load_model(self, checkpoint=None): self.load_tokenizer_(checkpoint=checkpoint) self.load_model_(checkpoint=checkpoint) def train(self, train_data, dev_data, **kwargs): self.load_tokenizer_() train_set = IEDataset(train_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) dev_set = IEDataset(dev_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) self.vocabs = generate_vocabs([train_set, dev_set]) train_set.numberize(self.tokenizer, self.vocabs) dev_set.numberize(self.tokenizer, self.vocabs) self.load_model_() batch_num = len(train_set) // self.config.batch_size + (len(train_set) % self.config.batch_size != 0) dev_batch_num = len(dev_set) // self.config.eval_batch_size + (len(dev_set) % self.config.eval_batch_size != 0) param_groups = [ { 'params': [p for n, p in self.model.named_parameters() if n.startswith('bert')], 'lr': self.config.bert_learning_rate, 'weight_decay': self.config.bert_weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and 'crf' not in n and 'global_feature' not in n], 'lr': self.config.learning_rate, 'weight_decay': self.config.weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and ('crf' in n or 'global_feature' in n)], 'lr': self.config.learning_rate, 'weight_decay': 0 } ] optimizer = AdamW(params=param_groups) schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=batch_num*self.config.warmup_epoch, num_training_steps=batch_num*self.config.max_epoch) best_scores = {self.config.target_task: {"f": 0.0}} best_epoch = -1 target_task = self.config.target_task logger.info('================Start Training================') for epoch in range(self.config.max_epoch): logger.info('Epoch: {}'.format(epoch)) # training step progress = tqdm.tqdm(total=batch_num, ncols=75, desc='Train {}'.format(epoch)) optimizer.zero_grad() cummulate_loss = 0. 
for batch_idx, batch in enumerate(DataLoader( train_set, batch_size=self.config.batch_size // self.config.accumulate_step, shuffle=True, drop_last=False, collate_fn=train_set.collate_fn)): loss = self.model(batch) loss = loss * (1 / self.config.accumulate_step) cummulate_loss += loss loss.backward() if (batch_idx + 1) % self.config.accumulate_step == 0: progress.update(1) torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.config.grad_clipping) optimizer.step() schedule.step() optimizer.zero_grad() progress.close() logger.info({"average training loss": (cummulate_loss / batch_idx).data}) # dev set progress = tqdm.tqdm(total=dev_batch_num, ncols=75, desc='Dev {}'.format(epoch)) best_dev_role_model = False dev_gold_graphs, dev_pred_graphs, dev_tokens, dev_wnd_ids = [], [], [], [] for batch in DataLoader(dev_set, batch_size=self.config.eval_batch_size, shuffle=False, collate_fn=dev_set.collate_fn): progress.update(1) graphs = self.model.predict(batch, gold_tri=False) gold_graph = copy.deepcopy(batch.graphs) for graph in graphs: graph.clean(relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations) dev_gold_graphs.extend(gold_graph) dev_pred_graphs.extend(graphs) dev_tokens.extend(batch.tokens) dev_wnd_ids.extend(batch.wnd_ids) progress.close()
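The loop above implements gradient accumulation: each mini-batch loss is scaled by 1/accumulate_step, and clipping plus the optimizer and scheduler steps only fire every accumulate_step batches. The same pattern in isolation, with a toy model and stand-in hyperparameters:

import torch
from torch.utils.data import DataLoader, TensorDataset

accumulate_step, grad_clipping = 4, 5.0  # stand-ins for the config values
model = torch.nn.Linear(10, 1)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
data = TensorDataset(torch.randn(64, 10), torch.randn(64, 1))

optimizer.zero_grad()
for batch_idx, (x, y) in enumerate(DataLoader(data, batch_size=8)):
    loss = torch.nn.functional.mse_loss(model(x), y)
    # scale so accumulated gradients match one step on the full effective batch
    (loss / accumulate_step).backward()
    if (batch_idx + 1) % accumulate_step == 0:
        torch.nn.utils.clip_grad_norm_(model.parameters(), grad_clipping)
        optimizer.step()
        optimizer.zero_grad()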
logger = logging.getLogger(__name__) class OneIEE2ETrainer(BasicTrainer): def __init__(self, config, type_set=None): super().__init__(config, type_set) self.tokenizer = None self.model = None self.valid_patterns = None @classmethod def add_extra_info_fn(cls, instances, raw_data, config): extra_info_map = {} for dt in raw_data: extra_info = { "entity_mentions": dt["entity_mentions"] if "entity_mentions" in dt else [], "relation_mentions": dt["relation_mentions"] if "relation_mentions" in dt else [], "event_mentions": dt["event_mentions"] if "event_mentions" in dt else [], } extra_info_map[(dt["doc_id"], dt["wnd_id"])] = extra_info for instance in instances: instance["extra_info"] = extra_info_map[(instance["doc_id"], instance["wnd_id"])] return instances def load_tokenizer_(self, checkpoint=None): if checkpoint: logger.info(f"Loading tokenizer from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.tokenizer")) self.tokenizer = state["tokenizer"] else: logger.info(f"Loading tokenizer from {self.config.pretrained_model_name}") if self.config.pretrained_model_name.startswith('bert-'): self.tokenizer = BertTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('roberta-'): self.tokenizer = RobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) elif self.config.pretrained_model_name.startswith('xlm-roberta-'): self.tokenizer = XLMRobertaTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir) else: self.tokenizer = AutoTokenizer.from_pretrained(self.config.pretrained_model_name, cache_dir=self.config.cache_dir, do_lower_case=False) def load_model_(self, checkpoint=None): assert self.tokenizer if checkpoint: logger.info(f"Loading model from {checkpoint}") state = torch.load(os.path.join(checkpoint, "best_model.state"), map_location=f'cuda:{self.config.gpu_device}') self.vocabs = state["vocabs"] self.type_set = state["type_set"] self.valid_patterns = state["valid_patterns"] self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.load_state_dict(state['model']) self.model.cuda(device=self.config.gpu_device) else: self.valid_patterns = load_valid_patterns(self.config.valid_pattern_path, self.vocabs) self.model = OneIEE2EModel(self.config, self.vocabs, self.valid_patterns) self.model.cuda(device=self.config.gpu_device) def load_model(self, checkpoint=None): self.load_tokenizer_(checkpoint=checkpoint) self.load_model_(checkpoint=checkpoint) def train(self, train_data, dev_data, **kwargs): self.load_tokenizer_() train_set = IEDataset(train_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) dev_set = IEDataset(dev_data, self.tokenizer, max_length=self.config.max_length, gpu=True, relation_mask_self=self.config.relation_mask_self, relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations, test=False ) self.vocabs = generate_vocabs([train_set, dev_set]) train_set.numberize(self.tokenizer, self.vocabs) dev_set.numberize(self.tokenizer, self.vocabs) self.load_model_() batch_num = len(train_set) // self.config.batch_size + (len(train_set) % self.config.batch_size != 0) dev_batch_num = len(dev_set) // self.config.eval_batch_size + (len(dev_set) % 
self.config.eval_batch_size != 0) param_groups = [ { 'params': [p for n, p in self.model.named_parameters() if n.startswith('bert')], 'lr': self.config.bert_learning_rate, 'weight_decay': self.config.bert_weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and 'crf' not in n and 'global_feature' not in n], 'lr': self.config.learning_rate, 'weight_decay': self.config.weight_decay }, { 'params': [p for n, p in self.model.named_parameters() if not n.startswith('bert') and ('crf' in n or 'global_feature' in n)], 'lr': self.config.learning_rate, 'weight_decay': 0 } ] optimizer = AdamW(params=param_groups) schedule = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=batch_num*self.config.warmup_epoch, num_training_steps=batch_num*self.config.max_epoch) best_scores = {self.config.target_task: {"f": 0.0}} best_epoch = -1 target_task = self.config.target_task logger.info('================Start Training================') for epoch in range(self.config.max_epoch): logger.info('Epoch: {}'.format(epoch)) # training step progress = tqdm.tqdm(total=batch_num, ncols=75, desc='Train {}'.format(epoch)) optimizer.zero_grad() cummulate_loss = 0. for batch_idx, batch in enumerate(DataLoader( train_set, batch_size=self.config.batch_size // self.config.accumulate_step, shuffle=True, drop_last=False, collate_fn=train_set.collate_fn)): loss = self.model(batch) loss = loss * (1 / self.config.accumulate_step) cummulate_loss += loss loss.backward() if (batch_idx + 1) % self.config.accumulate_step == 0: progress.update(1) torch.nn.utils.clip_grad_norm_( self.model.parameters(), self.config.grad_clipping) optimizer.step() schedule.step() optimizer.zero_grad() progress.close() logger.info({"average training loss": (cummulate_loss / batch_idx).data}) # dev set progress = tqdm.tqdm(total=dev_batch_num, ncols=75, desc='Dev {}'.format(epoch)) best_dev_role_model = False dev_gold_graphs, dev_pred_graphs, dev_tokens, dev_wnd_ids = [], [], [], [] for batch in DataLoader(dev_set, batch_size=self.config.eval_batch_size, shuffle=False, collate_fn=dev_set.collate_fn): progress.update(1) graphs = self.model.predict(batch, gold_tri=False) gold_graph = copy.deepcopy(batch.graphs) for graph in graphs: graph.clean(relation_directional=self.config.relation_directional, symmetric_relations=self.config.symmetric_relations) dev_gold_graphs.extend(gold_graph) dev_pred_graphs.extend(graphs) dev_tokens.extend(batch.tokens) dev_wnd_ids.extend(batch.wnd_ids) progress.close()
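The epoch loop ends right before dev scoring; the gold next_line below hands the collected graphs to score_graphs, which returns a dict of {'prec', 'rec', 'f'} per task. A small sketch, with illustrative names, of the best-epoch bookkeeping that the best_scores/best_epoch variables above set up:

def update_best(dev_scores, best_scores, best_epoch, epoch, target_task):
    """Keep whichever epoch has the higher dev F1 on the target task."""
    if dev_scores[target_task]["f"] > best_scores[target_task]["f"]:
        return dev_scores, epoch
    return best_scores, best_epoch

# Toy values in the shape score_graphs returns.
best_scores, best_epoch = {"trigger": {"f": 0.0}}, -1
dev_scores = {"trigger": {"prec": 0.75, "rec": 0.60, "f": 0.667}}
best_scores, best_epoch = update_best(dev_scores, best_scores, best_epoch,
                                      epoch=0, target_task="trigger")
assert best_epoch == 0 and best_scores["trigger"]["f"] == 0.667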
dev_scores = score_graphs(dev_gold_graphs, dev_pred_graphs, self.vocabs['event_type'])
7
2023-11-15 21:32:56+00:00
24k
ahayler/s4c
models/bts/trainer_overfit.py
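The context list that follows revolves around a NeRF-style volume renderer; as a compact reference for what NeRFRenderer.composite below computes, a sketch of the standard compositing quadrature (alpha from density and interval length, transmittance via an exclusive cumulative product). Inputs here are toy tensors.

import torch

def composite_weights(sigmas, z_samp):
    """Per-sample compositing weights along each ray: alpha_i * T_i.

    Same quadrature as NeRFRenderer.composite below:
    alpha_i = 1 - exp(-delta_i * relu(sigma_i)), T_i = prod_{j<i} (1 - alpha_j).
    """
    deltas = z_samp[:, 1:] - z_samp[:, :-1]                      # (B, K-1)
    deltas = torch.cat([deltas, 1e10 * torch.ones_like(deltas[:, :1])], -1)
    alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas))   # (B, K)
    alphas_shifted = torch.cat(
        [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1)  # (B, K+1)
    T = torch.cumprod(alphas_shifted, -1)
    return alphas * T[:, :-1]                                    # (B, K)

w = composite_weights(torch.rand(2, 16), torch.linspace(0.1, 4.0, 16).expand(2, 16))
assert w.sum(-1).max() <= 1.0 + 1e-5  # total weight is bounded by transmittance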
[ { "identifier": "make_datasets", "path": "datasets/data_util.py", "snippet": "def make_datasets(config):\n type = config.get(\"type\", \"KITTI_Raw\")\n if type == \"KITTI_Odometry\":\n train_dataset = KittiOdometryDataset(\n base_path=config[\"data_path\"],\n frame_count=config.get(\"data_fc\", 1),\n target_image_size=config.get(\"image_size\", (128, 256)),\n return_stereo=config.get(\"data_stereo\", False),\n sequences=config.get(\"train_sequences\", (\"00\",)),\n custom_pose_path=config.get(\"custom_pose_path\", None),\n keyframe_offset=0 #-(config.get(\"data_fc\", 1) // 2)\n )\n test_dataset = KittiOdometryDataset(\n base_path=config[\"data_path\"],\n frame_count=config.get(\"data_fc\", 1),\n target_image_size=config.get(\"image_size\", (128, 256)),\n return_stereo=config.get(\"data_stereo\", False),\n sequences=config.get(\"val_sequences\", (\"00\",)),\n custom_pose_path=config.get(\"custom_pose_path\", None),\n keyframe_offset=0 #-(config.get(\"data_fc\", 1) // 2)\n )\n return train_dataset, test_dataset\n\n elif type == \"KITTI_Raw\":\n train_dataset = KittiRawDataset(\n data_path=config[\"data_path\"],\n pose_path=config[\"pose_path\"],\n split_path=os.path.join(config[\"split_path\"], \"train_files.txt\"),\n target_image_size=config.get(\"image_size\", (192, 640)),\n frame_count=config.get(\"data_fc\", 1),\n return_stereo=config.get(\"data_stereo\", False),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n dilation=config.get(\"dilation\", 1),\n color_aug=config.get(\"color_aug\", False)\n )\n test_dataset = KittiRawDataset(\n data_path=config[\"data_path\"],\n pose_path=config[\"pose_path\"],\n split_path=os.path.join(config[\"split_path\"], \"val_files.txt\"),\n target_image_size=config.get(\"image_size\", (192, 640)),\n frame_count=config.get(\"data_fc\", 1),\n return_stereo=config.get(\"data_stereo\", False),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n dilation=config.get(\"dilation\", 1),\n )\n return train_dataset, test_dataset\n\n elif type == \"KITTI_360\":\n if config.get(\"split_path\", None) is None:\n train_split_path = None\n test_split_path = None\n else:\n train_split_path = os.path.join(config[\"split_path\"], \"train_files.txt\")\n test_split_path = os.path.join(config[\"split_path\"], \"val_files.txt\")\n\n train_dataset = Kitti360Dataset(\n data_path=config[\"data_path\"],\n data_segmentation_path=config.get(\"data_segmentation_path\", None),\n pose_path=config[\"pose_path\"],\n split_path=train_split_path,\n target_image_size=tuple(config.get(\"image_size\", (192, 640))),\n frame_count=config.get(\"data_fc\", 3),\n return_stereo=config.get(\"data_stereo\", True),\n return_fisheye=config.get(\"data_fisheye\", True),\n return_3d_bboxes=config.get(\"data_3d_bboxes\", False),\n return_segmentation=config.get(\"data_segmentation\", False),\n segmentation_mode=config.get(\"segmentation_mode\", None),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n dilation=config.get(\"dilation\", 1),\n fisheye_rotation=config.get(\"fisheye_rotation\", 0),\n fisheye_offset=config.get(\"fisheye_offset\", 1),\n color_aug=config.get(\"color_aug\", False),\n is_preprocessed=config.get(\"is_preprocessed\", False),\n load_kitti_360_segmentation_gt=False,\n constrain_to_datapoints=config.get(\"constrain_to_datapoints\", False),\n additional_random_front_offset=config.get(\"additional_random_front_offset\", False)\n )\n test_dataset = Kitti360Dataset(\n data_path=config[\"data_path\"],\n data_segmentation_path=config.get(\"data_segmentation_path\", None),\n 
pose_path=config[\"pose_path\"],\n split_path=test_split_path,\n target_image_size=tuple(config.get(\"image_size\", (192, 640))),\n frame_count=config.get(\"data_fc\", 3),\n return_stereo=config.get(\"data_stereo\", True),\n return_fisheye=config.get(\"data_fisheye\", True),\n return_3d_bboxes=config.get(\"data_3d_bboxes\", False),\n return_segmentation=config.get(\"data_segmentation\", False),\n segmentation_mode=config.get(\"segmentation_mode\", None),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n fisheye_rotation=config.get(\"fisheye_rotation\", 0),\n fisheye_offset=config.get(\"fisheye_offset\", 1),\n dilation=config.get(\"dilation\", 1),\n is_preprocessed=config.get(\"is_preprocessed\", False),\n load_kitti_360_segmentation_gt=True,\n constrain_to_datapoints=config.get(\"constrain_to_datapoints\", False),\n additional_random_front_offset=config.get(\"additional_random_front_offset\", False)\n )\n return train_dataset, test_dataset\n\n elif type == \"RealEstate10k\":\n train_dataset = RealEstate10kDataset(\n data_path=config[\"data_path\"],\n split_path=None,\n target_image_size=config.get(\"image_size\", (256, 384)),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=0, #-(config.get(\"data_fc\", 1) // 2),\n dilation=config.get(\"dilation\", 10),\n color_aug=config.get(\"color_aug\", False)\n )\n test_dataset = RealEstate10kDataset(\n data_path=config[\"data_path\"],\n split_path=os.path.join(config[\"split_path\"], \"val_files.txt\"),\n target_image_size=config.get(\"image_size\", (256, 384)),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=0, #-(config.get(\"data_fc\", 1) // 2),\n dilation=config.get(\"dilation\", 10),\n color_aug=False\n )\n return train_dataset, test_dataset\n\n elif type == \"Waymo\":\n if config.get(\"split_path\", None) is None:\n train_split_path = None\n test_split_path = None\n else:\n train_split_path = os.path.join(config[\"split_path\"], \"train_files.txt\")\n test_split_path = os.path.join(config[\"split_path\"], \"val_files.txt\")\n\n train_dataset = WaymoDataset(\n data_path=config[\"data_path\"],\n mode=\"training\",\n split_path=train_split_path,\n target_image_size=tuple(config.get(\"image_size\", (320, 480))),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n return_45=config.get(\"return_45\", True),\n return_90=config.get(\"return_90\", True),\n offset_45=config.get(\"offset_45\", 5),\n offset_90=config.get(\"offset_90\", 10),\n dilation=config.get(\"dilation\", 1),\n color_aug=config.get(\"color_aug\", True),\n correct_exposure=config.get(\"correct_exposure\", True),\n )\n test_dataset = WaymoDataset(\n data_path=config[\"data_path\"],\n mode=\"validation\",\n split_path=test_split_path,\n target_image_size=tuple(config.get(\"image_size\", (320, 480))),\n frame_count=config.get(\"data_fc\", 2),\n keyframe_offset=config.get(\"keyframe_offset\", 0),\n return_45=config.get(\"return_45\", True),\n return_90=config.get(\"return_90\", True),\n offset_45=config.get(\"offset_45\", 5),\n offset_90=config.get(\"offset_90\", 10),\n dilation=config.get(\"dilation\", 1),\n color_aug=False,\n return_depth=True,\n correct_exposure=config.get(\"correct_exposure\", True),\n )\n return train_dataset, test_dataset\n\n else:\n raise NotImplementedError(f\"Unsupported dataset type: {type}\")" }, { "identifier": "make_scheduler", "path": "models/common/model/scheduler.py", "snippet": "def make_scheduler(config, optim):\n type = config.get(\"type\", \"fix\")\n if type == \"fix\":\n 
scheduler = FixLR(optim)\n return scheduler\n elif type == \"step\":\n scheduler = StepLR(\n optim,\n config[\"step_size\"],\n config[\"gamma\"]\n )\n return scheduler\n else:\n raise NotImplementedError(f\"Unknown learning rate scheduler type: {type}\")" }, { "identifier": "NeRFRenderer", "path": "models/common/render/nerf.py", "snippet": "class NeRFRenderer(torch.nn.Module):\n \"\"\"\n NeRF differentiable renderer\n :param n_coarse number of coarse (binned uniform) samples\n :param n_fine number of fine (importance) samples\n :param n_fine_depth number of expected depth samples\n :param noise_std noise to add to sigma. We do not use it\n :param depth_std noise for depth samples\n :param eval_batch_size ray batch size for evaluation\n :param white_bkgd if true, background color is white; else black\n :param lindisp if to use samples linear in disparity instead of distance\n :param sched ray sampling schedule. list containing 3 lists of equal length.\n sched[0] is list of iteration numbers,\n sched[1] is list of coarse sample numbers,\n sched[2] is list of fine sample numbers\n \"\"\"\n\n def __init__(\n self,\n n_coarse=128,\n n_fine=0,\n n_fine_depth=0,\n noise_std=0.0,\n depth_std=0.01,\n eval_batch_size=100000,\n white_bkgd=False,\n lindisp=False,\n sched=None, # ray sampling schedule for coarse and fine rays\n hard_alpha_cap=False\n ):\n super().__init__()\n self.n_coarse = n_coarse\n self.n_fine = n_fine\n self.n_fine_depth = n_fine_depth\n\n self.noise_std = noise_std\n self.depth_std = depth_std\n\n self.eval_batch_size = eval_batch_size\n self.white_bkgd = white_bkgd\n self.lindisp = lindisp\n if lindisp:\n print(\"Using linear displacement rays\")\n self.using_fine = n_fine > 0\n self.sched = sched\n if sched is not None and len(sched) == 0:\n self.sched = None\n self.register_buffer(\n \"iter_idx\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.register_buffer(\n \"last_sched\", torch.tensor(0, dtype=torch.long), persistent=True\n )\n self.hard_alpha_cap = hard_alpha_cap\n\n def sample_coarse(self, rays):\n \"\"\"\n Stratified sampling. 
Note this is different from original NeRF slightly.\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :return (B, Kc)\n \"\"\"\n device = rays.device\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n\n step = 1.0 / self.n_coarse\n B = rays.shape[0]\n z_steps = torch.linspace(0, 1 - step, self.n_coarse, device=device) # (Kc)\n z_steps = z_steps.unsqueeze(0).repeat(B, 1) # (B, Kc)\n z_steps += torch.rand_like(z_steps) * step\n if not self.lindisp: # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n return 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n # Use linear sampling in depth space\n return near * (1 - z_steps) + far * z_steps # (B, Kc)\n\n def sample_coarse_from_dist(self, rays, weights, z_samp):\n device = rays.device\n B = rays.shape[0]\n\n num_bins = weights.shape[-1]\n num_samples = self.n_coarse\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(B, num_samples, dtype=torch.float32, device=device) # (B, Kf)\n interval_ids = torch.searchsorted(cdf, u, right=True) - 1 # (B, Kf)\n interval_ids = torch.clamp(interval_ids, 0, num_samples-1)\n interval_interp = torch.rand_like(interval_ids, dtype=torch.float32)\n\n # z_samps describe the centers of the respective histogram bins. Therefore, we have to extend them to the left and right\n if self.lindisp:\n z_samp = 1 / z_samp\n\n centers = .5 * (z_samp[:, 1:] + z_samp[:, :-1])\n interval_borders = torch.cat((z_samp[:, :1], centers, z_samp[:, -1:]), dim=-1)\n\n left_border = torch.gather(interval_borders, dim=-1, index=interval_ids)\n right_border = torch.gather(interval_borders, dim=-1, index=interval_ids+1)\n\n z_samp_new = left_border * (1 - interval_interp) + right_border * interval_interp\n\n if self.lindisp:\n z_samp_new = 1 / z_samp_new\n\n assert not torch.any(torch.isnan(z_samp_new))\n\n return z_samp_new\n\n def sample_fine(self, rays, weights):\n \"\"\"min\n Weighted stratified (importance) sample\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param weights (B, Kc)\n :return (B, Kf-Kfd)\n \"\"\"\n device = rays.device\n B = rays.shape[0]\n\n weights = weights.detach() + 1e-5 # Prevent division by zero\n pdf = weights / torch.sum(weights, -1, keepdim=True) # (B, Kc)\n cdf = torch.cumsum(pdf, -1) # (B, Kc)\n cdf = torch.cat([torch.zeros_like(cdf[:, :1]), cdf], -1) # (B, Kc+1)\n\n u = torch.rand(\n B, self.n_fine - self.n_fine_depth, dtype=torch.float32, device=device\n ) # (B, Kf)\n inds = torch.searchsorted(cdf, u, right=True).float() - 1.0 # (B, Kf)\n inds = torch.clamp_min(inds, 0.0)\n\n z_steps = (inds + torch.rand_like(inds)) / self.n_coarse # (B, Kf)\n\n near, far = rays[:, -2:-1], rays[:, -1:] # (B, 1)\n if not self.lindisp: # Use linear sampling in depth space\n z_samp = near * (1 - z_steps) + far * z_steps # (B, Kf)\n else: # Use linear sampling in disparity space\n z_samp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps) # (B, Kf)\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def sample_fine_depth(self, rays, depth):\n \"\"\"\n Sample around specified depth\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param depth (B)\n :return (B, Kfd)\n \"\"\"\n z_samp = depth.unsqueeze(1).repeat((1, 
self.n_fine_depth))\n z_samp += torch.randn_like(z_samp) * self.depth_std\n # Clamp does not support tensor bounds\n z_samp = torch.max(torch.min(z_samp, rays[:, -1:]), rays[:, -2:-1])\n\n assert not torch.any(torch.isnan(z_samp))\n\n return z_samp\n\n def composite(self, model, rays, z_samp, coarse=True, sb=0, predict_segmentation=False):\n \"\"\"\n Render RGB and depth for each ray using NeRF alpha-compositing formula,\n given sampled positions along each ray (see sample_*)\n :param model should return (B, (r, g, b, sigma)) when called with (B, (x, y, z))\n should also support 'coarse' boolean argument\n :param rays ray [origins (3), directions (3), near (1), far (1)] (B, 8)\n :param z_samp z positions sampled for each ray (B, K)\n :param coarse whether to evaluate using coarse NeRF\n :param predict_segmentation if true also predict the semantic distribution\n :param sb super-batch dimension; 0 = disable\n :return weights (B, K), rgb (B, 3), depth (B)\n \"\"\"\n with profiler.record_function(\"renderer_composite\"):\n B, K = z_samp.shape\n\n deltas = z_samp[:, 1:] - z_samp[:, :-1] # (B, K-1)\n delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # infty (B, 1)\n # delta_inf = rays[:, -1:] - z_samp[:, -1:]\n deltas = torch.cat([deltas, delta_inf], -1) # (B, K)\n\n # (B, K, 3)\n points = rays[:, None, :3] + z_samp.unsqueeze(2) * rays[:, None, 3:6]\n points = points.reshape(-1, 3) # (B*K, 3)\n\n use_viewdirs = hasattr(model, \"use_viewdirs\") and model.use_viewdirs\n\n rgbs_all, invalid_all, sigmas_all, segs_all = [], [], [], []\n if sb > 0:\n points = points.reshape(\n sb, -1, 3\n ) # (SB, B'*K, 3) B' is real ray batch size\n eval_batch_size = (self.eval_batch_size - 1) // sb + 1\n eval_batch_dim = 1\n else:\n eval_batch_size = self.eval_batch_size\n eval_batch_dim = 0\n\n split_points = torch.split(points, eval_batch_size, dim=eval_batch_dim)\n if use_viewdirs:\n dim1 = K\n viewdirs = rays[:, None, 3:6].expand(-1, dim1, -1) # (B, K, 3)\n if sb > 0:\n viewdirs = viewdirs.reshape(sb, -1, 3) # (SB, B'*K, 3)\n else:\n viewdirs = viewdirs.reshape(-1, 3) # (B*K, 3)\n split_viewdirs = torch.split(\n viewdirs, eval_batch_size, dim=eval_batch_dim\n )\n for pnts, dirs in zip(split_points, split_viewdirs):\n rgbs, invalid, sigmas = model(pnts, coarse=coarse, viewdirs=dirs)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n else:\n for pnts in split_points:\n if predict_segmentation:\n rgbs, invalid, sigmas, segs = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n segs_all.append(segs)\n else:\n rgbs, invalid, sigmas = model(pnts, coarse=coarse,\n predict_segmentation=predict_segmentation)\n rgbs_all.append(rgbs)\n invalid_all.append(invalid)\n sigmas_all.append(sigmas)\n points = None\n viewdirs = None\n # (B*K, 4) OR (SB, B'*K, 4)\n rgbs = torch.cat(rgbs_all, dim=eval_batch_dim)\n invalid = torch.cat(invalid_all, dim=eval_batch_dim)\n sigmas = torch.cat(sigmas_all, dim=eval_batch_dim)\n\n if predict_segmentation:\n segs = torch.cat(segs_all, dim=eval_batch_dim)\n segs = segs.reshape(B, K, -1) # (B, K, n_classes)\n\n rgbs = rgbs.reshape(B, K, -1) # (B, K, 4 or 5)\n invalid = invalid.reshape(B, K, -1)\n sigmas = sigmas.reshape(B, K)\n\n if self.training and self.noise_std > 0.0:\n sigmas = sigmas + torch.randn_like(sigmas) * self.noise_std\n\n alphas = 1 - torch.exp(-deltas.abs() * torch.relu(sigmas)) # (B, K) (delta should be positive anyways)\n\n if self.hard_alpha_cap:\n alphas[:, -1] = 1\n\n deltas = None\n sigmas = None\n alphas_shifted = 
torch.cat(\n [torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1\n ) # (B, K+1) = [1, a1, a2, ...]\n T = torch.cumprod(alphas_shifted, -1) # (B)\n weights = alphas * T[:, :-1] # (B, K)\n # alphas = None\n alphas_shifted = None\n\n rgb_final = torch.sum(weights.unsqueeze(-1) * rgbs, -2) # (B, 3)\n depth_final = torch.sum(weights * z_samp, -1) # (B)\n\n\n\n if self.white_bkgd:\n # White background\n pix_alpha = weights.sum(dim=1) # (B), pixel alpha\n rgb_final = rgb_final + 1 - pix_alpha.unsqueeze(-1) # (B, 3)\n\n if predict_segmentation:\n segs_final = torch.sum(weights.unsqueeze(-1) * segs, dim=-2) # (B, n_classes)\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs,\n # segs,\n segs_final\n )\n else:\n return (\n weights,\n rgb_final,\n depth_final,\n alphas,\n invalid,\n z_samp,\n rgbs\n )\n\n def forward(\n self, model, rays, want_weights=False, want_alphas=False, want_z_samps=False, want_rgb_samps=False, predict_segmentation=False, sample_from_dist=None):\n \"\"\"\n :model nerf model, should return (SB, B, (r, g, b, sigma))\n when called with (SB, B, (x, y, z)), for multi-object:\n SB = 'super-batch' = size of object batch,\n B = size of per-object ray batch.\n Should also support 'coarse' boolean argument for coarse NeRF.\n :param rays ray spec [origins (3), directions (3), near (1), far (1)] (SB, B, 8)\n :param want_weights if true, returns compositing weights (SB, B, K)\n :param predict_segmentation if true, return the segmentation class distribution for each pixel\n :return render dict\n \"\"\"\n with profiler.record_function(\"renderer_forward\"):\n if self.sched is not None and self.last_sched.item() > 0:\n self.n_coarse = self.sched[1][self.last_sched.item() - 1]\n self.n_fine = self.sched[2][self.last_sched.item() - 1]\n\n assert len(rays.shape) == 3\n superbatch_size = rays.shape[0]\n rays = rays.reshape(-1, 8) # (SB * B, 8)\n\n if sample_from_dist is None:\n z_coarse = self.sample_coarse(rays) # (B, Kc)\n else:\n prop_weights, prop_z_samp = sample_from_dist\n n_samples = prop_weights.shape[-1]\n prop_weights = prop_weights.reshape(-1, n_samples)\n prop_z_samp = prop_z_samp.reshape(-1, n_samples)\n z_coarse = self.sample_coarse_from_dist(rays, prop_weights, prop_z_samp)\n z_coarse, _ = torch.sort(z_coarse, dim=-1)\n\n coarse_composite = self.composite(\n model, rays, z_coarse, coarse=True, sb=superbatch_size, predict_segmentation=predict_segmentation\n )\n\n outputs = DotMap(\n coarse=self._format_outputs(\n coarse_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas,\n want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps, want_segmentation=predict_segmentation\n ),\n )\n\n if self.using_fine:\n all_samps = [z_coarse]\n if self.n_fine - self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine(rays, coarse_composite[0].detach())\n ) # (B, Kf - Kfd)\n if self.n_fine_depth > 0:\n all_samps.append(\n self.sample_fine_depth(rays, coarse_composite[2])\n ) # (B, Kfd)\n z_combine = torch.cat(all_samps, dim=-1) # (B, Kc + Kf)\n z_combine_sorted, argsort = torch.sort(z_combine, dim=-1)\n fine_composite = self.composite(\n model, rays, z_combine_sorted, coarse=False, sb=superbatch_size,\n )\n outputs.fine = self._format_outputs(\n fine_composite, superbatch_size, want_weights=want_weights, want_alphas=want_alphas, want_z_samps=want_z_samps, want_rgb_samps=want_rgb_samps\n )\n\n return outputs\n\n def _format_outputs(\n self, rendered_outputs, superbatch_size, want_weights=False, want_alphas=False, 
want_z_samps=False, want_rgb_samps=False, want_segmentation=False\n ):\n if want_segmentation:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps, segs_final = rendered_outputs\n else:\n weights, rgb_final, depth, alphas, invalid, z_samps, rgb_samps = rendered_outputs\n\n n_smps = weights.shape[-1]\n out_d_rgb = rgb_final.shape[-1]\n out_d_i = invalid.shape[-1]\n\n if superbatch_size > 0:\n rgb_final = rgb_final.reshape(superbatch_size, -1, out_d_rgb)\n depth = depth.reshape(superbatch_size, -1)\n weights = weights.reshape(superbatch_size, -1, n_smps)\n alphas = alphas.reshape(superbatch_size, -1, n_smps)\n invalid = invalid.reshape(superbatch_size, -1, n_smps, out_d_i)\n z_samps = z_samps.reshape(superbatch_size, -1, n_smps)\n rgb_samps = rgb_samps.reshape(superbatch_size, -1, n_smps, out_d_rgb)\n\n if want_segmentation:\n out_segs = segs_final.shape[-1]\n segs_final = segs_final.reshape(superbatch_size, -1, out_segs)\n\n ret_dict = DotMap(rgb=rgb_final, depth=depth, invalid=invalid)\n if want_weights:\n ret_dict.weights = weights\n if want_alphas:\n ret_dict.alphas = alphas\n if want_z_samps:\n ret_dict.z_samps = z_samps\n if want_rgb_samps:\n ret_dict.rgb_samps = rgb_samps\n if want_segmentation:\n ret_dict.segs = segs_final\n # ret_dict.segs_raw = segs_raw\n return ret_dict\n\n def sched_step(self, steps=1):\n \"\"\"\n Called each training iteration to update sample numbers\n according to schedule\n \"\"\"\n if self.sched is None:\n return\n self.iter_idx += steps\n while (\n self.last_sched.item() < len(self.sched[0])\n and self.iter_idx.item() >= self.sched[0][self.last_sched.item()]\n ):\n self.n_coarse = self.sched[1][self.last_sched.item()]\n self.n_fine = self.sched[2][self.last_sched.item()]\n print(\n \"INFO: NeRF sampling resolution changed on schedule ==> c\",\n self.n_coarse,\n \"f\",\n self.n_fine,\n )\n self.last_sched += 1\n\n @classmethod\n def from_conf(cls, conf, white_bkgd=False, eval_batch_size=100000):\n return cls(\n conf.get(\"n_coarse\", 128),\n conf.get(\"n_fine\", 0),\n n_fine_depth=conf.get(\"n_fine_depth\", 0),\n noise_std=conf.get(\"noise_std\", 0.0),\n depth_std=conf.get(\"depth_std\", 0.01),\n white_bkgd=conf.get(\"white_bkgd\", white_bkgd),\n lindisp=conf.get(\"lindisp\", True),\n eval_batch_size=conf.get(\"eval_batch_size\", eval_batch_size),\n sched=conf.get(\"sched\", None),\n hard_alpha_cap=conf.get(\"hard_alpha_cap\", False)\n )\n\n def bind_parallel(self, net, gpus=None, simple_output=False):\n \"\"\"\n Returns a wrapper module compatible with DataParallel.\n Specifically, it renders rays with this renderer\n but always using the given network instance.\n Specify a list of GPU ids in 'gpus' to apply DataParallel automatically.\n :param net A PixelNeRF network\n :param gpus list of GPU ids to parallize to. If length is 1,\n does not parallelize\n :param simple_output only returns rendered (rgb, depth) instead of the \n full render output map. 
Saves data tranfer cost.\n :return torch module\n \"\"\"\n wrapped = _RenderWrapper(net, self, simple_output=simple_output)\n if gpus is not None and len(gpus) > 1:\n print(\"Using multi-GPU\", gpus)\n wrapped = torch.nn.DataParallel(wrapped, gpus, dim=1)\n return wrapped" }, { "identifier": "ReconstructionLoss", "path": "models/bts/model/loss.py", "snippet": "class ReconstructionLoss:\n def __init__(self, config, use_automasking=False) -> None:\n super().__init__()\n self.criterion_str = config.get(\"criterion\", \"l2\")\n if self.criterion_str == \"l2\":\n self.rgb_coarse_crit = torch.nn.MSELoss(reduction=\"none\")\n self.rgb_fine_crit = torch.nn.MSELoss(reduction=\"none\")\n elif self.criterion_str == \"l1\":\n self.rgb_coarse_crit = torch.nn.L1Loss(reduction=\"none\")\n self.rgb_fine_crit = torch.nn.L1Loss(reduction=\"none\")\n elif self.criterion_str == \"l1+ssim\":\n self.rgb_coarse_crit = compute_errors_l1ssim\n self.rgb_fine_crit = compute_errors_l1ssim\n self.invalid_policy = config.get(\"invalid_policy\", \"strict\")\n assert self.invalid_policy in [\"strict\", \"weight_guided\", \"weight_guided_diverse\", None, \"none\"]\n self.ignore_invalid = self.invalid_policy is not None and self.invalid_policy != \"none\"\n self.lambda_coarse = config.get(\"lambda_coarse\", 1)\n self.lambda_fine = config.get(\"lambda_fine\", 1)\n self.lambda_segmentation = config.get(\"lambda_segmentation\", 1)\n self.segmentation_class_weights = config.get(\"segmentation_class_weights\", None)\n\n if self.segmentation_class_weights is not None:\n self.segmentation_class_weights = torch.tensor(list(config.get(\"segmentation_class_weights\", None).values()))\n\n self.use_automasking = use_automasking\n\n self.lambda_entropy = config.get(\"lambda_entropy\", 0)\n self.lambda_density_entropy = config.get(\"lambda_density_entropy\", 0)\n self.lambda_depth_reg = config.get(\"lambda_depth_reg\", 0)\n self.lambda_alpha_reg = config.get(\"lambda_alpha_reg\", 0)\n self.lambda_surfaceness_reg = config.get(\"lambda_surfaceness_reg\", 0)\n self.lambda_edge_aware_smoothness = config.get(\"lambda_edge_aware_smoothness\", 0)\n self.lambda_depth_smoothness = config.get(\"lambda_depth_smoothness\", 0)\n\n self.median_thresholding = config.get(\"median_thresholding\", False)\n\n self.alpha_reg_reduction = config.get(\"alpha_reg_reduction\", \"ray\")\n self.alpha_reg_fraction = config.get(\"alpha_reg_fraction\", 1/8)\n\n if self.alpha_reg_reduction not in (\"ray\", \"slice\"):\n raise ValueError(f\"Unknown reduction for alpha regularization: {self.alpha_reg_reduction}\")\n\n @staticmethod\n def get_loss_metric_names():\n return [\"loss\", \"loss_rgb_coarse\", \"loss_rgb_fine\", \"loss_ray_entropy\", \"loss_depth_reg\"]\n\n def __call__(self, data):\n with profiler.record_function(\"loss_computation\"):\n n_scales = len(data[\"coarse\"])\n\n loss_dict = {}\n\n loss_coarse_all = 0\n loss_fine_all = 0\n loss_segmentation = 0\n loss = 0\n\n coarse_0 = data[\"coarse\"][0]\n fine_0 = data[\"fine\"][0]\n segmentation_0 = data[\"segmentation\"][0]\n invalid_coarse = coarse_0[\"invalid\"]\n invalid_fine = fine_0[\"invalid\"]\n invalid_segmentation = segmentation_0[\"invalid\"]\n\n weights_coarse = coarse_0[\"weights\"]\n weights_fine = fine_0[\"weights\"]\n weights_segmentation = segmentation_0[\"weights\"]\n\n if self.invalid_policy == \"strict\":\n # Consider all rays invalid where there is at least one invalidly sampled color\n invalid_coarse = torch.all(torch.any(invalid_coarse > .5, dim=-2), dim=-1).unsqueeze(-1)\n invalid_fine = 
torch.all(torch.any(invalid_fine > .5, dim=-2), dim=-1).unsqueeze(-1)\n invalid_segmentation = torch.all(torch.any(invalid_segmentation > .5, dim=-2), dim=-1).unsqueeze(-1)\n elif self.invalid_policy == \"weight_guided\":\n # Integrate invalid indicator function over the weights. It is invalid if > 90% of the mass is invalid. (Arbitrary threshold)\n invalid_coarse = torch.all((invalid_coarse.to(torch.float32) * weights_coarse.unsqueeze(-1)).sum(-2) > .9, dim=-1, keepdim=True)\n invalid_fine = torch.all((invalid_fine.to(torch.float32) * weights_fine.unsqueeze(-1)).sum(-2) > .9, dim=-1, keepdim=True)\n invalid_segmentation = torch.all((invalid_segmentation.to(torch.float32) * weights_segmentation.unsqueeze(-1)).sum(-2) > .9,\n dim=-1, keepdim=True)\n elif self.invalid_policy == \"weight_guided_diverse\":\n # We now also consider, whether there is enough variance in the ray colors to give a meaningful supervision signal.\n rgb_samps_c = coarse_0[\"rgb_samps\"]\n rgb_samps_f = fine_0[\"rgb_samps\"]\n ray_std_c = torch.std(rgb_samps_c, dim=-3).mean(-1)\n ray_std_f = torch.std(rgb_samps_f, dim=-3).mean(-1)\n\n # Integrate invalid indicator function over the weights. It is invalid if > 90% of the mass is invalid. (Arbitrary threshold)\n invalid_coarse = torch.all(((invalid_coarse.to(torch.float32) * weights_coarse.unsqueeze(-1)).sum(-2) > .9) | (ray_std_c < 0.01), dim=-1, keepdim=True)\n invalid_fine = torch.all(((invalid_fine.to(torch.float32) * weights_fine.unsqueeze(-1)).sum(-2) > .9) | (ray_std_f < 0.01), dim=-1, keepdim=True)\n\n # for now we just do the weight guided invalids for the segmentation\n invalid_segmentation = torch.all(\n (invalid_segmentation.to(torch.float32) * weights_segmentation.unsqueeze(-1)).sum(-2) > .9,\n dim=-1, keepdim=True)\n elif self.invalid_policy == \"none\":\n invalid_coarse = torch.zeros_like(torch.all(torch.any(invalid_coarse > .5, dim=-2), dim=-1).unsqueeze(-1), dtype=torch.bool)\n invalid_fine = torch.zeros_like(torch.all(torch.any(invalid_fine > .5, dim=-2), dim=-1).unsqueeze(-1), dtype=torch.bool)\n invalid_segmentation = torch.zeros_like(torch.all(torch.any(invalid_segmentation > .5, dim=-2), dim=-1).unsqueeze(-1),\n dtype=torch.bool)\n else:\n raise NotImplementedError\n\n loss_depth_reg = torch.tensor(0.0, device=invalid_fine.device)\n loss_alpha_reg = torch.tensor(0.0, device=invalid_fine.device)\n loss_surfaceness_reg = torch.tensor(0.0, device=invalid_fine.device)\n loss_eas = torch.tensor(0.0, device=invalid_fine.device)\n loss_depth_smoothness = torch.tensor(0.0, device=invalid_fine.device)\n\n for scale in range(n_scales):\n coarse = data[\"coarse\"][scale]\n fine = data[\"fine\"][scale]\n segmentation = data[\"segmentation\"][scale]\n\n rgb_coarse = coarse[\"rgb\"]\n rgb_fine = fine[\"rgb\"]\n rgb_gt = data[\"rgb_gt\"]\n segmentation_gt = data[\"segmentation_gt\"].permute(0, 4, 1, 2, 3).squeeze(1) #(batch_size, n_patches, h, w)\n bs, n_patch, ph, pw, n_classes = segmentation[\"segs\"].shape\n segmentation_gt = segmentation_gt.view(-1, ph, pw)\n\n # do cross entropy loss\n self.segmentation_class_weights = self.segmentation_class_weights.to(segmentation_gt.device).float()\n cp_loss_fn = torch.nn.NLLLoss(weight=self.segmentation_class_weights)\n # log_segmentation = torch.log(segmentation[\"segs\"] + 1e-5).permute(0, 4, 1, 2, 3) #(batch_size, n_classes, n_patches, h, w)\n patch_to_image = data[\"patch_to_image\"]\n front_indices = patch_to_image <= 4\n side_indices = patch_to_image > 4\n\n log_segmentation = torch.log(segmentation[\"segs\"] + 
1e-5).reshape(-1, ph, pw, n_classes).permute(0, 3, 1, 2)\n\n # Account for the invalids\n # TODO: Adjust the mean so that we don't have a low loss just because we have a lot of invalids\n invalid_segmentation = invalid_segmentation.squeeze(-1).to(torch.float32).reshape(-1, ph, pw)\n\n cp_loss = cp_loss_fn(\n ((1 - invalid_segmentation.contiguous()).unsqueeze(1) * log_segmentation.contiguous()).float(),\n ((1 - invalid_segmentation.contiguous()) * segmentation_gt.contiguous()).long())\n\n loss_segmentation += self.lambda_segmentation * cp_loss.item()\n\n loss += self.lambda_segmentation * cp_loss\n\n if self.use_automasking:\n thresh_gt = rgb_gt[..., -1:]\n rgb_coarse = rgb_coarse[..., :-1]\n rgb_fine = rgb_fine[..., :-1]\n rgb_gt = rgb_gt[..., :-1]\n\n rgb_coarse = rgb_coarse\n rgb_fine = rgb_fine\n rgb_gt = rgb_gt.unsqueeze(-2)\n\n using_fine = len(fine) > 0\n\n b, pc, h, w, nv, c = rgb_coarse.shape\n\n # Take minimum across all reconstructed views\n rgb_loss = self.rgb_coarse_crit(rgb_coarse, rgb_gt)\n rgb_loss = rgb_loss.amin(-2)\n\n if self.use_automasking:\n rgb_loss = torch.min(rgb_loss, thresh_gt)\n\n if self.ignore_invalid:\n rgb_loss = rgb_loss * (1 - invalid_coarse.to(torch.float32))\n\n if self.median_thresholding:\n threshold = torch.median(rgb_loss.view(b, -1), dim=-1)[0].view(-1, 1, 1, 1, 1)\n rgb_loss = rgb_loss[rgb_loss <= threshold]\n\n rgb_loss = rgb_loss.mean()\n\n loss_coarse_all += rgb_loss.item() * self.lambda_coarse\n if using_fine:\n fine_loss = self.rgb_fine_crit(rgb_fine, rgb_gt)\n fine_loss = fine_loss.amin(-2)\n\n if self.use_automasking:\n fine_loss = torch.min(fine_loss, thresh_gt)\n\n if self.ignore_invalid:\n fine_loss = fine_loss * (1 - invalid_fine.to(torch.float32))\n\n if self.median_thresholding:\n threshold = torch.median(fine_loss.view(b, -1), dim=-1)[0].view(-1, 1, 1, 1, 1)\n fine_loss = fine_loss[fine_loss <= threshold]\n\n fine_loss = fine_loss.mean()\n rgb_loss = rgb_loss * self.lambda_coarse + fine_loss * self.lambda_fine\n loss_fine_all += fine_loss.item() * self.lambda_fine\n else:\n loss_dict[\"loss_rgb_fine\"] = 0\n\n loss += rgb_loss\n\n if self.lambda_depth_reg > 0:\n depths = coarse[\"depth\"]\n diffs_x = depths[:, :, 1:, :] - depths[:, :, :-1, :]\n diffs_y = depths[:, :, :, 1:] - depths[:, :, :, :-1]\n loss_depth_reg_s = (diffs_x ** 2).mean() + (diffs_y ** 2).mean()\n loss_depth_reg += loss_depth_reg_s # * self.lambda_depth_reg\n loss += loss_depth_reg_s * self.lambda_depth_reg\n\n if self.lambda_alpha_reg > 0:\n alphas = coarse[\"alphas\"]\n n_smps = alphas.shape[-1]\n\n # alphas = alphas[..., :-1].sum(-1)\n # loss_alpha_reg_s = (alphas - (n_smps * self.alpha_reg_fraction)).clamp_min(0)\n # if self.ignore_invalid:\n # loss_alpha_reg_s = loss_alpha_reg_s * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n\n alpha_sum = alphas[..., :-1].sum(-1)\n min_cap = torch.ones_like(alpha_sum) * (n_smps * self.alpha_reg_fraction)\n\n if self.ignore_invalid:\n alpha_sum = alpha_sum * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n min_cap = min_cap * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n\n if self.alpha_reg_reduction == \"ray\":\n loss_alpha_reg_s = (alpha_sum - min_cap).clamp_min(0)\n elif self.alpha_reg_reduction == \"slice\":\n loss_alpha_reg_s = (alpha_sum.sum(dim=-1) - min_cap.sum(dim=-1)).clamp_min(0) / alpha_sum.shape[-1]\n\n # alphas = alphas[..., :-n_smps//16]\n # alpha_deltas = alphas[..., 1:] - alphas[..., :-1]\n # The sum of deltas should be zero. 
This means that the number of peaks (ie objects) is not limited, but there needs to be free space afterwards again.\n # We don't consider the last 1/16 samples. They are likely background.\n # loss_alpha_reg_s = alpha_deltas.sum(-1).clamp_min(0)\n\n loss_alpha_reg_s = loss_alpha_reg_s.mean()\n\n loss_alpha_reg += loss_alpha_reg_s\n loss += loss_alpha_reg_s * self.lambda_alpha_reg\n\n if self.lambda_surfaceness_reg > 0:\n alphas = coarse[\"alphas\"]\n n_smps = alphas.shape[-1]\n\n p = -torch.log(torch.exp(-alphas.abs()) + torch.exp(-(1 - alphas).abs()))\n p = p.mean(-1)\n\n if self.ignore_invalid:\n p = p * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n\n loss_surfaceness_reg_s = p.mean()\n\n loss_surfaceness_reg += loss_surfaceness_reg_s\n loss += loss_surfaceness_reg_s * self.lambda_surfaceness_reg\n\n if self.lambda_edge_aware_smoothness > 0:\n gt_img = rgb_gt\n depths = coarse[\"depth\"]\n loss_eas_s = edge_aware_smoothness(gt_img, depths)\n\n if self.ignore_invalid:\n invalid_scale = torch.ceil(F.interpolate(invalid_coarse.squeeze(-1).to(torch.float32), size=(depths.shape[-2:])))\n loss_eas_s = loss_eas_s * (1 - invalid_scale)\n\n loss_eas_s = loss_eas_s.mean()\n\n loss_eas += loss_eas_s\n loss += loss_eas_s * self.lambda_edge_aware_smoothness / (2 ** scale)\n\n if self.lambda_depth_smoothness > 0:\n depths = coarse[\"depth\"]\n loss_depth_smoothness_s = ((depths[..., :-1, :] - depths[..., 1:, :]) ** 2).mean() + ((depths[..., :, :-1] - depths[..., :, 1:]) ** 2).mean()\n\n loss_depth_smoothness += loss_depth_smoothness_s\n loss += loss_depth_smoothness_s * self.lambda_depth_smoothness\n\n\n loss = loss / n_scales\n\n loss_ray_entropy = torch.tensor(0.0, device=loss.device)\n if self.lambda_entropy > 0:\n alphas = coarse_0[\"alphas\"]\n alphas = alphas + 1e-5\n\n ray_density = alphas / alphas.sum(dim=-1, keepdim=True)\n ray_entropy = -(ray_density * torch.log(ray_density)).sum(-1) / (math.log2(alphas.shape[-1]))\n ray_entropy = ray_entropy * (1 - invalid_coarse.squeeze(-1).to(torch.float32))\n loss_ray_entropy = ray_entropy.mean()\n\n loss = loss + loss_ray_entropy * self.lambda_entropy\n\n # add density entropy loss\n loss_density_entropy = torch.tensor(0.0, device=loss.device)\n\n if self.lambda_density_entropy > 0:\n alphas = coarse_0[\"alphas\"]\n alphas = alphas + 1e-5\n density_entropy = (1 - alphas)*alphas\n loss_density_entropy = torch.mean(density_entropy) * self.lambda_density_entropy\n\n loss = loss + loss_density_entropy\n\n loss_dict[\"loss_rgb_coarse\"] = loss_coarse_all\n loss_dict[\"loss_rgb_fine\"] = loss_fine_all\n loss_dict[\"loss_segmentation\"] = loss_segmentation\n loss_dict[\"loss_ray_entropy\"] = loss_ray_entropy.item()\n loss_dict[\"loss_density_entropy\"] = loss_density_entropy.item()\n loss_dict[\"loss_depth_reg\"] = loss_depth_reg.item()\n loss_dict[\"loss_alpha_reg\"] = loss_alpha_reg.item()\n loss_dict[\"loss_eas\"] = loss_eas.item()\n loss_dict[\"loss_depth_smoothness\"] = loss_depth_smoothness.item()\n loss_dict[\"loss_invalid_ratio\"] = invalid_coarse.float().mean().item()\n loss_dict[\"loss\"] = loss.item()\n\n return loss, loss_dict" }, { "identifier": "get_metrics", "path": "models/bts/trainer.py", "snippet": "class BTSWrapper(nn.Module):\n def __init__(self, renderer, config, eval_nvs=False) -> None:\n def get_loss_metric_names():\n def forward(self, data):\n def compute_segmentation_metrics(self, data):\n def compute_depth_metrics(self, data):\n def compute_nvs_metrics(self, data):\ndef training(local_rank, config):\ndef get_dataflow(config, 
logger=None):\ndef get_metrics(config, device):\ndef initialize(config: dict, logger=None):\ndef visualize(engine: Engine, logger: TensorboardLogger, step: int, tag: str):" }, { "identifier": "render_profile", "path": "scripts/inference_setup.py", "snippet": "def render_profile(net, cam_incl_adjust):\n \"\"\"Note: For this to work you have to encode the image with the net first!!!\"\"\"\n q_pts = get_pts(OUT_RES.X_RANGE, OUT_RES.Y_RANGE, OUT_RES.Z_RANGE, OUT_RES.P_RES_ZX[1], OUT_RES.P_RES_Y, OUT_RES.P_RES_ZX[0], cam_incl_adjust=cam_incl_adjust)\n q_pts = q_pts.to(device).view(1, -1, 3)\n\n batch_size = 50000\n if q_pts.shape[1] > batch_size:\n sigmas = []\n invalid = []\n l = q_pts.shape[1]\n for i in range(math.ceil(l / batch_size)):\n f = i * batch_size\n t = min((i + 1) * batch_size, l)\n q_pts_ = q_pts[:, f:t, :]\n _, invalid_, sigmas_ = net.forward(q_pts_)\n sigmas.append(sigmas_)\n invalid.append(invalid_)\n sigmas = torch.cat(sigmas, dim=1)\n invalid = torch.cat(invalid, dim=1)\n else:\n _, invalid, sigmas = net.forward(q_pts)\n\n sigmas[torch.any(invalid, dim=-1)] = 1\n alphas = sigmas\n\n alphas = alphas.reshape(OUT_RES.P_RES_Y, *OUT_RES.P_RES_ZX)\n\n alphas_sum = torch.cumsum(alphas, dim=0)\n profile = (alphas_sum <= 8).float().sum(dim=0) / alphas.shape[0]\n return profile" }, { "identifier": "map_fn", "path": "utils/array_operations.py", "snippet": "def map_fn(batch, fn):\ndef to(data, device, non_blocking=True):\ndef set_requires_grad(nets, requires_grad=False):\ndef mask_mean(t: torch.Tensor, m: torch.Tensor, dim=None, keepdim=False):\ndef apply_crop(array, crop):\ndef shrink_mask(mask, shrink=3):\ndef get_mask(size, border=5, device=None):\ndef get_grid(H, W, normalize=True):\ndef detach(t):" }, { "identifier": "base_training", "path": "utils/base_trainer.py", "snippet": "def base_training(local_rank, config, get_dataflow, initialize, get_metrics, visualize):\n\n # copy the segmentation mode to the data and model_conf part of the config\n config['data']['segmentation_mode'] = config.get(\"segmentation_mode\", None)\n config['model_conf']['segmentation_mode'] = config.get(\"segmentation_mode\", None)\n\n rank = idist.get_rank()\n manual_seed(config[\"seed\"] + rank)\n device = idist.device()\n\n logger = setup_logger(name=config[\"name\"])\n\n log_basic_info(logger, config)\n\n output_path = config[\"output_path\"]\n if rank == 0:\n if config[\"stop_iteration\"] is None:\n now = datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n else:\n now = f\"stop-on-{config['stop_iteration']}\"\n\n folder_name = f\"{config['name']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}\"\n output_path = Path(output_path) / folder_name\n if not output_path.exists():\n output_path.mkdir(parents=True)\n config[\"output_path\"] = output_path.as_posix()\n logger.info(f\"Output path: {config['output_path']}\")\n\n if \"cuda\" in device.type:\n config[\"cuda device name\"] = torch.cuda.get_device_name(local_rank)\n\n # Setup dataflow, model, optimizer, criterion\n loaders = get_dataflow(config, logger)\n if len(loaders) == 2:\n train_loader, test_loader = loaders\n vis_loader = None\n else:\n train_loader, test_loader, vis_loader = loaders\n\n if hasattr(train_loader, \"dataset\"):\n logger.info(f\"Dataset length: Train: {len(train_loader.dataset)}, Test: {len(test_loader.dataset)}\")\n\n config[\"num_iters_per_epoch\"] = len(train_loader)\n model, optimizer, criterion, lr_scheduler = initialize(config, logger)\n\n logger.info(f\"Model parameters: {sum(p.numel() for p in model.parameters())}\")\n\n # 
Let's now setup evaluator engine to perform model's validation and compute metrics\n metrics = get_metrics(config, device)\n metrics_loss = {k: MeanMetric((lambda y: lambda x: x[\"loss_dict\"][y])(k)) for k in criterion.get_loss_metric_names()}\n\n loss_during_validation = config.get(\"loss_during_validation\", True)\n if loss_during_validation:\n eval_metrics = {**metrics, **metrics_loss}\n else:\n eval_metrics = metrics\n\n # Create trainer for current task\n trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler if hasattr(train_loader, \"sampler\") else None, config, logger, metrics={})\n\n # We define two evaluators as they wont have exactly similar roles:\n # - `evaluator` will save the best model based on validation score\n evaluator = create_evaluator(model, metrics=eval_metrics, criterion=criterion if loss_during_validation else None, config=config)\n\n if vis_loader is not None:\n visualizer = create_evaluator(model, metrics=eval_metrics, criterion=criterion if loss_during_validation else None, config=config)\n else:\n visualizer = None\n\n def run_validation(engine):\n epoch = trainer.state.epoch\n state = evaluator.run(test_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Test\", state.metrics)\n\n def run_visualization(engine):\n epoch = trainer.state.epoch\n state = visualizer.run(vis_loader)\n log_metrics(logger, epoch, state.times[\"COMPLETED\"], \"Vis\", state.metrics)\n\n eval_use_iters = config.get(\"eval_use_iters\", False)\n vis_use_iters = config.get(\"vis_use_iters\", False)\n\n if not eval_use_iters:\n trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config[\"validate_every\"]) | Events.COMPLETED, run_validation)\n else:\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=config[\"validate_every\"]) | Events.COMPLETED, run_validation)\n\n if visualizer:\n if not vis_use_iters:\n trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config[\"visualize_every\"]) | Events.COMPLETED, run_visualization)\n else:\n trainer.add_event_handler(Events.ITERATION_COMPLETED(every=config[\"visualize_every\"]) | Events.COMPLETED, run_visualization)\n\n if rank == 0:\n # Setup TensorBoard logging on trainer and evaluators. Logged values are:\n # - Training metrics, e.g. 
running average loss values\n # - Learning rate\n # - Evaluation train/test metrics\n\n trainer_timer = IterationTimeHandler()\n trainer_timer_data = DataloaderTimeHandler()\n trainer.add_event_handler(Events.ITERATION_STARTED, trainer_timer.start_iteration)\n trainer.add_event_handler(Events.ITERATION_COMPLETED, trainer_timer.end_iteration)\n trainer.add_event_handler(Events.GET_BATCH_STARTED, trainer_timer_data.start_get_batch)\n trainer.add_event_handler(Events.GET_BATCH_COMPLETED, trainer_timer_data.end_get_batch)\n\n evaluator_timer = IterationTimeHandler()\n evaluator_timer_data = DataloaderTimeHandler()\n evaluator.add_event_handler(Events.ITERATION_STARTED, evaluator_timer.start_iteration)\n evaluator.add_event_handler(Events.ITERATION_COMPLETED, evaluator_timer.end_iteration)\n evaluator.add_event_handler(Events.GET_BATCH_STARTED, evaluator_timer_data.start_get_batch)\n evaluator.add_event_handler(Events.GET_BATCH_COMPLETED, evaluator_timer_data.end_get_batch)\n\n if visualizer:\n visualizer_timer = IterationTimeHandler()\n visualizer_timer_data = DataloaderTimeHandler()\n visualizer.add_event_handler(Events.ITERATION_STARTED, visualizer_timer.start_iteration)\n visualizer.add_event_handler(Events.ITERATION_COMPLETED, visualizer_timer.end_iteration)\n visualizer.add_event_handler(Events.GET_BATCH_STARTED, visualizer_timer_data.start_get_batch)\n visualizer.add_event_handler(Events.GET_BATCH_COMPLETED, visualizer_timer_data.end_get_batch)\n\n gst = lambda engine, event_name: trainer.state.epoch\n gst_it_epoch = lambda engine, event_name: (trainer.state.epoch - 1) * engine.state.epoch_length + engine.state.iteration - 1\n eval_gst_it_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"validate_every\"]) * engine.state.epoch_length + engine.state.iteration - 1\n vis_gst_it_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"visualize_every\"]) * engine.state.epoch_length + engine.state.iteration - 1\n\n eval_gst_ep_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"validate_every\"])\n vis_gst_ep_iters = lambda engine, event_name: (((trainer.state.epoch - 1) * trainer.state.epoch_length + trainer.state.iteration) // config[\"visualize_every\"])\n\n eval_gst_it = eval_gst_it_iters if eval_use_iters else gst_it_epoch\n vis_gst_it = vis_gst_it_iters if vis_use_iters else gst_it_epoch\n\n eval_gst_ep = eval_gst_ep_iters if eval_use_iters else gst\n vis_gst_ep = vis_gst_ep_iters if vis_use_iters else gst\n\n tb_logger = TensorboardLogger(log_dir=output_path)\n tb_logger.attach(trainer, MetricLoggingHandler(\"train\", optimizer), Events.ITERATION_COMPLETED(every=config.get(\"log_every_iters\", 1)))\n tb_logger.attach(evaluator, MetricLoggingHandler(\"val\", log_loss=False, global_step_transform=eval_gst_ep), Events.EPOCH_COMPLETED)\n if visualizer:\n tb_logger.attach(visualizer, MetricLoggingHandler(\"vis\", log_loss=False, global_step_transform=vis_gst_ep), Events.EPOCH_COMPLETED)\n\n # Plot config to tensorboard\n config_json = json.dumps(OmegaConf.to_container(config, resolve=True), indent=2)\n config_json = \"\".join(\"\\t\" + line for line in config_json.splitlines(True))\n tb_logger.writer.add_text(\"config\", text_string=config_json, global_step=0)\n\n if visualize is not None:\n train_log_interval = config.get(\"log_tb_train_every_iters\", -1)\n 
val_log_interval = config.get(\"log_tb_val_every_iters\", train_log_interval)\n vis_log_interval = config.get(\"log_tb_vis_every_iters\", 1)\n\n if train_log_interval > 0:\n tb_logger.attach(\n trainer,\n VisualizationHandler(tag=\"training\", visualizer=visualize),\n Events.ITERATION_COMPLETED(every=train_log_interval))\n if val_log_interval > 0:\n tb_logger.attach(\n evaluator,\n VisualizationHandler(tag=\"val\", visualizer=visualize, global_step_transform=eval_gst_it),\n Events.ITERATION_COMPLETED(every=val_log_interval))\n if visualizer and vis_log_interval > 0:\n tb_logger.attach(\n visualizer,\n VisualizationHandler(tag=\"vis\", visualizer=visualize, global_step_transform=vis_gst_it),\n Events.ITERATION_COMPLETED(every=vis_log_interval))\n\n if \"save_best\" in config:\n # Store 2 best models by validation accuracy starting from num_epochs / 2:\n save_best_config = config[\"save_best\"]\n metric_name = save_best_config[\"metric\"]\n sign = save_best_config.get(\"sign\", 1.0)\n\n best_model_handler = Checkpoint(\n {\"model\": model},\n get_save_handler(config),\n filename_prefix=\"best\",\n n_saved=2,\n global_step_transform=global_step_from_engine(trainer),\n score_name=metric_name,\n score_function=Checkpoint.get_default_score_fn(metric_name, score_sign=sign),\n )\n evaluator.add_event_handler(\n Events.COMPLETED(lambda *_: trainer.state.epoch > config[\"num_epochs\"] // 2), best_model_handler\n )\n\n # In order to check training resuming we can stop training on a given iteration\n if config[\"stop_iteration\"] is not None:\n\n @trainer.on(Events.ITERATION_STARTED(once=config[\"stop_iteration\"]))\n def _():\n logger.info(f\"Stop training on {trainer.state.iteration} iteration\")\n trainer.terminate()\n\n try:\n trainer.run(train_loader, max_epochs=config[\"num_epochs\"])\n except Exception as e:\n logger.exception(\"\")\n raise e\n\n if rank == 0:\n tb_logger.close()" }, { "identifier": "color_tensor", "path": "utils/plotting.py", "snippet": "def color_tensor(tensor: torch.Tensor, cmap, norm=False):\n if norm:\n tensor = (tensor - tensor.min()) / (tensor.max() - tensor.min())\n map = plt.cm.get_cmap(cmap)\n tensor = torch.tensor(map(tensor.cpu().numpy()), device=tensor.device)[..., :3]\n return tensor" }, { "identifier": "color_segmentation_tensor", "path": "utils/plotting.py", "snippet": "def color_segmentation_tensor(segmentation, n_classes=21):\n \"\"\"\n Transform a tensor of class indicies ranging from 0 to n_classes-1 into a rgb tensor\n (add another dimension to the end of size 3).\n \"\"\"\n # https://matplotlib.org/stable/gallery/color/colormap_reference.html\n palette = plt.cm.plasma(np.linspace(0, 1, n_classes))\n palette = palette[:, :3] # RGBA -> RGB\n\n segmentation = palette[segmentation.view(-1).cpu()].reshape(*segmentation.shape, 3)\n\n return segmentation" } ]
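The base_training snippet above drives validation and visualization through ignite's filtered events, switching between per-epoch and per-iteration scheduling via the eval_use_iters/vis_use_iters flags. Below is a minimal, runnable sketch of that scheduling pattern with toy engines; the step functions, data, and config values are illustrative assumptions, not the repository's:

# Sketch of base_training's validation scheduling; toy engines and data.
from ignite.engine import Engine, Events

config = {"validate_every": 2, "eval_use_iters": False}

def train_step(engine, batch):
    return {"loss": sum(batch)}          # stand-in for the real training step

def eval_step(engine, batch):
    return {"acc": 1.0}                  # stand-in for the real evaluation step

trainer = Engine(train_step)
evaluator = Engine(eval_step)
data = [[1.0, 2.0], [3.0, 4.0]]

def run_validation(engine):
    evaluator.run(data)
    print(f"validated after epoch {trainer.state.epoch}")

# Per-epoch vs. per-iteration scheduling, mirroring base_training.
event = (Events.ITERATION_COMPLETED(every=config["validate_every"])
         if config["eval_use_iters"]
         else Events.EPOCH_COMPLETED(every=config["validate_every"]))
trainer.add_event_handler(event | Events.COMPLETED, run_validation)
trainer.run(data, max_epochs=4)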
import math
import ignite.distributed as idist
import torch
import numpy as np
from copy import copy
from typing import Optional, Union, Iterable, Sequence
from ignite.contrib.handlers import TensorboardLogger
from ignite.engine import Engine
from matplotlib import pyplot as plt
from torch import optim, nn
from torch.utils.data import DataLoader, Dataset, Sampler
from torch.utils.data.dataloader import T_co, _collate_fn_t, _worker_init_fn_t
from torchvision.utils import make_grid
from datasets.data_util import make_datasets
from models.common.model.scheduler import make_scheduler
from models.common.render import NeRFRenderer
from models.bts.model.loss import ReconstructionLoss
from models.bts.trainer import get_metrics, BTSWrapper, BTSNet
from scripts.inference_setup import render_profile
from utils.array_operations import map_fn, unsqueezer, to
from utils.base_trainer import base_training
from utils.plotting import color_tensor, color_segmentation_tensor
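Only the signatures of the utils.array_operations helpers appear in the context above. Here is a plausible minimal reimplementation, inferred from how DataloaderDummy below composes them; the bodies are assumptions and may differ from the real module:

# Assumed behavior of map_fn / unsqueezer / to, inferred from their usage.
import torch

def map_fn(batch, fn):
    # Recursively apply fn to every leaf of nested dicts/lists/tuples.
    if isinstance(batch, dict):
        return {k: map_fn(v, fn) for k, v in batch.items()}
    if isinstance(batch, (list, tuple)):
        return type(batch)(map_fn(v, fn) for v in batch)
    return fn(batch)

def unsqueezer(t: torch.Tensor) -> torch.Tensor:
    # Prepend a batch dimension of size 1.
    return t.unsqueeze(0)

def to(data, device, non_blocking=True):
    # Move every tensor in a nested structure onto the target device.
    return map_fn(data, lambda t: t.to(device, non_blocking=non_blocking))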
15,653
class EncoderDummy(nn.Module):
    """Stand-in encoder: returns a fixed, directly-learnable feature map."""

    def __init__(self, size, feat_dim, num_views=1) -> None:
        super().__init__()
        # Features of shape (num_views, feat_dim, H, W) are optimized
        # directly instead of being predicted from the input image.
        self.feats = nn.Parameter(torch.randn(num_views, feat_dim, *size))
        self.latent_size = feat_dim

    def forward(self, x):
        # The input image is ignored except for its batch size.
        n = x.shape[0]
        return [self.feats.expand(n, -1, -1, -1)]


class DataloaderDummy(DataLoader):
    """DataLoader that always yields the same single pre-collated element."""

    def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1,
                 shuffle: Optional[bool] = None,
                 sampler: Union[Sampler, Iterable, None] = None,
                 batch_sampler: Union[Sampler[Sequence], Iterable[Sequence], None] = None,
                 num_workers: int = 0,
                 collate_fn: Optional[_collate_fn_t] = None,
                 pin_memory: bool = False, drop_last: bool = False,
                 timeout: float = 0,
                 worker_init_fn: Optional[_worker_init_fn_t] = None,
                 multiprocessing_context=None, generator=None, *,
                 prefetch_factor: int = 2,
                 persistent_workers: bool = False,
                 pin_memory_device: str = ""):
        super().__init__(dataset, batch_size, shuffle, sampler, batch_sampler,
                         num_workers, collate_fn, pin_memory, drop_last,
                         timeout, worker_init_fn, multiprocessing_context,
                         generator, prefetch_factor=prefetch_factor,
                         persistent_workers=persistent_workers,
                         pin_memory_device=pin_memory_device)

        # Fetch item 0 once, tensorize it, add a batch dimension, and move it
        # to the GPU; every iteration replays this one element.
        self.element = to(
            map_fn(map_fn(dataset.__getitem__(0), torch.tensor), unsqueezer),
            "cuda:0")

    def _get_iterator(self):
        return iter([self.element])

    def __iter__(self):
        return super().__iter__()

    def __len__(self) -> int:
        return 1
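These two dummies exist to overfit a scene to a single sample: the "encoder" is just a learnable feature map, and the loader replays one pre-collated element. A short usage sketch follows; the shapes and the wrapped dataset are made up for illustration:

# Illustrative only: feature-map shape and dataset are assumptions.
import torch
from torch.utils.data import Dataset

class OneSampleDataset(Dataset):
    def __getitem__(self, idx):
        return {"imgs": torch.rand(3, 64, 96)}

    def __len__(self):
        return 1

encoder = EncoderDummy(size=(64, 96), feat_dim=32, num_views=1)
feats = encoder(torch.rand(4, 3, 64, 96))[0]  # the one parameter, expanded
assert feats.shape == (4, 32, 64, 96)

# DataloaderDummy(OneSampleDataset()) would tensorize item 0, unsqueeze a
# batch dimension, move it to "cuda:0", and yield that element every epoch.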
class BTSWrapperOverfit(BTSWrapper):
4
2023-11-12 21:53:27+00:00
24k
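Taken together, the fields above (context snippets, import statement, cropped code, next line, gold snippet index) form one next-line completion example. The sketch below shows how a consumer might assemble a prompt from such a record; every value in the dict is a placeholder, not the contents of any actual row:

# Hypothetical consumer of one record; all values here are placeholders.
record = {
    "repo_name": "example_org/example_repo",
    "file_path": "pkg/module.py",
    "context": [{"identifier": "helper", "path": "pkg/util.py",
                 "snippet": "def helper(x):\n    return x + 1"}],
    "import_statement": "from pkg.util import helper",
    "cropped_code": "def double_plus_one(x):\n    y = helper(x)",
    "next_line": "    return 2 * y",
}

def build_prompt(rec):
    ctx = "\n\n".join(f"# {c['path']} :: {c['identifier']}\n{c['snippet']}"
                      for c in rec["context"])
    return f"{ctx}\n\n{rec['import_statement']}\n\n{rec['cropped_code']}\n"

prompt = build_prompt(record)          # fed to a code model
target = record["next_line"]           # the completion to score against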
newcastleuniversity/DISPEL
dispel/providers/generic/sensor.py
[ { "identifier": "Reading", "path": "dispel/data/core.py", "snippet": "class Reading(FlagMixIn):\n \"\"\"A data capture from an experiment.\n\n Attributes\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this reading\n measure_set\n A list of measures already processed on the device\n schema\n The schema of the reading\n date\n The time the reading was recorded\n device\n The device that captured the reading\n\n Parameters\n ----------\n evaluation\n The evaluation information for this reading\n session\n The session information for this reading\n levels\n An iterable of Level\n measure_set\n A list of measures already processed on the device\n schema\n The schema of the reading\n date\n The time the reading was recorded\n device\n The device that captured the reading\n \"\"\"\n\n def __init__(\n self,\n evaluation: Evaluation,\n session: Optional[Session] = None,\n levels: Optional[Iterable[Level]] = None,\n measure_set: Optional[MeasureSet] = None,\n schema: Optional[ReadingSchema] = None,\n date: Any = None,\n device: Optional[Device] = None,\n ):\n super().__init__()\n self.evaluation = evaluation\n self.session = session\n self.measure_set: MeasureSet = measure_set or MeasureSet()\n self.schema = schema\n self.date = pd.Timestamp(date) if date else None\n self.device = device\n self._attempt: Dict[str, int] = defaultdict(int)\n\n # verify time frame compatibility\n if (\n self.session\n and not self.session.is_incomplete\n and not self.session.contains(self.evaluation)\n ):\n raise ValueError(\"Evaluation start and end must be within session\")\n\n # create dictionary of levels\n self._levels: Dict[LevelId, Level] = {}\n\n # set level if arg is provided\n if levels:\n for level in levels:\n self.set(level)\n\n def get_level(self, level_id: Optional[LevelIdType] = None) -> Level:\n \"\"\"Get level for a given level_id.\n\n Parameters\n ----------\n level_id\n The id identifying the level.\n\n Returns\n -------\n Level\n The level identified by ``level_id``. If no level id is provided and the\n reading contains only one level it will be returned. 
Otherwise, the function\n will raise a :class:`ValueError`.\n\n Raises\n ------\n ValueError\n If the given id does not match any existing level within the reading.\n ValueError\n If no id has been provided, and there are multiple levels withing the\n reading.\n \"\"\"\n # check if an arg is provided\n if level_id:\n if isinstance(level_id, str):\n level_id = LevelId.from_str(level_id) # type: ignore\n # check that this is a correct id\n if level_id not in self._levels:\n raise ValueError(\n f\"{level_id=} does not match any Level in {self._levels.keys()}\"\n )\n return self._levels[level_id] # type: ignore\n\n # if no level_id provided, check if there is only one level\n if len(self._levels) == 1:\n return next(iter(self._levels.values()))\n\n # if not, ask user for a level_id\n raise ValueError(\n f\"There are {len(self._levels)} levels, please provide a level_id in\"\n f\" {self._levels.keys()}\"\n )\n\n def __repr__(self) -> str:\n return f'<Reading: {plural(\"level\", len(self))} ({self.flag_count_repr})>'\n\n def __iter__(self) -> Iterable[Tuple[LevelIdType, Level]]:\n yield from self._levels.items()\n\n def __len__(self) -> int:\n return len(self._levels)\n\n @property\n def empty(self) -> bool:\n \"\"\"Check whether the reading is empty.\"\"\"\n return len(self) == 0\n\n @property\n def levels(self) -> ValuesView[Level]:\n \"\"\"Get a list of all Level in the reading.\"\"\"\n return self._levels.values()\n\n @property\n def level_ids(self) -> List[LevelId]:\n \"\"\"Get the list of level_id keys.\"\"\"\n return [level.id for level in self._levels.values()]\n\n def has_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> bool:\n \"\"\"Check whether the reading contains the desired raw data set.\n\n Parameters\n ----------\n data_set_id\n The id of the raw data set that will be searched for.\n level_id\n The level id in which the raw data set is to searched for.\n\n Returns\n -------\n bool\n ``True`` if the raw data set exists inside the given level. 
``False``\n otherwise.\n \"\"\"\n return self.get_level(level_id).has_raw_data_set(data_set_id)\n\n def get_raw_data_set(\n self,\n data_set_id: str,\n level_id: LevelIdType,\n ) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id and a level.\n\n Parameters\n ----------\n data_set_id\n The id of the raw data set that will be retrieved.\n level_id\n The level id from which the raw data set is to retrieved.\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id.\n \"\"\"\n return self.get_level(level_id).get_raw_data_set(data_set_id)\n\n def get_measure_set(self, level_id: Optional[LevelIdType] = None) -> MeasureSet:\n \"\"\"Get measure_set from level identified with level_id.\"\"\"\n if not level_id:\n return self.measure_set\n return self.get_level(level_id).measure_set\n\n def get_merged_measure_set(self) -> MeasureSet:\n \"\"\"Get a measure set containing all the reading's measure values.\"\"\"\n return sum(\n (self.measure_set, *(level.measure_set for level in self.levels)),\n MeasureSet(),\n )\n\n @singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a reading.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n def _get_level(self, level: Optional[Union[LevelIdType, Level]] = None) -> Level:\n \"\"\"Get level from id or level itself.\"\"\"\n if isinstance(level, Level):\n return level\n return self.get_level(level)\n\n @set.register(MeasureSet)\n def _measure_set(\n self,\n value: MeasureSet,\n level: Optional[Union[LevelIdType, Level]] = None,\n ):\n if level is None:\n self.measure_set += value\n else:\n self._get_level(level).set(value)\n\n @set.register(MeasureValue)\n def _measure_value(\n self,\n value: MeasureValue,\n level: Optional[Union[LevelIdType, Level]] = None,\n epoch: Optional[LevelEpoch] = None,\n ):\n if epoch is not None:\n epoch.set(value)\n else:\n if level is None:\n measure_set = self.measure_set\n else:\n measure_set = self._get_level(level).measure_set\n\n measure_set.set(value)\n\n @set.register(RawDataSet)\n def _raw_data_set(\n self,\n value: RawDataSet,\n level: Union[LevelIdType, Level],\n concatenate: bool = False,\n overwrite: bool = False,\n ):\n self._get_level(level).set(value, concatenate=concatenate, overwrite=overwrite)\n\n @set.register(LevelEpoch)\n def _epoch_measure_set(self, value: LevelEpoch, level: Union[LevelIdType, Level]):\n self._get_level(level).set(value)\n\n @set.register(Level)\n def _level(self, value: Level):\n \"\"\"Set a level.\"\"\"\n level_id_str = str(value.id)\n for lev in self._levels:\n if str(lev).startswith(level_id_str) and level_id_str in self._attempt:\n self._attempt[level_id_str] += 1\n break\n if level_id_str not in self._attempt:\n new_level = LevelId.from_str(level_id_str)\n self._levels[new_level] = value # type: ignore\n self._attempt[str(new_level.id)] = 1\n else:\n new_level_id_str = \"-\".join(\n [level_id_str, str(self._attempt[level_id_str]).zfill(2)]\n )\n value.id = cast(LevelId, LevelId.from_str(new_level_id_str))\n self._levels[value.id] = value\n # TODO: use sorting by effective time frame to ensure orders to\n # attempts :\n # level_ids = sorted(level_ids, key=lambda x:\n # reading.get_level(x).effective_time_frame.start )\n self._levels[value.id].context.set(\n value=self._attempt[level_id_str],\n definition=ValueDefinition(\n id_=\"attempt\", name=f\"The attempt number: {self._attempt[level_id_str]}\"\n ),\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": 
"Level", "path": "dispel/data/levels.py", "snippet": "class Level(Epoch):\n \"\"\"An entity to separate sub-task inside each test (Levels).\n\n FIXME: DOC\n\n Attributes\n ----------\n context\n Contextual information about the level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n\n Parameters\n ----------\n id_\n The identifier of a given Level.\n start\n The timestamp of the beginning of the level\n end\n The timestamp of the end of the level\n context\n Contextual information about the level\n raw_data_sets\n An iterable of :class:'~dispel.data.raw.RawDataSet' of a given Level\n measure_set\n A :class:'~dispel.data.measures.MeasureSet' of a given Level\n epochs\n An iterable of :class:`~dispel.data.measures.EpochMeasureSet` to be added to the\n level.\n \"\"\"\n\n def __init__(\n self,\n id_: Union[str, List[str], LevelId],\n start: Any,\n end: Any,\n context: Optional[Context] = None,\n raw_data_sets: Optional[Iterable[RawDataSet]] = None,\n measure_set: Optional[MeasureSet] = None,\n epochs: Optional[Iterable[LevelEpoch]] = None,\n ):\n if not isinstance(id_, LevelId):\n id_ = LevelId(id_)\n\n definition = EpochDefinition(id_=id_)\n super().__init__(start=start, end=end, definition=definition)\n\n self.context = context or Context()\n self.measure_set = measure_set or MeasureSet()\n\n # create dictionary of raw data sets\n self._raw_data_sets: Dict[str, RawDataSet] = {}\n\n # set raw data sets if arg is provided\n if raw_data_sets:\n for raw_data_set in raw_data_sets:\n self.set(raw_data_set)\n\n # create data frame for each epoch\n self._epochs = pd.DataFrame(columns=[\"definition_id\", \"start\", \"end\", \"epoch\"])\n if epochs:\n for epoch in epochs:\n self.set(epoch)\n\n @property\n def id(self) -> LevelId:\n \"\"\"Get the ID of the level from its definition.\n\n Returns\n -------\n LevelId\n The ID of the definition provided via `definition`.\n \"\"\"\n assert self.definition is not None, \"Require definition to access id\"\n return cast(LevelId, self.definition.id)\n\n @id.setter\n def id(self, value: Union[str, DefinitionId]):\n \"\"\"Set the ID of the level's definition.\n\n Parameters\n ----------\n value\n The ID to be set.\n \"\"\"\n assert self.definition is not None, \"Require definition to set id\"\n self.definition.id = value # type: ignore\n\n def __hash__(self):\n return hash(self.id)\n\n def __repr__(self):\n return f\"<Level: {self.id} ({self.flag_count_repr})>\"\n\n @property\n def raw_data_sets(self) -> List[RawDataSet]:\n \"\"\"Get all raw data sets.\"\"\"\n return list(self._raw_data_sets.values())\n\n def has_raw_data_set(self, id_: str) -> bool:\n \"\"\"Return ``True`` if the level contains the desired raw data set.\"\"\"\n return id_ in self._raw_data_sets\n\n def get_raw_data_set(self, id_: str) -> RawDataSet:\n \"\"\"Get the raw data set for a given data set id.\n\n Parameters\n ----------\n id_\n The id of the raw data set to be returned\n\n Returns\n -------\n RawDataSet\n The raw data set with the matching id\n\n Raises\n ------\n ValueError\n If the given id does not correspond to any existing raw data set within the\n level.\n \"\"\"\n if id_ not in self._raw_data_sets:\n raise ValueError(\n f'Unknown data set with id: \"{id_}\" for level_id == \"{self.id}\" '\n f\"please provide an id within {list(self._raw_data_sets.keys())}\"\n )\n\n return self._raw_data_sets[id_]\n\n @property\n def epochs(self) -> List[LevelEpoch]:\n \"\"\"Get all epoch measure sets.\"\"\"\n return self._epochs[\"epoch\"].tolist()\n\n 
@singledispatchmethod\n def set(self, value, **kwargs):\n \"\"\"Set a value inside a level.\"\"\"\n raise TypeError(f\"Unsupported set type: {type(value)}\")\n\n @set.register(MeasureSet)\n def _set_measure_set(self, value: MeasureSet):\n self.measure_set += value\n\n @set.register(MeasureValue)\n def _set_measure_value(self, value: MeasureValue):\n self.measure_set.set(value)\n\n @set.register(RawDataSet)\n def _set_raw_data_set(\n self, value: RawDataSet, concatenate: bool = False, overwrite: bool = False\n ):\n if overwrite and concatenate:\n raise ValueError(\n \"You cannot both concatenate and overwrite an existing raw data set. \"\n \"Only one of these arguments must be set to ``True``.\"\n )\n\n if (id_ := value.id) in self._raw_data_sets: # pylint: disable=all\n if concatenate:\n value = value.concat(self.get_raw_data_set(id_))\n elif not overwrite:\n raise RawDataSetAlreadyExists(\n id_, self.id, \"Use overwrite=True to overwrite\"\n )\n\n self._raw_data_sets[id_] = value\n\n @set.register(LevelEpoch)\n def _set_epoch(self, value: LevelEpoch):\n new_index = len(self._epochs)\n self._epochs.loc[new_index] = pd.Series(\n dict(\n definition_id=value.id if value.definition else None,\n start=value.start,\n end=value.end,\n epoch=value,\n )\n )\n\n @set.register(Flag)\n def _set_flag(self, value: Flag):\n self.add_flag(value)" }, { "identifier": "MeasureValueDefinitionPrototype", "path": "dispel/data/measures.py", "snippet": "class MeasureValueDefinitionPrototype(ValueDefinitionPrototype):\n \"\"\"A task measure value definition prototype.\n\n This is a convenience method that populates the ``cls`` argument with the\n :class:`~dispel.data.measures.MeasureValueDefinition` class.\n \"\"\"\n\n def __init__(self, **kwargs: Any):\n cls = kwargs.pop(\"cls\", MeasureValueDefinition)\n super().__init__(cls=cls, **kwargs)" }, { "identifier": "ACCELEROMETER_COLUMNS", "path": "dispel/data/raw.py", "snippet": "ACCELEROMETER_COLUMNS = [f\"userAcceleration{x}\" for x in \"XYZ\"]" }, { "identifier": "DEFAULT_COLUMNS", "path": "dispel/data/raw.py", "snippet": "DEFAULT_COLUMNS = list(\"xyz\")" }, { "identifier": "GRAVITY_COLUMNS", "path": "dispel/data/raw.py", "snippet": "GRAVITY_COLUMNS = [f\"gravity{x}\" for x in \"XYZ\"]" }, { "identifier": "RawDataValueDefinition", "path": "dispel/data/raw.py", "snippet": "class RawDataValueDefinition(ValueDefinition):\n \"\"\"The definition of raw data set values.\n\n Attributes\n ----------\n is_index\n ``True`` if the values are part of the raw data set index. Otherwise, ``False``.\n \"\"\"\n\n def __init__(\n self,\n id_: str,\n name: str,\n unit: Optional[str] = None,\n description: Optional[str] = None,\n data_type: Optional[str] = None,\n precision: Optional[int] = None,\n is_index: bool = False,\n ):\n super().__init__(\n id_=id_,\n name=name,\n unit=unit,\n description=description,\n data_type=data_type,\n precision=precision,\n )\n self.is_index = is_index" }, { "identifier": "AbbreviatedValue", "path": "dispel/data/values.py", "snippet": "class AbbreviatedValue:\n \"\"\"An abbreviated value.\n\n Examples\n --------\n This class allows to consistently handle abbreviated terms. Assuming you have a name\n of an assessment, e.g. 
`Cognitive Processing Speed` test and the respective\n abbreviation would be `CPS`, then you can create an abbreviated value like this:\n\n >>> from dispel.data.values import AbbreviatedValue as AV\n >>> value = AV('Cognitive Processing Speed test', 'CPS')\n >>> value\n Cognitive Processing Speed test (CPS)\n\n While this seems like a lot of overhead, it comes in handy when describing value\n definitions or higher-level abstractions, such as measure definitions.\n\n Parameters\n ----------\n value\n The full description of the value\n abbr\n The abbreviated form of the value\n\n Attributes\n ----------\n value\n The full description of the value\n \"\"\"\n\n def __init__(self, value: str, abbr: Optional[str] = None):\n self.value = value\n self._abbr = abbr\n\n @property\n def abbr(self):\n \"\"\"Get the abbreviated form of the value.\"\"\"\n return self._abbr or self.value\n\n def __str__(self):\n return self.value\n\n def __repr__(self):\n if self._abbr:\n return f\"{self.value} ({self._abbr})\"\n return self.value\n\n def __hash__(self):\n return hash((self.value, self._abbr))\n\n def __eq__(self, other):\n if isinstance(other, str):\n return self._abbr is None and self.value == other\n if isinstance(other, AbbreviatedValue):\n return self.value == other.value and self.abbr == other.abbr\n return False\n\n def __lt__(self, other):\n if not isinstance(other, AbbreviatedValue):\n raise ValueError(f\"Unsupported type in comparison: {type(other)}\")\n if self.value == other.value:\n return self.abbr < other.abbr\n return self.value < other.value\n\n def format(self, *args, **kwargs):\n \"\"\"Format an abbreviated value.\"\"\"\n return AbbreviatedValue(\n self.value.format(*args, **kwargs),\n self._abbr.format(*args, **kwargs) if self._abbr else None,\n )\n\n @classmethod\n def wrap(cls, value):\n \"\"\"Wrap a value into an abbreviated value.\n\n This is a small helper class to conveniently wrap values into an abbreviated\n value, if they are not already one.\n\n Parameters\n ----------\n value\n The value to be wrapped\n\n Returns\n -------\n AbbreviatedValue\n The passed ``value`` if it is an instance of :class:`AbbreviatedValue`. If a\n string is passed, then the string is passed as ``value`` argument to the\n constructor.\n\n Raises\n ------\n ValueError\n If the passed value is neither a string nor an instance of\n :class:`AbbreviatedValue`.\n \"\"\"\n if isinstance(value, cls):\n return value\n if isinstance(value, str):\n return cls(value)\n\n raise ValueError(f\"Can only wrap string values. 
Got: {type(value)}\")" }, { "identifier": "NotEmptyDataSetAssertionMixin", "path": "dispel/processing/assertions.py", "snippet": "class NotEmptyDataSetAssertionMixin(DataSetProcessingStepProtocol, metaclass=ABCMeta):\n \"\"\"A mixin to ensure that processed data sets are not empty.\"\"\"\n\n #: The assertion message\n assertion_message = \"Empty dataset {data_set_id} for level {level}\"\n\n #: The handling if a data set is empty\n empty_data_set_handling = ErrorHandling.RAISE\n\n def assert_valid_data_sets(\n self,\n data_sets: Sequence[pd.DataFrame],\n level: Level,\n reading: Reading,\n **kwargs,\n ):\n \"\"\"Assert that data sets are not empty.\"\"\"\n super().assert_valid_data_sets(data_sets, level, reading, **kwargs)\n for data, data_set_id in zip(data_sets, self.get_data_set_ids()):\n assert not data.empty, (\n self.assertion_message.format(data_set_id=data_set_id, level=level),\n self.empty_data_set_handling,\n )" }, { "identifier": "transformation", "path": "dispel/processing/data_set.py", "snippet": "def transformation(_func=None, **kwargs):\n \"\"\"Decorate a function as a transformation function.\"\"\"\n\n def wrapper(func):\n func.__transform_function__ = True\n func.__transform_kwargs__ = kwargs\n return func\n\n if _func is None:\n return wrapper\n\n return wrapper(_func)" }, { "identifier": "ExtractMultipleStep", "path": "dispel/processing/extract.py", "snippet": "class ExtractMultipleStep(ExtractStep):\n r\"\"\"A measure extraction processing step for multiple measures.\n\n This processing step allows to produce multiple\n :class:`~dispel.data.measures.MeasureValue`\\ s by providing a list of functions and a\n :class:`~dispel.data.values.ValueDefinitionPrototype` to create the\n :class:`~dispel.data.values.ValueDefinition`\\ s from.\n\n Parameters\n ----------\n data_set_ids\n An optional list of data set ids to be used for the transformation. See\n :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`.\n transform_functions\n An optional list of dictionaries containing at least the processing function\n under the key ``func``, which consumes the specified data sets though\n ``data_set_ids`` as positional arguments and returns a measure value passed to\n :class:`~dispel.data.measures.MeasureValue`. Additional keywords will be passed to\n :meth:`~dispel.data.values.ValueDefinitionPrototype.create_definition`. If no\n functions are provided, the :data:`transform_functions` class variable will be\n used.\n definition\n A :class:`~dispel.data.values.ValueDefinitionPrototype` that is used to create the\n :class:`~dispel.data.measures.MeasureValueDefinition`\\ s for the transformation\n functions provided in ``transform_functions``.\n level_filter\n An optional filter to limit the levels being processed. See\n :class:`~dispel.processing.level.LevelProcessingStep`.\n yield_if_nan\n If ``True``, yield null values as measure values. Otherwise, processing will not\n return a measure value in case of a null result for the extraction.\n\n Examples\n --------\n To ease the generation of multiple similar measures the :class:`ExtractMultipleStep`\n provides a convenient way to do so. Assume you want to create both the mean and\n median of a data set this can be achieved as follows:\n\n >>> import numpy as np\n >>> from dispel.data.values import ValueDefinitionPrototype\n >>> from dispel.processing.extract import ExtractMultipleStep\n >>> step = ExtractMultipleStep(\n ... 'data-set-id',\n ... [\n ... {'func': np.mean, 'method': 'average'},\n ... 
{'func': np.median, 'method': 'median'}\n ... ],\n ... ValueDefinitionPrototype(\n ... id_='measure-{method}',\n ... name='{method} measure',\n ... unit='s'\n ... )\n ... )\n\n This extraction step will result in two measure values, one for the mean and one\n with the median.\n \"\"\"\n\n transform_functions: Iterable[Dict[str, Any]]\n\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_functions: Optional[Iterable[Dict[str, Any]]] = None,\n definition: Optional[ValueDefinitionPrototype] = None,\n level_filter: Optional[LevelFilterType] = None,\n yield_if_nan: Optional[bool] = None,\n ):\n super().__init__(\n definition=definition,\n data_set_ids=data_set_ids,\n level_filter=level_filter,\n yield_if_nan=yield_if_nan,\n )\n\n if transform_functions:\n self.transform_functions = transform_functions\n\n def get_transform_functions(self) -> TransformationFunctionGeneratorType:\n \"\"\"Get the transform functions applied to the data sets.\"\"\"\n yield from super().get_transform_functions()\n\n for function_spec in self.transform_functions:\n spec = function_spec.copy()\n yield spec.pop(\"func\"), spec" }, { "identifier": "ExtractStep", "path": "dispel/processing/extract.py", "snippet": "class ExtractStep(\n MeasureDefinitionMixin, TransformStepChainMixIn, MutateDataSetProcessingStepBase\n):\n r\"\"\"A measure extraction processing step.\n\n This class provides a convenient way to extract a measure from one or more data sets\n by specifying their id, their level_ids or level filter, a transformation function\n and a measure value definition.\n\n Parameters\n ----------\n data_set_ids\n An optional list of data set ids to be used for the transformation. See\n :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`.\n transform_function\n An optional function to be applied to the data sets. See\n :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`.\n definition\n An optional value definition or prototype. See\n :class:`MeasureDefinitionMixin`.\n level_filter\n An optional filter to limit the levels being processed. See\n :class:`~dispel.processing.level.LevelProcessingStep`.\n yield_if_nan\n If ``True``, yield null values as measure values. Otherwise, processing\n will not return a measure value in case of a null result for the extraction.\n\n Examples\n --------\n Assuming we wanted to compute the maximum value of a raw data set we can create the\n following step\n\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing.extract import ExtractStep\n >>> step = ExtractStep(\n ... 'data-set-id',\n ... lambda data: data.max(axis=0),\n ... ValueDefinition('maximum','Maximum value')\n ... )\n\n A common approach is to define a processing step for re-use and leveraging the\n ``@transformation`` decorator to specify the transformation function:\n\n >>> import pandas as pd\n >>> from dispel.data.values import ValueDefinition\n >>> from dispel.processing.extract import ExtractStep\n >>> from dispel.processing.data_set import transformation\n >>> class MyExtractStep(ExtractStep):\n ... data_set_ids = 'data-set-id'\n ... definition = ValueDefinition('maximum','Maximum value')\n ...\n ... @transformation\n ... def _max(self, data: pd.DataFrame) -> float:\n ... return data.max(axis=0)\n\n Often one wants to extract multiple measures from one data set. 
This can be achieved\n by using prototypes and optional named arguments with ``@transformation``:\n\n >>> import pandas as pd\n >>> from dispel.data.values import ValueDefinitionPrototype\n >>> from dispel.processing.extract import ExtractStep\n >>> from dispel.processing.data_set import transformation\n >>> class MyExtractStep(ExtractStep):\n ... data_set_ids = 'data-set-id'\n ... definition = ValueDefinitionPrototype(\n ... id_='id-{agg_abbr}',\n ... name='{agg} value'\n ... )\n ...\n ... @transformation(agg='Maximum', agg_abbr='max')\n ... def _max(self, data: pd.DataFrame) -> float:\n ... return data.max(axis=0)\n ...\n ... @transformation(agg='Minimum', agg_abbr='min')\n ... def _min(self, data: pd.DataFrame) -> float:\n ... return data.min(axis=0)\n\n \"\"\"\n\n yield_if_nan: bool = False\n\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n definition: Optional[Union[ValueDefinition, ValueDefinitionPrototype]] = None,\n level_filter: Optional[LevelFilterType] = None,\n yield_if_nan: Optional[bool] = None,\n ):\n super().__init__(\n definition=definition,\n data_set_ids=data_set_ids,\n transform_function=transform_function,\n level_filter=level_filter,\n )\n self.yield_if_nan = yield_if_nan or self.yield_if_nan\n\n def wrap_result(\n self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n \"\"\"Wrap the result from the processing function into a class.\n\n Parameters\n ----------\n res\n Any result returned by the extraction step. If res is a\n :class:`~dispel.data.flags.WrappedResult`, the flag contained\n in the object will be automatically added to the\n :class:`~dispel.data.measures.MeasureValue`, hence the flagged wrapped\n results will always translate into flagged\n :class:`~dispel.data.measures.MeasureValue`.\n level\n The current level\n reading\n The current reading\n kwargs\n Additional kwargs\n\n Yields\n ------\n LevelProcessingResult\n The processing result\n \"\"\"\n try:\n if len(res) == 0:\n res = math.nan\n warnings.warn(\"Extract step returned an iterable!\", UserWarning)\n except TypeError:\n pass\n if is_wrapped := isinstance(res, WrappedResult):\n measure_value = res.measure_value\n else:\n measure_value = res\n\n if not (is_nan := math.isnan(measure_value)) or (is_nan and self.yield_if_nan):\n value = self.get_value(measure_value, **kwargs)\n # If result is wrapped, add the flag to the measure value\n if is_wrapped:\n value.add_flags(res, ignore_duplicates=True)\n\n yield LevelProcessingResult(\n step=self,\n sources=self.get_raw_data_sets(level),\n result=value,\n level=level,\n )" }, { "identifier": "LevelFilterType", "path": "dispel/processing/level.py", "snippet": "class LevelProcessingResultBase:\nclass LevelProcessingResult(ProcessingResult, LevelProcessingResultBase):\nclass LevelProcessingControlResult(ProcessingControlResult, LevelProcessingResultBase):\nclass LevelFilter(ABC):\nclass LevelIdFilter(LevelFilter):\nclass DefaultLevelFilter(LevelFilter):\nclass LevelProcessingStepProtocol(metaclass=ABCMeta):\nclass LevelFilterProcessingStepMixin:\nclass LevelProcessingStep(\n LevelProcessingStepProtocol, LevelFilterProcessingStepMixin, ProcessingStep\n):\nclass FlagLevelStep(FlagStepMixin, LevelProcessingStep):\nclass ProcessingStepGroup(LevelFilterProcessingStepMixin, CoreProcessingStepGroup):\n def __post_init__(self):\n def from_assertion_error(\n cls,\n step: \"ProcessingStep\",\n error: AssertionError,\n level: 
Optional[Level] = None,\n ):\n def from_flag(\n cls,\n flag: Flag,\n step: \"ProcessingStep\",\n targets: Iterable[EntityType],\n level: Optional[Level] = None,\n ):\ndef _intersection(a, b):\ndef _union(a, b):\n def __call__(self, levels: Iterable[Level]) -> Set[Level]:\n def __repr__(self) -> str:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def _combined(\n self, other: \"LevelFilter\", func: Callable[[Set, Set], Set]\n ) -> \"LevelFilter\":\n def _match(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __and__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __or__(self, other: \"LevelFilter\") -> \"LevelFilter\":\n def __invert__(self) -> \"LevelFilter\":\n def _inverted_filter(levels: Iterable[Level]) -> Set[Level]:\n def _repr() -> str:\n def __init__(self, level_ids: MultipleLevelIdsType):\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def repr(self) -> str:\n def filter(self, levels: Iterable[Level]) -> Set[Level]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(self, *args, **kwargs):\n def get_level_filter(self) -> LevelFilter:\n def set_level_filter(self, level_filter: LevelFilterType):\n def inject_level_filter_from_step(self, step: \"LevelFilterProcessingStepMixin\"):\n def _get_level_filter(inner_self) -> LevelFilter:\n def process_reading(self, reading: Reading, **kwargs) -> ProcessResultType:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def assert_valid_level(self, level: Level, reading: Reading, **kwargs):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def __init__(\n self,\n level_filter: Optional[LevelFilterType] = None,\n task_name: Optional[Union[AV, str]] = None,\n flag_name: Optional[Union[AV, str]] = None,\n flag_type: Optional[Union[FlagType, str]] = None,\n flag_severity: Optional[Union[FlagSeverity, str]] = None,\n reason: Optional[Union[AV, str]] = None,\n stop_processing: bool = False,\n flagging_function: Optional[Callable[..., bool]] = None,\n ):\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n def get_level_flag_targets(\n self, level: Level, reading: Reading, **kwargs\n ) -> Iterable[EntityType]:\n def get_flag_targets(\n self, reading: Reading, level: Optional[Level] = None, **kwargs\n ) -> Iterable[EntityType]:\n def flag_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> Generator[Flag, None, None]:\n def set_steps(self, steps: List[ProcessingStep]):\n def inject_level_filter_from_step(self, step: LevelFilterProcessingStepMixin):" }, { "identifier": "SensorModality", "path": "dispel/processing/modalities.py", "snippet": "class SensorModality(AVEnum):\n # FIXME remove class\n \"\"\"Sensor types enumerator.\"\"\"\n\n def unit(self, order: int = 1) -> str:\n \"\"\"Get the unit of the sensor signal.\n\n Parameters\n ----------\n order\n The unit order.\n\n Returns\n -------\n str\n The unit of the sensor.\n \"\"\"\n basis = {\"acc\": \"G\", \"gyr\": \"rad/s\", \"itrem\": \"pixel\"}[self.abbr]\n if order == 1:\n return basis\n return 
\"/\".join([x + f\"^{order}\" for x in basis.split(\"/\")])\n\n ACCELEROMETER = (\"accelerometer\", \"acc\")\n GYROSCOPE = (\"gyroscope\", \"gyr\")\n INTENTIONAL = (\"intentional tremors\", \"itrem\")" }, { "identifier": "Apply", "path": "dispel/processing/transform.py", "snippet": "class Apply(TransformStep):\n r\"\"\"Apply a method onto columns of a raw data set.\n\n Parameters\n ----------\n data_set_id\n The data set id of the data set on which the method is to be applied\n method\n The method in question. This can be any method that accepts a pandas series and\n returns an array of same length. See also :meth:`pandas.DataFrame.apply`.\n method_kwargs\n Optional arguments required for the methods.\n columns\n The columns to be considered during the method application.\n drop_nan\n ```True`` if NaN values are to be droped after transformation.\n level_filter\n An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels\n to be transformed. If no filter is provided, all levels will be transformed. The\n ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\\ s\n and lists of either and passes them to a\n :class:`~dispel.processing.level.LevelIdFilter` for convenience.\n new_data_set_id\n The ``id`` used for the :class:`~dispel.data.raw.RawDataSetDefinition`.\n\n Examples\n --------\n Assuming you want to low-pass filter your gyroscope data of a ``reading`` you can\n create the following step to do so (note that the filtering expects a\n time-index-based and constant frequency-based data frame, so you might have to\n leverage :class:`~dispel.providers.generic.sensor.SetTimestampIndex` and\n :class:`~dispel.providers.generic.sensor.Resample` first):\n\n >>> from dispel.processing.transform import Apply\n >>> from dispel.signal.filter import butterworth_low_pass_filter\n >>> step = Apply(\n ... 'gyroscope_ts_resampled',\n ... butterworth_low_pass_filter,\n ... dict(cutoff=1.5, order=2),\n ... list('xyz'),\n ... )\n\n This step will apply a 2. 
order butterworth low pass filter to the columns ``x``,\n ``y``, and ``z`` with a cut-off frequency of 1.5Hz.\n \"\"\"\n\n def __init__(\n self,\n data_set_id: str,\n method: Callable[..., Any],\n method_kwargs: Optional[Dict[str, Any]] = None,\n columns: Optional[List[str]] = None,\n new_data_set_id: Optional[str] = None,\n drop_nan: Optional[bool] = False,\n level_filter: Optional[LevelFilterType] = None,\n ):\n method_kwargs = method_kwargs or {}\n columns = columns or DEFAULT_COLUMNS\n\n def _transform_function(data: pd.DataFrame) -> pd.DataFrame:\n res = data[columns].apply(method, **method_kwargs)\n if drop_nan:\n return res.dropna()\n return res\n\n def _definition_factory(column: str) -> RawDataValueDefinition:\n return RawDataValueDefinition(\n column, f\"{method.__name__} applied on {column}\"\n )\n\n super().__init__(\n data_set_id,\n _transform_function,\n new_data_set_id or f\"{data_set_id}_{method.__name__}\",\n [_definition_factory(column) for column in columns],\n level_filter=level_filter,\n )" }, { "identifier": "TransformStep", "path": "dispel/processing/transform.py", "snippet": "class TransformStep(TransformStepChainMixIn, MutateDataSetProcessingStepBase):\n r\"\"\"A raw data set transformation processing step.\n\n This class provides a convenient way to transform one or more data sets by\n specifying their ids, their level_ids or a level filter, a transformation function\n and specifications of a new data set to be returned as result of the processing\n step.\n\n Parameters\n ----------\n data_set_ids\n An optional list of data set ids to be used for the transformation. See\n :class:`~dispel.processing.data_set.DataSetProcessingStepMixin`.\n transform_function\n An optional function to be applied to the data sets. See\n :class:`~dispel.processing.data_set.MutateDataSetProcessingStepBase`. The transform\n function is expected to produce one or more columns of a data set according to\n the specification in `definitions`. The function can return NumPy unidimensional\n arrays, Pandas series and data frames.\n new_data_set_id\n An optional id used for the\n :class:`~dispel.data.raw.RawDataSetDefinition`. If no id was provided, the\n :data:`new_data_set_id` class variable will be used. Alternatively, one can\n overwrite :meth:`get_new_data_set_id` to provide the new data set id.\n definitions\n An optional list of :class:`~dispel.data.raw.RawDataValueDefinition` that has to\n match the number of columns returned by the :attr:`transform_function`. If no\n definitions were provided, the :data:`definitions` class variable will be used.\n Alternatively, one can overwrite :meth:`get_definitions` to provide the list of\n definitions.\n level_filter\n An optional filter to limit the levels being processed. 
See\n :class:`~dispel.processing.level.LevelProcessingStep`.\n storage_error\n This argument is only useful when the given new data id already exists.\n In which case, the following options are available:\n\n - ``'ignore'``: the computation of the transformation step for the concerned\n level will be ignored.\n - ``'overwrite'``: the existing data set id will be overwritten by the\n result of transform step computation.\n - ``'concatenate'``: the existing data set id will be concatenated with the\n result of transform step computation.\n - ``'raise'``: An error will be raised if we want to overwrite on an\n existing data set id.\n\n Examples\n --------\n Assuming you want to calculate the euclidean norm of a data set ``'acceleration'``\n for a specific level ``'left-small'`` and then name the new data set\n ``'accelerometer-norm'``, you can create the following step:\n\n >>> from dispel.data.raw import RawDataValueDefinition\n >>> from dispel.processing.transform import TransformStep\n >>> from dispel.signal.core import euclidean_norm\n >>> step = TransformStep(\n ... 'accelerometer',\n ... euclidean_norm,\n ... 'accelerometer-norm',\n ... [RawDataValueDefinition('norm', 'Accelerometer norm', 'm/s^2')]\n ... )\n\n The transformation function will be called with the specified data sets as\n arguments. If the function has named parameters matching ``level`` or ``reading``,\n the respective level and reading will be passed to the transformation function.\n\n Another common scenario is to define a class that can be reused.\n\n >>> from dispel.data.raw import RawDataValueDefinition\n >>> from dispel.processing.transform import TransformStep\n >>> class MyTransformStep(TransformStep):\n ... data_set_ids = 'accelerometer'\n ... transform_function = euclidean_norm\n ... new_data_set_id = 'accelerometer-norm'\n ... definitions = [\n ... RawDataValueDefinition('norm', 'Accelerometer norm', 'm/s^2')\n ... ]\n\n Another convenient way to provide the transformation function is to use the\n ``@transformation`` decorator:\n\n >>> import pandas as pd\n >>> import numpy as np\n >>> from dispel.data.raw import RawDataValueDefinition\n >>> from dispel.processing.data_set import transformation\n >>> from dispel.processing.transform import TransformStep\n >>> class MyTransformStep(TransformStep):\n ... data_set_ids = 'accelerometer'\n ... new_data_set_id = 'accelerometer-norm'\n ... definitions = [\n ... RawDataValueDefinition('norm', 'Accelerometer norm', 'm/s^2')\n ... ]\n ...\n ... @transformation\n ... def _euclidean_norm(self, data: pd.DataFrame) -> pd.Series:\n ... 
return data.pow(2).sum(axis=1).apply(np.sqrt)\n\n Note that the decorated functions can also use ``level`` and ``reading`` as\n parameters to gain access to the respective level and reading being processed.\n \"\"\"\n\n new_data_set_id: str\n\n definitions: List[RawDataValueDefinition]\n\n storage_error: StorageError = StorageError.RAISE\n\n def __init__(\n self,\n data_set_ids: Optional[Union[str, Iterable[str]]] = None,\n transform_function: Optional[Callable[..., Any]] = None,\n new_data_set_id: Optional[str] = None,\n definitions: Optional[List[RawDataValueDefinition]] = None,\n level_filter: Optional[LevelFilterType] = None,\n storage_error: Optional[\n Union[StorageError, Literal[\"raise\", \"ignore\", \"overwrite\", \"concatenate\"]]\n ] = None,\n ):\n super().__init__(\n data_set_ids=data_set_ids,\n transform_function=transform_function,\n level_filter=level_filter,\n )\n\n if new_data_set_id:\n self.new_data_set_id = new_data_set_id\n if definitions:\n self.definitions = definitions\n if storage_error:\n self.storage_error = StorageError(storage_error)\n\n def get_new_data_set_id(self) -> str:\n \"\"\"Get the id of the new data set to be created.\"\"\"\n return self.new_data_set_id\n\n def get_definitions(self) -> List[RawDataValueDefinition]:\n \"\"\"Get the definitions of the raw data set values.\"\"\"\n return self.definitions\n\n def get_raw_data_set_definition(self):\n \"\"\"Get the raw data set definition.\"\"\"\n return RawDataSetDefinition(\n id=self.get_new_data_set_id(),\n source=RawDataSetSource(self.__class__.__name__),\n value_definitions_list=self.get_definitions(),\n is_computed=True,\n )\n\n def wrap_result(\n self, res: Any, level: Level, reading: Reading, **kwargs: Any\n ) -> WrapResultGeneratorType:\n \"\"\"Wrap the result from the processing function into a class.\"\"\"\n # handle series should they be provided\n if isinstance(res, (pd.Series, np.ndarray)):\n # Wrap into series if it is numpy array\n if isinstance(res, np.ndarray):\n assert res.ndim == 1, \"Cannot handle multidimensional arrays\"\n res = pd.Series(res)\n\n def_ids = [\n d.id for d in filter(lambda d: ~d.is_index, self.get_definitions())\n ]\n if len(def_ids) != 1:\n raise ValueError(\n \"Processing returned a series but did not get single \"\n \"RawDataValueDefinition\"\n )\n res = res.to_frame(def_ids[0])\n\n raw_data_set = RawDataSet(self.get_raw_data_set_definition(), res)\n\n yield RawDataSetProcessingResult(\n step=self,\n sources=self.get_raw_data_sets(level),\n result=raw_data_set,\n level=level,\n concatenate=self.storage_error.concatenate,\n overwrite=self.storage_error.overwrite,\n )\n\n def process_level(\n self, level: Level, reading: Reading, **kwargs\n ) -> ProcessResultType:\n \"\"\"Process the provided Level.\"\"\"\n raw_data_set_exists = level.has_raw_data_set(self.get_new_data_set_id())\n\n if raw_data_set_exists and self.storage_error == StorageError.RAISE:\n raise RawDataSetAlreadyExists(\n self.get_new_data_set_id(),\n level.id,\n 'Please select for `storage_error` either \"ignore\" to ignore the '\n 'transformation if the data set already exists, \"overwrite\" to '\n \"overwrite the existing data set with the newly computed one, \"\n '\"concatenate\" to try and concatenate the two raw data sets or simply '\n \"change the name of the new data set to a valid one.\",\n )\n if raw_data_set_exists and self.storage_error == StorageError.IGNORE:\n pass\n else:\n yield from super().process_level(level, reading, **kwargs)" }, { "identifier": "BDHReading", "path": 
"dispel/providers/bdh/data.py", "snippet": "class BDHReading(Reading):\n \"\"\"BDH reading.\"\"\"" }, { "identifier": "GRAVITY_CONSTANT", "path": "dispel/signal/accelerometer.py", "snippet": "GRAVITY_CONSTANT = 9.80665" }, { "identifier": "apply_rotation_matrices", "path": "dispel/signal/accelerometer.py", "snippet": "def apply_rotation_matrices(\n rotation_matrices: pd.Series, sensor: pd.DataFrame\n) -> pd.DataFrame:\n \"\"\"Apply rotation matrices on a sensor time series.\n\n Parameters\n ----------\n rotation_matrices\n The rotation matrices obtained with :func:`compute_rotation_matrices`\n sensor\n The sensor time series to be rotated\n\n Returns\n -------\n pandas.DataFrame\n The rotated sensor values based on ``rotation_matrices``.\n\n \"\"\"\n # TODO: fix for BDH format\n common_timestamps = sensor.index.intersection(rotation_matrices.index)\n if len(common_timestamps) < 0.7 * len(sensor.index):\n warnings.warn(\n \"More than 30% of the sensor signal has been ignored.\", UserWarning\n )\n\n return pd.DataFrame(\n (\n ri @ vi\n for ri, vi in zip(\n rotation_matrices.loc[common_timestamps],\n sensor.loc[common_timestamps].values,\n )\n ),\n index=common_timestamps,\n columns=sensor.columns,\n )" }, { "identifier": "compute_rotation_matrices_quaternion", "path": "dispel/signal/accelerometer.py", "snippet": "def compute_rotation_matrices_quaternion(\n gravity: pd.DataFrame, target_gravity: Tuple[float, float, float]\n) -> pd.Series:\n \"\"\"Compute rotation matrices from gravity time series (quaternion-based).\n\n Parameters\n ----------\n gravity\n The gravity time series obtained from the accelerometer sensor.\n target_gravity\n The unit vector onto which to rotate to\n\n Returns\n -------\n pandas.Series\n A series of rotation matrix objects for the provided ``gravity`` entries.\n\n \"\"\"\n # convert target into an array of same shape as gravity\n frame = np.tile(target_gravity, (gravity.shape[0], 1))\n\n # get quaternion from directional vectors\n quaternion = compute_quaternion_between_vectors(gravity.values, frame)\n\n # move scalar part to the end to prepare for scipy rotation format\n quaternion = np.roll(quaternion, shift=-1, axis=1)\n\n # convert quaternion to matrices\n matrices_list = Rotation.from_quat(quaternion).as_matrix()\n\n return pd.Series([m for m in matrices_list], index=gravity.index)" }, { "identifier": "remove_gravity_component", "path": "dispel/signal/accelerometer.py", "snippet": "def remove_gravity_component(data: pd.DataFrame):\n \"\"\"Remove the gravity component of acceleration.\n\n Based on paper :\n Two-stage Recognition of Raw Acceleration Signals for 3-D Gesture-Understanding Cell\n Phones, Cho et al., 2006\n\n Get the linear accelerations - without gravity component - by subtracting the mean\n acceleration signals, such that\n :math:`A_1(t) = A(t) - A_mean where A(t) = [a_x(t),a_y(t),a_z(t)]`\n\n Parameters\n ----------\n data\n Input acceleration data\n\n Returns\n -------\n Tuple[pandas.DataFrame, float]\n A tuple with first, the acceleration data with mean removed and second the mean.\n \"\"\"\n mean_acc = data.mean()\n lin_acc = data - mean_acc\n\n return lin_acc, mean_acc" }, { "identifier": "remove_gravity_component_ori", "path": "dispel/signal/accelerometer.py", "snippet": "def remove_gravity_component_ori(\n acc_sensor, q_global_sensor, unit: str = \"g\"\n) -> Tuple[pd.DataFrame, pd.DataFrame]:\n \"\"\"Remove the gravity component of acceleration based on orientation.\n\n Get the linear accelerations - without gravity component - by 
converting to the\n global coordinate frame, subtracting the constant gravity and converting back to the\n initial sensor frame.\n\n Parameters\n ----------\n acc_sensor\n Input acceleration data expressed on the sensor frame.\n q_global_sensor\n Input orientation from sensor to global coordinate frame.\n unit\n The unit in which the accelerometer data is expressed.\n\n Returns\n -------\n Tuple[pandas.DataFrame, pandas.DataFrame]\n A tuple with first, the acceleration data with mean removed and second the\n gravity.\n \"\"\"\n if unit == \"g\":\n gravity_constant = 1.0\n else:\n gravity_constant = GRAVITY_CONSTANT\n\n # rotate acceleration from sensor to global frame\n acc_global = quaternion_rotate_vector(q_global_sensor, acc_sensor)\n\n # derive the user acceleration by subtracting gravity\n user_acc_global = acc_global - [0, 0, gravity_constant]\n\n # derive gravity in global as array (should be [0, 0, GRAVITY_CONSTANT])\n gravity_global = acc_global - user_acc_global\n\n # get the conjugate quaternion to express the opposite rotation\n q_sensor_global = np.conjugate(quaternion.as_quat_array(q_global_sensor))\n q_sensor_global = quaternion.as_float_array(q_sensor_global)\n\n # rotate user and gravity back to the sensor frame\n user_acc_sensor = quaternion_rotate_vector(q_sensor_global, user_acc_global)\n gravity_sensor = quaternion_rotate_vector(q_sensor_global, gravity_global)\n\n return user_acc_sensor, gravity_sensor" }, { "identifier": "amplitude", "path": "dispel/signal/core.py", "snippet": "def amplitude(power_spectrum_: np.ndarray) -> float:\n \"\"\"Compute the amplitude of a signal.\n\n Parameters\n ----------\n power_spectrum_\n An array containing the power spectrum of the signal in question.\n\n Returns\n -------\n float\n The signal's amplitude.\n \"\"\"\n return np.max(power_spectrum_)" }, { "identifier": "discretize_sampling_frequency", "path": "dispel/signal/core.py", "snippet": "def discretize_sampling_frequency(\n data: pd.Series, fs_expected: List[int], max_frequency_distance: int = 5\n) -> int:\n \"\"\"Discretize the sampling frequency from a time series.\n\n First we extract the median sampling frequency of data, then return the closest\n expected frequency if the estimated sampling frequency is close enough\n (``np.abs(fs_expected, fs_estimate) < 5``) to one of the expected sampling\n frequencies.\n\n Parameters\n ----------\n data\n Any pandas series with a time series as index.\n fs_expected\n An iterable of expected sampling frequency in Hz.\n max_frequency_distance\n An optional integer specifying the maximum accepted distance between the\n expected frequency and the estimated frequency above which we raise an error.\n\n Returns\n -------\n int\n Discretized sampling frequency.\n\n Raises\n ------\n ValueError\n If estimated sampling frequency is too far\n (abs distance > max_frequency_distance) from all the expected sampling frequency\n in ``fs_expected``.\n \"\"\"\n # Estimate sampling_frequency\n fs_estimate = extract_sampling_frequency(data)\n\n # Compute the distance to expected frequencies\n frequency_distance = np.abs(np.array(fs_expected) - fs_estimate)\n\n # Check if we are close enough otherwise raise a warning\n if min(frequency_distance) > max_frequency_distance:\n raise ValueError(\n f\"Estimated sampling frequency {fs_estimate} is further than 5 Hz from \"\n f\"expected frequencies: {fs_expected}.\"\n )\n if min(frequency_distance) > 1:\n warnings.warn(\n f\"Estimated sampling frequency {fs_estimate} is further than 1 Hz from \"\n f\"expected 
frequencies: {fs_expected}.\"\n )\n return fs_expected[int(np.argmin(frequency_distance))]" }, { "identifier": "energy", "path": "dispel/signal/core.py", "snippet": "def energy(\n power_spectrum_: pd.Series,\n lowcut: Optional[float] = None,\n highcut: Optional[float] = None,\n) -> float:\n \"\"\"Compute the energy of a signal.\n\n Parameters\n ----------\n power_spectrum_\n A pandas series containing the power spectrum of the signal in question and the\n frequencies in index.\n lowcut\n The lower bound of frequencies to filter.\n highcut\n The higher bound of frequencies to filter.\n\n Returns\n -------\n float\n The signal's energy.\n \"\"\"\n assert_time_series_has_frequency(power_spectrum_)\n if lowcut is not None:\n mask = power_spectrum_.index.to_series().between(lowcut, highcut)\n windowed_data = power_spectrum_[mask]\n else:\n windowed_data = power_spectrum_\n return 0.5 * (windowed_data * windowed_data.index).sum()" }, { "identifier": "entropy", "path": "dispel/signal/core.py", "snippet": "def entropy(power_spectrum_: np.ndarray) -> float:\n \"\"\"Compute the entropy of a signal.\n\n Parameters\n ----------\n power_spectrum_\n An array containing the power spectrum of the signal in question.\n\n Returns\n -------\n float\n The signal's entropy.\n \"\"\"\n data = power_spectrum_ / np.sum(power_spectrum_)\n return -np.sum(data * np.log2(data))" }, { "identifier": "euclidean_norm", "path": "dispel/signal/core.py", "snippet": "def euclidean_norm(data: pd.DataFrame) -> pd.Series:\n \"\"\"Calculate the euclidean norm of a pandas Data Frame.\n\n Parameters\n ----------\n data\n A pandas data frame for which to compute the euclidean norm\n\n Returns\n -------\n pandas.Series\n The euclidean norm of ``data``\n \"\"\"\n return data.pow(2).sum(axis=1).apply(np.sqrt)" }, { "identifier": "peak", "path": "dispel/signal/core.py", "snippet": "def peak(\n power_spectrum_: pd.Series,\n) -> float:\n \"\"\"Compute the peak frequency of a signal.\n\n Parameters\n ----------\n power_spectrum_\n An array containing the power spectrum of the signal in question.\n\n Returns\n -------\n float\n The signal's peak frequency.\n \"\"\"\n assert_time_series_has_frequency(power_spectrum_)\n return power_spectrum_.idxmax()" }, { "identifier": "SENSOR_UNIT", "path": "dispel/signal/sensor.py", "snippet": "SENSOR_UNIT = {\"acc\": \"G\", \"gyr\": \"rad/s\", \"diss\": \"pixel\"}" }, { "identifier": "find_zero_crossings", "path": "dispel/signal/sensor.py", "snippet": "def find_zero_crossings(data: pd.DataFrame, col: str) -> pd.DataFrame:\n \"\"\"Find zero crossing in the signal.\"\"\"\n zero_crossings = data.index[(data[col] > 0).diff().fillna(False)]\n return data.loc[zero_crossings, col]" } ]
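The context snippets above include the low-level signal helpers consumed by the processing steps in this record. As a minimal sketch of how two of them compose (not part of the record; it assumes the dispel package is importable, and the 50 Hz rate, column names, and synthetic values are illustrative only):

import numpy as np
import pandas as pd

from dispel.signal.core import discretize_sampling_frequency, euclidean_norm

# Synthetic 50 Hz accelerometer frame with a DatetimeIndex (20 ms spacing).
index = pd.date_range("2020-01-01", periods=500, freq="20ms")
acc = pd.DataFrame(np.random.randn(500, 3), columns=list("xyz"), index=index)

# Magnitude per sample: sqrt(x^2 + y^2 + z^2), per the euclidean_norm snippet.
norm = euclidean_norm(acc)

# Snap the estimated rate to the closest expected frequency.
fs = discretize_sampling_frequency(norm, [20, 50, 100])
print(fs)  # expected: 50 for this synthetic index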
from functools import partial
from typing import Iterable, List, Optional, Tuple, Union

from dispel.data.core import Reading
from dispel.data.levels import Level
from dispel.data.measures import MeasureValueDefinitionPrototype
from dispel.data.raw import (
    ACCELEROMETER_COLUMNS,
    DEFAULT_COLUMNS,
    GRAVITY_COLUMNS,
    RawDataValueDefinition,
)
from dispel.data.values import AbbreviatedValue as AV
from dispel.processing.assertions import NotEmptyDataSetAssertionMixin
from dispel.processing.data_set import transformation
from dispel.processing.extract import ExtractMultipleStep, ExtractStep
from dispel.processing.level import LevelFilterType
from dispel.processing.modalities import SensorModality
from dispel.processing.transform import Apply, TransformStep
from dispel.providers.bdh.data import BDHReading
from dispel.signal.accelerometer import (
    GRAVITY_CONSTANT,
    apply_rotation_matrices,
    compute_rotation_matrices_quaternion,
    remove_gravity_component,
    remove_gravity_component_ori,
)
from dispel.signal.core import (
    amplitude,
    discretize_sampling_frequency,
    energy,
    entropy,
    euclidean_norm,
    peak,
)
from dispel.signal.sensor import SENSOR_UNIT, find_zero_crossings
import numpy as np
import pandas as pd
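Likewise, the gravity-removal helper imported above can be exercised on made-up data. Per its snippet it simply subtracts the per-axis mean, so this sketch (synthetic values, not taken from any reading) recovers an approximate gravity vector:

import numpy as np
import pandas as pd

from dispel.signal.accelerometer import GRAVITY_CONSTANT, remove_gravity_component

# Synthetic still-phone signal: gravity on z plus small noise on all axes.
acc = pd.DataFrame({
    "x": np.random.randn(100) * 0.01,
    "y": np.random.randn(100) * 0.01,
    "z": GRAVITY_CONSTANT + np.random.randn(100) * 0.01,
})

lin_acc, mean_acc = remove_gravity_component(acc)
# lin_acc has roughly zero mean per axis; mean_acc approximates (0, 0, 9.80665).
print(mean_acc)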
16999
for axis in "XYZ" ] + [ RawDataValueDefinition( f"gravity{axis}", f"gravity component along the {axis} axis.", data_type="float", ) for axis in "XYZ" ] + [RawDataValueDefinition("ts", "time index")] ) @staticmethod def add_gravity( accelerometer: pd.DataFrame, level: Level, gravity: Optional[pd.DataFrame] = None, ) -> pd.DataFrame: """Format gravity data to ADS format.""" if gravity is None: cols = ["x", "y", "z"] raw_acc = level.get_raw_data_set("raw_accelerometer").data accelerometer = raw_acc if level.has_raw_data_set("attitude"): ori = level.get_raw_data_set("attitude").data ori_cols = ["w", "x", "y", "z"] lin_accelerometer, gravity = remove_gravity_component_ori( accelerometer[cols].values, ori[ori_cols].values ) lin_accelerometer = pd.DataFrame(lin_accelerometer, columns=cols) gravity = pd.DataFrame(gravity, columns=cols) else: lin_accelerometer, gravity = remove_gravity_component( accelerometer[cols] ) res = pd.DataFrame( { "userAccelerationX": lin_accelerometer["x"], "userAccelerationY": lin_accelerometer["y"], "userAccelerationZ": lin_accelerometer["z"], } ) res["gravityX"] = gravity["x"] res["gravityY"] = gravity["y"] res["gravityZ"] = gravity["z"] res["ts"] = accelerometer["ts"] else: # Merging on the timestamps vs. on the indexes acc_renamed = accelerometer.rename( mapper={ "x": "userAccelerationX", "y": "userAccelerationY", "z": "userAccelerationZ", }, axis=1, ) gravity_renamed = gravity.rename( mapper={"x": "gravityX", "y": "gravityY", "z": "gravityZ"}, axis=1 ) merged = acc_renamed.merge(gravity_renamed, how="outer") merged = merged.set_index("ts") merged_sorted = merged.sort_index() merged_sorted_interpolated = merged_sorted.interpolate( method="nearest", limit_direction="both" ) res = merged_sorted_interpolated.loc[acc_renamed.ts].reset_index() return res.dropna() @staticmethod @transformation def _reformat(accelerometer: pd.DataFrame, level: Level) -> pd.DataFrame: target_cols = { f"{sensor}{axis}" for sensor in ("userAcceleration", "gravity") for axis in "XYZ" } if not target_cols.issubset(accelerometer.columns): try: return TransformUserAcceleration.add_gravity( accelerometer, level, level.get_raw_data_set("gravity").data ) except ValueError: # Happens in BDH pinch return TransformUserAcceleration.add_gravity(accelerometer, level) return accelerometer class TransformGyroscope(TransformStep): r"""Format gyroscope data to ADS format if not already the case. On ADS format, the gyroscope is synchronized with the accelerometer. Here we make sure gyroscope is synchronized with the acc data set. Parameters ---------- level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ data_set_ids = ["acc", "gyroscope"] new_data_set_id = "gyroscope" definitions = [ RawDataValueDefinition( axis, f"Rotation speed along the {axis} axis.", data_type="float" ) for axis in "xyz" ] + [RawDataValueDefinition("ts", "time index")] @staticmethod @transformation def _synchronize_gyroscope( accelerometer: pd.DataFrame, gyroscope: pd.DataFrame, reading: Reading ) -> pd.DataFrame:
"""Generic functionality for signal processing steps.""" # Define expected sampling frequencies FREQ_20HZ = 20 FREQ_50HZ = 50 FREQ_60HZ = 60 FREQ_100HZ = 100 # SensorLog can sample at 100Hz FREQ_128HZ = 128 # APDM files are sampled at 128Hz VALID_FREQ_LIST = [FREQ_20HZ, FREQ_50HZ, FREQ_100HZ, FREQ_128HZ] class RenameColumns(TransformStep): r"""Rename and select columns of a raw data set. Parameters ---------- data_set_id The data set id of the time series to be renamed. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. kwargs All arguments passed into this class will serve as a renaming mapping for the raw data set. """ def __init__( self, data_set_id: str, level_filter: Optional[LevelFilterType] = None, **kwargs ): def _transform_function(data: pd.DataFrame) -> pd.DataFrame: data_ = data.rename(columns=kwargs) return data_[kwargs.values()] super().__init__( data_set_id, _transform_function, f"{data_set_id}_renamed", [RawDataValueDefinition(column, column) for column in kwargs.values()], level_filter=level_filter, ) class SetTimestampIndex(TransformStep): r"""Create a new time series based on a date time or time delta column. Parameters ---------- data_set_id The data set id of the time series to be transformed. columns The columns to consider in the new raw data set. time_stamp_column The time series column name to use as index. level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. duplicates The strategy used to handle duplicates. Has to be one of ``ignore``, ``raise``, ``first``, ``last``. """ def __init__( self, data_set_id: str, columns: List[str], time_stamp_column: str = "ts", level_filter: Optional[LevelFilterType] = None, duplicates: Optional[str] = None, ): def _transform_function( data: pd.DataFrame, rm_duplicate: Optional[str] ) -> pd.DataFrame: if rm_duplicate is None: return data.set_index(time_stamp_column)[columns].copy() res = data.set_index(time_stamp_column)[columns].copy() return res[~res.index.duplicated(keep=duplicates)] super().__init__( data_set_id, lambda x: _transform_function(x, duplicates), f"{data_set_id}_ts", [RawDataValueDefinition(column, column) for column in columns], level_filter=level_filter, ) class Trim(TransformStep): """Trim a sensor signal at the beginning and/or end. Parameters ---------- trim_left The amount of data to trim from the left side of the sensor readings. trim_right The amount of data to trim from the right side of the sensor readings. ts_column The column id to be used in the provided raw data set through ``data_set_ids``. If no column is provided, the data set is expected to have a time-based index that is used to trim the data set. 
""" trim_left = pd.Timedelta(0) trim_right = pd.Timedelta(0) ts_column: Optional[str] = None def __init__(self, *args, **kwargs): if (left := kwargs.pop("trim_left", None)) is not None: self.trim_left = left if (right := kwargs.pop("trim_right", None)) is not None: self.trim_right = right if (column := kwargs.pop("ts_column", None)) is not None: self.ts_column = column super().__init__(*args, **kwargs) @transformation def _trim(self, data: pd.DataFrame) -> pd.DataFrame: ts_col = data.index if self.ts_column is None else data[self.ts_column] if self.trim_left > pd.Timedelta(0): data = data[ts_col > ts_col.min() + self.trim_left] if self.trim_right > pd.Timedelta(0): data = data[ts_col < ts_col.max() - self.trim_right] return data.copy() class Resample(NotEmptyDataSetAssertionMixin, TransformStep): r"""Resample a time-based raw data set to a specific sampling frequency. The resampling creates a new raw data set which is accessible via the data set comprised of the original one concatenated with ``_resampled``. Parameters ---------- data_set_id The data set to be resampled. This has to be a data set that uses a time-based index. You might first have to apply the :class:`SetTimestampIndex` processing step before you can apply this step. aggregations A list of resampling methods to be applied in order. Each can be any method that is also accepted by :meth:`pandas.DataFrame.agg`. columns The columns to be considered during the resampling. freq The frequency to resample to. See also :meth:`pandas.DataFrame.resample` for details. If freq is not provided the frequency is estimated automatically taking the median frequency. max_frequency_distance An optional integer specifying the maximum accepted distance between the expected frequency and the estimated frequency above which we raise an error. level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. 
""" def __init__( self, data_set_id: str, aggregations: Iterable[str], columns: Iterable[str], freq: Optional[Union[float, str]] = None, max_frequency_distance: Optional[int] = None, level_filter: Optional[LevelFilterType] = None, ): def _resample( data: pd.DataFrame, sampling_frequency: Optional[Union[float, str]] = None ) -> pd.DataFrame: # Check if a sampling frequency is provided # If not, we discretized the sampling frequency if sampling_frequency is None: discretize_args = [data, VALID_FREQ_LIST] if max_frequency_distance: discretize_args.append(max_frequency_distance) sampling_frequency = discretize_sampling_frequency(*discretize_args) # Convert the float sampling frequency to a Timedelta format if not isinstance(sampling_frequency, str): sampling_frequency = pd.Timedelta(1 / sampling_frequency, unit="s") resample_obj = data[columns].resample(sampling_frequency) for method in aggregations: resample_obj = resample_obj.agg(method) return resample_obj def _definition_factory(column: str) -> RawDataValueDefinition: return RawDataValueDefinition( column, f"{column} resampled with {aggregations}" ) super().__init__( data_set_id, partial(_resample, sampling_frequency=freq), f"{data_set_id}_resampled", [_definition_factory(column) for column in columns], level_filter=level_filter, ) class Upsample(Apply): r"""Upsample a time-based raw data set to a specific sampling frequency. The upsampling creates a new raw data set which is an upsampled version of the original data set identified by data_set_id. The upsampled data set is accessible via the new_data_set_id which is a concatenation of the original data_set_id and a suffix ``_upsampled``. Parameters ---------- interpolation_method Interpolation technique to use to fill NaN values. It should be a method that is also accepted by :meth:`pandas.DataFrame.interpolate`. freq The frequency to upsample to. See also :meth:`pandas.DataFrame.resample` for details. """ def get_new_data_set_id(self) -> str: """Overwrite new_data_set_id.""" return f"{self.get_data_set_ids()[0]}_upsampled" # type: ignore def __init__(self, interpolation_method: str, freq: Union[float, str], **kwargs): def _upsample( data: pd.DataFrame, sampling_frequency: Union[float, str] ) -> pd.DataFrame: """Upsample a dataframe to a given sampling frequency.""" # Convert the float sampling frequency to a Timedelta format if not isinstance(sampling_frequency, str): sampling_frequency = pd.Timedelta(1 / sampling_frequency, unit="s") resample_obj = data.resample(sampling_frequency) return resample_obj.interpolate(interpolation_method) super().__init__( method=_upsample, method_kwargs={"sampling_frequency": freq}, **kwargs ) class ExtractAverageSignalEnergy(NotEmptyDataSetAssertionMixin, ExtractStep): r"""An average signal energy extraction step. Parameters ---------- sensor The type of sensor on which the extraction is to be performed. data_set_id The data set id on which the extraction is to be performed. columns The columns onto which the signal energy is to be computed. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. 
""" def __init__( self, sensor: SensorModality, data_set_id: str, columns: List[str], level_filter: Optional[LevelFilterType] = None, ): def _average_signal(data: pd.DataFrame): return np.linalg.norm(data[columns], ord=2) super().__init__( data_set_id, _average_signal, definition=MeasureValueDefinitionPrototype( measure_name=AV(f"average {sensor} energy", f"{sensor.abbr}_sig_ene"), data_type="float64", description=f"The average {sensor} energy of the " f'{"".join(columns)} columns of the signal.', unit=SENSOR_UNIT[sensor.abbr], ), level_filter=level_filter, ) class ExtractPowerSpectrumMeasures(NotEmptyDataSetAssertionMixin, ExtractMultipleStep): r"""A measure extraction processing step for power spectrum measures. Parameters ---------- sensor The type of sensor on which the extraction is to be performed. data_set_id The data set id on which the extraction is to be performed. columns The columns onto which the power spectrum measures are to be extracted. lower_bound The lower bound of frequencies below which the signal is filtered. upper_bound The higher bound of frequencies above which the signal is filtered. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ def __init__( self, sensor: SensorModality, data_set_id: str, columns: List[str], lower_bound: Optional[float] = None, upper_bound: Optional[float] = None, level_filter: Optional[LevelFilterType] = None, ): unit = sensor.unit(order=2) atomic_functions = [ { "func": partial(energy, lowcut=lower_bound, highcut=upper_bound), "name": AV("energy", "ene"), "description": "The power spectrum energy summed between the " f"frequencies ({lower_bound}, {upper_bound}) " f"of the {{axis}} axis for the {sensor} " f"signal.", "unit": unit, "outcome_uuid": "99ef9a8d-a925-4eb0-9e80-be58cd4a9ac9", }, { "func": peak, "name": AV("peak", "peak"), "description": f"The frequency at which the power spectrum of " "the {axis} axis reaches its maximum value for " f"the {sensor} signal.", "unit": "Hz", "outcome_uuid": "87512c93-3a5b-4c9e-9575-fd9ed19649ca", }, { "func": entropy, "name": AV("entropy", "ent"), "description": "The power spectrum entropy of the {axis} axis " f"for the {sensor} signal.", "unit": unit, "outcome_uuid": "6726bb5a-8084-49f5-a53e-6a28a8f27695", }, { "func": amplitude, "name": AV("amplitude", "amp"), "description": "The power spectrum amplitude (i.e. the maximum" " value) of the {axis} axis for the " f"{sensor} signal.", "unit": unit, "outcome_uuid": "bde2c1f9-abf7-41e7-91f8-e0ddddf34a5c", }, ] def _function_factory(atomic_function, axis): return dict( func=lambda x: atomic_function["func"](x[axis]), description=atomic_function["description"].format(axis=axis), unit=atomic_function["unit"], measure_name=AV( f'{sensor} power spectrum {atomic_function["name"]} {axis}' f" axis", f'{sensor.abbr}_ps_{atomic_function["name"].abbr}_{axis}', ), ) functions = [ _function_factory(atomic_function, axis) for atomic_function in atomic_functions for axis in columns ] super().__init__( data_set_id, functions, definition=MeasureValueDefinitionPrototype(data_type="float64"), level_filter=level_filter, ) class ComputeGravityRotationMatrices(TransformStep): r"""Compute a series of rotation matrices to align sensors to gravity. 
This transformation step creates a series of rotation matrices based on the gravity information contained in the accelerometer sensor. This allows rotating other sensors into a desired orientation relative to gravity. This is of particular interest if we want to measure physical interactions with devices around the plane perpendicular to gravity. Parameters ---------- target_gravity The target gravity vector, e.g. ``(-1, 0, 0)`` to create rotation matrices that rotate the x-axis of a device onto gravity. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ def __init__( self, data_set_id: str, target_gravity: Tuple[float, float, float], **kwargs ): def _transform_function(data: pd.DataFrame) -> pd.Series: return compute_rotation_matrices_quaternion( data[GRAVITY_COLUMNS], target_gravity ) super().__init__( data_set_id, _transform_function, "gravity_rotation_matrices", [RawDataValueDefinition("rotation_matrix", "Rotation Matrix")], **kwargs, ) class RotateSensorWithGravityRotationMatrices(TransformStep): r"""Apply a series of rotation matrices to a sensor. This is a complementary step to :class:`ComputeGravityRotationMatrices` and applies the rotation matrices to the specified sensor. Parameters ---------- data_set_id The id of the sensor data set to be rotated. columns The columns of the sensor data set to be considered in the rotation. level_filter An optional :class:`~dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. Examples -------- Assuming you want to rotate the gyroscope vector onto gravity, you can achieve this by chaining the following steps: .. doctest:: processing >>> from dispel.data.raw import DEFAULT_COLUMNS >>> from dispel.processing import process >>> from dispel.providers.generic.sensor import ( ... ComputeGravityRotationMatrices, ... RotateSensorWithGravityRotationMatrices ... ) >>> cols = DEFAULT_COLUMNS >>> steps = [ ... ComputeGravityRotationMatrices('accelerometer', (-1, 0, 0)), ... RotateSensorWithGravityRotationMatrices('gyroscope', cols) ... ] >>> _ = process(reading, steps) # doctest: +SKIP The results of the rotation are available in the raw data set with the id ``<data_set_id>_rotated``: .. doctest:: processing :options: +NORMALIZE_WHITESPACE >>> level = reading.get_level(level_id) # doctest: +SKIP >>> level.get_raw_data_set('gyroscope').data.head() # doctest: +SKIP x y z ts 0 0.035728 -0.021515 0.014879 2020-05-04 17:31:38.574 1 -0.012046 0.005010 -0.009029 2020-05-04 17:31:38.625 2 0.006779 0.000761 -0.003253 2020-05-04 17:31:38.680 3 0.032636 -0.020272 -0.021915 2020-05-04 17:31:38.729 4 0.007495 -0.014061 0.012886 2020-05-04 17:31:38.779 >>> level.get_raw_data_set( ... 'gyroscope_rotated' ... 
).data.head() # doctest: +SKIP x y z 0 -0.002309 -0.042509 -0.012182 1 -0.003754 0.014983 0.003624 2 -0.002237 -0.002116 -0.006901 3 -0.030461 -0.021654 -0.023656 4 0.001203 -0.019580 0.005924 """ def __init__( self, data_set_id: str, columns: Iterable[str], level_filter: Optional[LevelFilterType] = None, ): def _transform_function( sensor_df: pd.DataFrame, matrices: pd.DataFrame ) -> pd.DataFrame: return apply_rotation_matrices( matrices["rotation_matrix"], sensor_df[columns] ) def _definition_factory(column: str) -> RawDataValueDefinition: return RawDataValueDefinition(column, f"{column} rotated") super().__init__( [data_set_id, "gravity_rotation_matrices"], _transform_function, f"{data_set_id}_rotated", [_definition_factory(column) for column in columns], level_filter=level_filter, ) class TransformUserAcceleration(TransformStep): r"""Format accelerometer data to ADS format if not already the case. Prior to formatting, linear acceleration and gravity are decoupled from acceleration. Parameters ---------- level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ data_set_ids = "accelerometer" new_data_set_id = "acc" definitions = ( [ RawDataValueDefinition( f"userAcceleration{axis}", f"Linear Acceleration along the {axis} axis.", data_type="float", ) for axis in "XYZ" ] + [ RawDataValueDefinition( f"gravity{axis}", f"gravity component along the {axis} axis.", data_type="float", ) for axis in "XYZ" ] + [RawDataValueDefinition("ts", "time index")] ) @staticmethod def add_gravity( accelerometer: pd.DataFrame, level: Level, gravity: Optional[pd.DataFrame] = None, ) -> pd.DataFrame: """Format gravity data to ADS format.""" if gravity is None: cols = ["x", "y", "z"] raw_acc = level.get_raw_data_set("raw_accelerometer").data accelerometer = raw_acc if level.has_raw_data_set("attitude"): ori = level.get_raw_data_set("attitude").data ori_cols = ["w", "x", "y", "z"] lin_accelerometer, gravity = remove_gravity_component_ori( accelerometer[cols].values, ori[ori_cols].values ) lin_accelerometer = pd.DataFrame(lin_accelerometer, columns=cols) gravity = pd.DataFrame(gravity, columns=cols) else: lin_accelerometer, gravity = remove_gravity_component( accelerometer[cols] ) res = pd.DataFrame( { "userAccelerationX": lin_accelerometer["x"], "userAccelerationY": lin_accelerometer["y"], "userAccelerationZ": lin_accelerometer["z"], } ) res["gravityX"] = gravity["x"] res["gravityY"] = gravity["y"] res["gravityZ"] = gravity["z"] res["ts"] = accelerometer["ts"] else: # Merging on the timestamps vs. 
on the indexes acc_renamed = accelerometer.rename( mapper={ "x": "userAccelerationX", "y": "userAccelerationY", "z": "userAccelerationZ", }, axis=1, ) gravity_renamed = gravity.rename( mapper={"x": "gravityX", "y": "gravityY", "z": "gravityZ"}, axis=1 ) merged = acc_renamed.merge(gravity_renamed, how="outer") merged = merged.set_index("ts") merged_sorted = merged.sort_index() merged_sorted_interpolated = merged_sorted.interpolate( method="nearest", limit_direction="both" ) res = merged_sorted_interpolated.loc[acc_renamed.ts].reset_index() return res.dropna() @staticmethod @transformation def _reformat(accelerometer: pd.DataFrame, level: Level) -> pd.DataFrame: target_cols = { f"{sensor}{axis}" for sensor in ("userAcceleration", "gravity") for axis in "XYZ" } if not target_cols.issubset(accelerometer.columns): try: return TransformUserAcceleration.add_gravity( accelerometer, level, level.get_raw_data_set("gravity").data ) except ValueError: # Happens in BDH pinch return TransformUserAcceleration.add_gravity(accelerometer, level) return accelerometer class TransformGyroscope(TransformStep): r"""Format gyroscope data to ADS format if not already the case. On ADS format, the gyroscope is synchronized with the accelerometer. Here we make sure gyroscope is synchronized with the acc data set. Parameters ---------- level_filter An optional :class:`dispel.processing.level.LevelFilter` to determine the levels to be transformed. If no filter is provided, all levels will be transformed. The ``level_filter`` also accepts :class:`str`, :class:`~dispel.data.core.LevelId`\ s and lists of either and passes them to a :class:`~dispel.processing.level.LevelFilter` for convenience. """ data_set_ids = ["acc", "gyroscope"] new_data_set_id = "gyroscope" definitions = [ RawDataValueDefinition( axis, f"Rotation speed along the {axis} axis.", data_type="float" ) for axis in "xyz" ] + [RawDataValueDefinition("ts", "time index")] @staticmethod @transformation def _synchronize_gyroscope( accelerometer: pd.DataFrame, gyroscope: pd.DataFrame, reading: Reading ) -> pd.DataFrame:
if isinstance(reading, BDHReading):
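The gold next line above only opens the BDH-specific branch; the record does not show how `_synchronize_gyroscope` continues. Purely as a hedged illustration (this is not the repository's actual implementation), aligning gyroscope samples onto the accelerometer timestamps could use a nearest-timestamp join:

import pandas as pd

def synchronize_gyroscope_sketch(
    accelerometer: pd.DataFrame, gyroscope: pd.DataFrame
) -> pd.DataFrame:
    """Align gyroscope samples to accelerometer timestamps (illustrative only)."""
    # merge_asof requires both frames to be sorted on the join key "ts".
    return pd.merge_asof(
        accelerometer[["ts"]].sort_values("ts"),
        gyroscope.sort_values("ts"),
        on="ts",
        direction="nearest",
    )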
16
2023-11-14 10:06:46+00:00
24k
Jisencc/yolov5_dual_weighting
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine or triton # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {\n int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino>=2023.0') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n core = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n ov_model = core.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if ov_model.get_parameters()[0].get_layout().empty:\n ov_model.get_parameters()[0].set_layout(Layout('NCHW'))\n batch_dim = get_batch(ov_model)\n if batch_dim.is_static:\n batch_size = 
batch_dim.get_length()\n ov_compiled_model = core.compile_model(ov_model, device_name='AUTO') # AUTO selects best available device\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=''), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs='x:0', outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 
'libedgetpu.1.dylib',\n 'Windows': 'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, 'r') as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode('utf-8'))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith('tensorflow')\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.ov_compiled_model(im).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape 
== s, f\"input size {im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.BILINEAR)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in ['http', 'grpc']), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "SegmentationModel", "path": "models/yolo.py", "snippet": "class SegmentationModel(DetectionModel):\n # YOLOv5 segmentation model\n def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):\n super().__init__(cfg, ch, nc, anchors)" }, { "identifier": "Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [], }\n self.stop_training = False # set True to interrupt training\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook: The callback hook name to register the action to\n name: The name of the action for later reference\n callback: The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook: The name of the hook to check, defaults to all\n \"\"\"\n return self._callbacks[hook] if hook else self._callbacks\n\n def run(self, hook, *args, thread=False, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks on main thread\n\n Args:\n hook: The name of the hook to check, defaults to all\n args: Arguments to receive from YOLOv5\n thread: (boolean) Run callbacks in daemon thread\n kwargs: Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n for logger in self._callbacks[hook]:\n if thread:\n threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()\n else:\n logger['callback'](*args, **kwargs)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "FILE = Path(__file__).resolve()\nROOT = FILE.parents[1] # YOLOv5 root directory\nRANK = int(os.getenv('RANK', -1))\nNUM_THREADS = min(8, max(1, os.cpu_count() - 
1)) # number of YOLOv5 multiprocessing threads\nDATASETS_DIR = Path(os.getenv('YOLOv5_DATASETS_DIR', ROOT.parent / 'datasets')) # global datasets directory\nAUTOINSTALL = str(os.getenv('YOLOv5_AUTOINSTALL', True)).lower() == 'true' # global auto-install mode\nVERBOSE = str(os.getenv('YOLOv5_VERBOSE', True)).lower() == 'true' # global verbose mode\nTQDM_BAR_FORMAT = '{l_bar}{bar:10}{r_bar}' # tqdm bar format\nFONT = 'Arial.ttf' # https://ultralytics.com/assets/Arial.ttf\nLOGGING_NAME = 'yolov5'\nLOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)\nCONFIG_DIR = user_config_dir() # Ultralytics settings dir\ndef is_ascii(s=''):\ndef is_chinese(s='人工智能'):\ndef is_colab():\ndef is_jupyter():\ndef is_kaggle():\ndef is_docker() -> bool:\ndef is_writeable(dir, test=False):\ndef set_logging(name=LOGGING_NAME, verbose=True):\ndef user_config_dir(dir='Ultralytics', env_var='YOLOV5_CONFIG_DIR'):\n def __init__(self, t=0.0):\n def __enter__(self):\n def __exit__(self, type, value, traceback):\n def time(self):\n def __init__(self, seconds, *, timeout_msg='', suppress_timeout_errors=True):\n def _timeout_handler(self, signum, frame):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\n def __init__(self, new_dir):\n def __enter__(self):\n def __exit__(self, exc_type, exc_val, exc_tb):\ndef methods(instance):\ndef print_args(args: Optional[dict] = None, show_file=True, show_func=False):\ndef init_seeds(seed=0, deterministic=False):\ndef intersect_dicts(da, db, exclude=()):\ndef get_default_args(func):\ndef get_latest_run(search_dir='.'):\ndef file_age(path=__file__):\ndef file_date(path=__file__):\ndef file_size(path):\ndef check_online():\n def run_once():\ndef git_describe(path=ROOT): # path must be a directory\ndef check_git_status(repo='ultralytics/yolov5', branch='master'):\ndef check_git_info(path='.'):\ndef check_python(minimum='3.8.0'):\ndef check_version(current='0.0.0', minimum='0.0.0', name='version ', pinned=False, hard=False, verbose=False):\ndef check_img_size(imgsz, s=32, floor=0):\ndef check_imshow(warn=False):\ndef check_suffix(file='yolov5s.pt', suffix=('.pt', ), msg=''):\ndef check_yaml(file, suffix=('.yaml', '.yml')):\ndef check_file(file, suffix=''):\ndef check_font(font=FONT, progress=False):\ndef check_dataset(data, autodownload=True):\ndef check_amp(model):\n def amp_allclose(model, im):\ndef yaml_load(file='data.yaml'):\ndef yaml_save(file='data.yaml', data={}):\ndef unzip_file(file, path=None, exclude=('.DS_Store', '__MACOSX')):\ndef url2file(url):\ndef download(url, dir='.', unzip=True, delete=True, curl=False, threads=1, retry=3):\n def download_one(url, dir):\ndef make_divisible(x, divisor):\ndef clean_str(s):\ndef one_cycle(y1=0.0, y2=1.0, steps=100):\ndef colorstr(*input):\ndef labels_to_class_weights(labels, nc=80):\ndef labels_to_image_weights(labels, nc=80, class_weights=np.ones(80)):\ndef coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\ndef xyxy2xywh(x):\ndef xywh2xyxy(x):\ndef xywhn2xyxy(x, w=640, h=640, padw=0, padh=0):\ndef xyxy2xywhn(x, w=640, h=640, clip=False, eps=0.0):\ndef xyn2xy(x, w=640, h=640, padw=0, padh=0):\ndef segment2box(segment, width=640, height=640):\ndef segments2boxes(segments):\ndef resample_segments(segments, n=1000):\ndef scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\ndef scale_segments(img1_shape, segments, img0_shape, ratio_pad=None, normalize=False):\ndef clip_boxes(boxes, shape):\ndef clip_segments(segments, shape):\ndef 
non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\ndef strip_optimizer(f='best.pt', s=''): # from utils.general import *; strip_optimizer()\ndef print_mutation(keys, results, hyp, save_dir, bucket, prefix=colorstr('evolve: ')):\ndef apply_classifier(x, model, img, im0):\ndef increment_path(path, exist_ok=False, sep='', mkdir=False):\ndef imread(filename, flags=cv2.IMREAD_COLOR):\ndef imwrite(filename, img):\ndef imshow(path, im):\nclass Profile(contextlib.ContextDecorator):\nclass Timeout(contextlib.ContextDecorator):\nclass WorkingDirectory(contextlib.ContextDecorator):" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n if detections is None:\n gt_classes = labels.int()\n for gc in gt_classes:\n self.matrix[self.nc, gc] += 1 # background FN\n return\n\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # true background\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # predicted background\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')\n def plot(self, normalize=True, save_dir='', names=()):\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns\n array[array < 0.005] = np.nan # don't annotate (would appear as 0.00)\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n nc, nn = self.nc, len(names) # number of classes, names\n sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size\n labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels\n ticklabels = (names + ['background']) if labels else 'auto'\n with warnings.catch_warnings():\n 
warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array,\n ax=ax,\n annot=nc < 30,\n annot_kws={\n 'size': 8},\n cmap='Blues',\n fmt='.2f',\n square=True,\n vmin=0.0,\n xticklabels=ticklabels,\n yticklabels=ticklabels).set_facecolor((1, 1, 1))\n ax.set_xlabel('True')\n ax.set_ylabel('Predicted')\n ax.set_title('Confusion Matrix')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close(fig)\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "box_iou", "path": "utils/metrics.py", "snippet": "def box_iou(box1, box2, eps=1e-7):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n # IoU = inter / (area1 + area2 - inter)\n return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output, max_det=300):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting\n targets = []\n for i, o in enumerate(output):\n box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n j = torch.full((conf.shape[0], 1), i)\n targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n return torch.cat(targets, 0).numpy()" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[5, 1:j],\n y[3, 1:j] * 1E2,\n '.-',\n linewidth=2,\n markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-',\n linewidth=2,\n markersize=8,\n alpha=.25,\n label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { 
"identifier": "create_dataloader", "path": "utils/segment/dataloaders.py", "snippet": "def create_dataloader(path,\n imgsz,\n batch_size,\n stride,\n single_cls=False,\n hyp=None,\n augment=False,\n cache=False,\n pad=0.0,\n rect=False,\n rank=-1,\n workers=8,\n image_weights=False,\n quad=False,\n prefix='',\n shuffle=False,\n mask_downsample_ratio=1,\n overlap_mask=False,\n seed=0):\n if rect and shuffle:\n LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabelsAndMasks(\n path,\n imgsz,\n batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix,\n downsample_ratio=mask_downsample_ratio,\n overlap=overlap_mask)\n\n batch_size = min(batch_size, len(dataset))\n nd = torch.cuda.device_count() # number of CUDA devices\n nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n generator = torch.Generator()\n generator.manual_seed(6148914691236517205 + seed + RANK)\n return loader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n worker_init_fn=seed_worker,\n generator=generator,\n ), dataset" }, { "identifier": "mask_iou", "path": "utils/segment/general.py", "snippet": "def mask_iou(mask1, mask2, eps=1e-7):\n \"\"\"\n mask1: [N, n] m1 means number of predicted objects\n mask2: [M, n] m2 means number of gt objects\n Note: n means image_w x image_h\n\n return: masks iou, [N, M]\n \"\"\"\n intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection\n return intersection / (union + eps)" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_native", "path": "utils/segment/general.py", "snippet": "def process_mask_native(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n protos: [mask_dim, mask_h, mask_w]\n masks_in: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape: 
input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n gain = min(mh / shape[0], mw / shape[1]) # gain = old / new\n pad = (mw - shape[1] * gain) / 2, (mh - shape[0] * gain) / 2 # wh padding\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(mh - pad[1]), int(mw - pad[0])\n masks = masks[:, top:bottom, left:right]\n\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "scale_image", "path": "utils/segment/general.py", "snippet": "def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n \"\"\"\n img1_shape: model input shape, [h, w]\n img0_shape: origin pic shape, [h, w, 3]\n masks: [h, w, num]\n \"\"\"\n # Rescale coordinates (xyxy) from im1_shape to im0_shape\n if ratio_pad is None: # calculate from im0_shape\n gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new\n pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding\n else:\n pad = ratio_pad[1]\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n if len(masks.shape) < 2:\n raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n masks = masks[top:bottom, left:right]\n # masks = masks.permute(2, 0, 1).contiguous()\n # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n # masks = masks.permute(1, 2, 0).contiguous()\n masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n if len(masks.shape) == 2:\n masks = masks[:, :, None]\n return masks" }, { "identifier": "Metrics", "path": "utils/segment/metrics.py", "snippet": "class Metrics:\n \"\"\"Metric for boxes and masks.\"\"\"\n\n def __init__(self) -> None:\n self.metric_box = Metric()\n self.metric_mask = Metric()\n\n def update(self, results):\n \"\"\"\n Args:\n results: Dict{'boxes': Dict{}, 'masks': Dict{}}\n \"\"\"\n self.metric_box.update(list(results['boxes'].values()))\n self.metric_mask.update(list(results['masks'].values()))\n\n def mean_results(self):\n return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n def class_result(self, i):\n return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n def get_maps(self, nc):\n return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n @property\n def ap_class_index(self):\n # boxes and masks have the same ap_class_index\n return self.metric_box.ap_class_index" }, { "identifier": "ap_per_class_box_and_mask", "path": "utils/segment/metrics.py", "snippet": "def ap_per_class_box_and_mask(\n tp_m,\n tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=False,\n save_dir='.',\n names=(),\n):\n \"\"\"\n Args:\n tp_b: tp of boxes.\n tp_m: tp of masks.\n other arguments see `func: ap_per_class`.\n \"\"\"\n results_boxes = ap_per_class(tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix='Box')[2:]\n results_masks = ap_per_class(tp_m,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix='Mask')[2:]\n\n results = {\n 'boxes': {\n 'p': results_boxes[0],\n 'r': results_boxes[1],\n 'ap': results_boxes[3],\n 'f1': results_boxes[2],\n 'ap_class': results_boxes[4]},\n 'masks': {\n 'p': results_masks[0],\n 'r': results_masks[1],\n 'ap': 
results_masks[3],\n 'f1': results_masks[2],\n 'ap_class': results_masks[4]}}\n return results" }, { "identifier": "plot_images_and_masks", "path": "utils/segment/plots.py", "snippet": "@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if isinstance(masks, torch.Tensor):\n masks = masks.cpu().numpy().astype(int)\n\n max_size = 1920 # max image size\n max_subplots = 16 # max image subplots, i.e. 4x4\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text([x + 5, y + 5], text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n idx = targets[:, 0] == i\n ti = targets[idx] # image targets\n\n boxes = xywh2xyxy(ti[:, 2:6]).T\n classes = ti[:, 1].astype('int')\n labels = ti.shape[1] == 6 # labels if no conf column\n conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale < 1: # absolute coords need scale if image scales\n boxes *= scale\n boxes[[0, 2]] += x\n boxes[[1, 3]] += y\n for j, box in enumerate(boxes.T.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n annotator.box_label(box, label, color=color)\n\n # Plot masks\n if len(masks):\n if masks.max() > 1.0: # mean that masks are overlap\n image_masks = masks[[i]] # (1, 640, 640)\n nl = len(ti)\n index = np.arange(nl).reshape(nl, 1, 1) + 1\n image_masks = np.repeat(image_masks, nl, axis=0)\n image_masks = np.where(image_masks == index, 1.0, 0.0)\n else:\n image_masks = masks[idx]\n\n im = np.asarray(annotator.im).copy()\n for j, box in enumerate(boxes.T.tolist()):\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n color = colors(classes[j])\n mh, mw = image_masks[j].shape\n if mh != h or mw != w:\n mask = image_masks[j].astype(np.uint8)\n mask = cv2.resize(mask, (w, h))\n mask = mask.astype(bool)\n else:\n mask = image_masks[j].astype(bool)\n with contextlib.suppress(Exception):\n im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6\n 
annotator.fromarray(im)\n annotator.im.save(fname) # save" }, { "identifier": "de_parallel", "path": "utils/torch_utils.py", "snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
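A note on the `mask_iou` helper quoted in the context above: flattening each mask to a vector reduces pairwise intersection to a single matrix product. A minimal usage sketch follows; the function body is copied from the snippet, while the tiny 2x2 masks are illustrative values, not taken from the record:

import torch

def mask_iou(mask1, mask2, eps=1e-7):
    # mask1: [N, n], mask2: [M, n], where n = image_h * image_w (flattened binary masks)
    intersection = torch.matmul(mask1, mask2.t()).clamp(0)
    union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection
    return intersection / (union + eps)

# Two 2x2 'images' flattened to length-4 vectors (made-up values)
pred = torch.tensor([[1., 1., 0., 0.]])  # predicted mask covers the top row
gt = torch.tensor([[1., 0., 0., 0.],     # gt mask 1: top-left pixel only
                   [0., 0., 1., 1.]])    # gt mask 2: bottom row
print(mask_iou(pred, gt))  # ~tensor([[0.5000, 0.0000]]): IoU of pred against each gt mask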
import argparse
import json
import os
import subprocess
import sys
import numpy as np
import torch
import torch.nn.functional as F
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm
from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size,
                           check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path,
                           non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_native, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
16967
        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
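The `process(proto, pred[:, 6:], pred[:, :4], ...)` call that closes the cropped code assembles per-detection masks as a linear combination of shared prototype maps, mirroring the `(masks_in @ protos.float().view(c, -1)).sigmoid()` line quoted in the `process_mask` snippet. A shape-only sketch, with all dimensions assumed for illustration (nm = 32 matches the fallback above):

import torch

c, mh, mw = 32, 160, 160         # prototype channels and resolution (assumed)
n = 5                            # detections surviving NMS (assumed)
protos = torch.randn(c, mh, mw)  # prototype maps from the model head
coeffs = torch.randn(n, c)       # pred[:, 6:]: one coefficient vector per detection

masks = (coeffs @ protos.view(c, -1)).sigmoid().view(n, mh, mw)  # combine prototypes, squash to [0, 1]
binary = masks.gt(0.5)           # thresholded instance masks
print(binary.shape)              # torch.Size([5, 160, 160])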
# YOLOv5 🚀 by Ultralytics, AGPL-3.0 license
"""
Validate a trained YOLOv5 segment model on a segment dataset

Usage:
    $ bash data/scripts/get_coco.sh --val --segments  # download COCO-segments val split (1G, 5000 images)
    $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640  # validate COCO-segments

Usage - formats:
    $ python segment/val.py --weights yolov5s-seg.pt                 # PyTorch
                                      yolov5s-seg.torchscript        # TorchScript
                                      yolov5s-seg.onnx               # ONNX Runtime or OpenCV DNN with --dnn
                                      yolov5s-seg_openvino_model     # OpenVINO
                                      yolov5s-seg.engine             # TensorRT
                                      yolov5s-seg.mlmodel            # CoreML (macOS-only)
                                      yolov5s-seg_saved_model        # TensorFlow SavedModel
                                      yolov5s-seg.pb                 # TensorFlow GraphDef
                                      yolov5s-seg.tflite             # TensorFlow Lite
                                      yolov5s-seg_edgetpu.tflite     # TensorFlow Edge TPU
                                      yolov5s-seg_paddle_model       # PaddlePaddle
"""

FILE = Path(__file__).resolve()
ROOT = FILE.parents[1]  # YOLOv5 root directory
if str(ROOT) not in sys.path:
    sys.path.append(str(ROOT))  # add ROOT to PATH
ROOT = Path(os.path.relpath(ROOT, Path.cwd()))  # relative


def save_one_txt(predn, save_conf, shape, file):
    # Save one txt result
    gn = torch.tensor(shape)[[1, 0, 1, 0]]  # normalization gain whwh
    for *xyxy, conf, cls in predn.tolist():
        xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist()  # normalized xywh
        line = (cls, *xywh, conf) if save_conf else (cls, *xywh)  # label format
        with open(file, 'a') as f:
            f.write(('%g ' * len(line)).rstrip() % line + '\n')


def save_one_json(predn, jdict, path, class_map, pred_masks):
    # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}
    def single_encode(x):
        rle = encode(np.asarray(x[:, :, None], order='F', dtype='uint8'))[0]
        rle['counts'] = rle['counts'].decode('utf-8')
        return rle

    image_id = int(path.stem) if path.stem.isnumeric() else path.stem
    box = xyxy2xywh(predn[:, :4])  # xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
    pred_masks = np.transpose(pred_masks, (2, 0, 1))
    with ThreadPool(NUM_THREADS) as pool:
        rles = pool.map(single_encode, pred_masks)
    for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())):
        jdict.append({
            'image_id': image_id,
            'category_id': class_map[int(p[5])],
            'bbox': [round(x, 3) for x in b],
            'score': round(p[4], 5),
            'segmentation': rles[i]})


def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False):
    """
    Return correct prediction matrix
    Arguments:
        detections (array[N, 6]), x1, y1, x2, y2, conf, class
        labels (array[M, 5]), class, x1, y1, x2, y2
    Returns:
        correct (array[N, 10]), for 10 IoU levels
    """
    if masks:
        if overlap:
            nl = len(labels)
            index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1
            gt_masks = gt_masks.repeat(nl, 1, 1)  # shape(1,640,640) -> (n,640,640)
            gt_masks = torch.where(gt_masks == index, 1.0, 0.0)
        if gt_masks.shape[1:] != pred_masks.shape[1:]:
            gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode='bilinear', align_corners=False)[0]
            gt_masks = gt_masks.gt_(0.5)
        iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1))
    else:  # boxes
        iou = box_iou(labels[:, 1:], detections[:, :4])

    correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool)
    correct_class = labels[:, 0:1] == detections[:, 5]
    for i in range(len(iouv)):
        x = torch.where((iou >= iouv[i]) & correct_class)  # IoU > threshold and classes match
        if x[0].shape[0]:
            matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()  # [label, detect, iou]
            if x[0].shape[0] > 1:
                matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 1], return_index=True)[1]]
                # matches = matches[matches[:, 2].argsort()[::-1]]
                matches = matches[np.unique(matches[:, 0], return_index=True)[1]]
            correct[matches[:, 1].astype(int), i] = True
    return torch.tensor(correct, dtype=torch.bool, device=iouv.device)


@smart_inference_mode()
def run(
        data,
        weights=None,  # model.pt path(s)
        batch_size=32,  # batch size
        imgsz=640,  # inference size (pixels)
        conf_thres=0.001,  # confidence threshold
        iou_thres=0.6,  # NMS IoU threshold
        max_det=300,  # maximum detections per image
        task='val',  # train, val, test, speed or study
        device='',  # cuda device, i.e. 0 or 0,1,2,3 or cpu
        workers=8,  # max dataloader workers (per RANK in DDP mode)
        single_cls=False,  # treat as single-class dataset
        augment=False,  # augmented inference
        verbose=False,  # verbose output
        save_txt=False,  # save results to *.txt
        save_hybrid=False,  # save label+prediction hybrid results to *.txt
        save_conf=False,  # save confidences in --save-txt labels
        save_json=False,  # save a COCO-JSON results file
        project=ROOT / 'runs/val-seg',  # save to project/name
        name='exp',  # save to project/name
        exist_ok=False,  # existing project/name ok, do not increment
        half=True,  # use FP16 half-precision inference
        dnn=False,  # use OpenCV DNN for ONNX inference
        model=None,
        dataloader=None,
        save_dir=Path(''),
        plots=True,
        overlap=False,
        mask_downsample_ratio=1,
        compute_loss=None,
        callbacks=Callbacks(),
):
    if save_json:
        check_requirements('pycocotools>=2.0.6')
        process = process_mask_native  # more accurate
    else:
        process = process_mask  # faster

    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device, pt, jit, engine = next(model.parameters()).device, True, False, False  # get model device, PyTorch model
        half &= device.type != 'cpu'  # half precision only supported on CUDA
        model.half() if half else model.float()
        nm = de_parallel(model).model[-1].nm  # number of masks
    else:  # called directly
        device = select_device(device, batch_size=batch_size)

        # Directories
        save_dir = increment_path(Path(project) / name, exist_ok=exist_ok)  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True)  # make dir

        # Load model
        model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half)
        stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine
        imgsz = check_img_size(imgsz, s=stride)  # check image size
        half = model.fp16  # FP16 supported on limited backends with CUDA
        nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32  # number of masks
        if engine:
            batch_size = model.batch_size
        else:
            device = model.device
            if not (pt or jit):
                batch_size = 1  # export.py models default to batch-size 1
                LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models')

        # Data
        data = check_dataset(data)  # check

    # Configure
    model.eval()
    cuda = device.type != 'cpu'
    is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt')  # COCO dataset
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10, device=device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if not training:
        if pt and not single_cls:  # check --weights are trained on --data
            ncm = model.model.nc
            assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \
                              f'classes). Pass correct combination of --weights and --data that are trained together.'
        model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz))  # warmup
        pad, rect = (0.0, False) if task == 'speed' else (0.5, pt)  # square inference for benchmarks
        task = task if task in ('train', 'val', 'test') else 'val'  # path to train/val/test images
        dataloader = create_dataloader(data[task],
                                       imgsz,
                                       batch_size,
                                       stride,
                                       single_cls,
                                       pad=pad,
                                       rect=rect,
                                       workers=workers,
                                       prefix=colorstr(f'{task}: '),
                                       overlap_mask=overlap,
                                       mask_downsample_ratio=mask_downsample_ratio)[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = model.names if hasattr(model, 'names') else model.module.names  # get class names
    if isinstance(names, (list, tuple)):  # old format
        names = dict(enumerate(names))
    class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
    s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', 'R', 'mAP50', 'mAP50-95)', 'Mask(P', 'R',
                                  'mAP50', 'mAP50-95)')
    dt = Profile(), Profile(), Profile()
    metrics = Metrics()
    loss = torch.zeros(4, device=device)
    jdict, stats = [], []
    # callbacks.run('on_val_start')
    pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT)  # progress bar
    for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar):
        # callbacks.run('on_val_batch_start')
        with dt[0]:
            if cuda:
                im = im.to(device, non_blocking=True)
                targets = targets.to(device)
                masks = masks.to(device)
            masks = masks.float()
            im = im.half() if half else im.float()  # uint8 to fp16/32
            im /= 255  # 0 - 255 to 0.0 - 1.0
            nb, _, height, width = im.shape  # batch size, channels, height, width

        # Inference
        with dt[1]:
            preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None)

        # Loss
        if compute_loss:
            loss += compute_loss((train_out, protos), targets, masks)[1]  # box, obj, cls

        # NMS
        targets[:, 2:] *= torch.tensor((width, height, width, height), device=device)  # to pixels
        lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else []  # for autolabelling
        with dt[2]:
            preds = non_max_suppression(preds,
                                        conf_thres,
                                        iou_thres,
                                        labels=lb,
                                        multi_label=True,
                                        agnostic=single_cls,
                                        max_det=max_det,
                                        nm=nm)

        # Metrics
        plot_masks = []  # masks for plotting
        for si, (pred, proto) in enumerate(zip(preds, protos)):
            labels = targets[targets[:, 0] == si, 1:]
            nl, npr = labels.shape[0], pred.shape[0]  # number of labels, predictions
            path, shape = Path(paths[si]), shapes[si][0]
            correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device)  # init
            seen += 1

            if npr == 0:
                if nl:
                    stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0]))
                    if plots:
                        confusion_matrix.process_batch(detections=None, labels=labels[:, 0])
                continue

            # Masks
            midx = [si] if overlap else targets[:, 0] == si
            gt_masks = masks[midx]
            pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:])

            # Predictions
            if single_cls:
                pred[:, 5] = 0
            predn = pred.clone()
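The `matches` post-processing inside `process_batch` above enforces a one-to-one assignment: candidate (label, detection) pairs are sorted by IoU descending, then deduplicated first per detection and then per label. A small NumPy sketch of those exact steps on made-up values:

import numpy as np

# Candidate pairs above the IoU threshold: [label_idx, detection_idx, iou]
matches = np.array([[0, 0, 0.90],
                    [1, 0, 0.80],   # label 1 also overlaps detection 0
                    [1, 1, 0.70]])

matches = matches[matches[:, 2].argsort()[::-1]]                   # best IoU first
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # keep one match per detection
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # keep one match per label
print(matches)  # [[0. 0. 0.9], [1. 1. 0.7]] -- the weaker duplicate pair is dropped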
scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred
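The gold `next_line` undoes the letterbox: predictions live in padded model-input coordinates and must be mapped back to the native image. `scale_boxes` itself is only imported here (from `utils.general`), so the following is a hedged sketch of the transform it applies, not the upstream source:

import torch

def scale_boxes_sketch(img1_shape, boxes, img0_shape):
    # img1_shape: model input (h, w); img0_shape: original image (h, w)
    gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1])  # resize ratio
    pad_w = (img1_shape[1] - img0_shape[1] * gain) / 2  # horizontal letterbox padding
    pad_h = (img1_shape[0] - img0_shape[0] * gain) / 2  # vertical letterbox padding
    boxes[:, [0, 2]] -= pad_w  # remove x padding
    boxes[:, [1, 3]] -= pad_h  # remove y padding
    boxes[:, :4] /= gain       # undo the resize
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clamp(0, img0_shape[1])  # clip x to image
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clamp(0, img0_shape[0])  # clip y to image
    return boxes

# 640x640 letterboxed input over a 480x640 image: gain = 1.0, 80 px vertical padding
b = torch.tensor([[100., 180., 200., 280.]])
print(scale_boxes_sketch((640, 640), b, (480, 640)))  # tensor([[100., 100., 200., 200.]])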
3
2023-11-12 13:28:26+00:00
24k
cyberark/ark-sdk-python
ark_sdk_python/cli_services/dpa/vm/ark_dpa_vm_policies_editor_service.py
[ { "identifier": "ArkInquirerRender", "path": "ark_sdk_python/args/ark_args_formatter.py", "snippet": "class ArkInquirerRender(ConsoleRender):\n # pylint: disable=keyword-arg-before-vararg,protected-access\n def __init__(self, event_generator=None, *args, **kwargs):\n super().__init__(event_generator=event_generator, theme=ARK_INQUIRER_THEME, *args, **kwargs)\n\n def render(self, question, answers=None):\n question.answers = answers or {}\n\n if question.ignore:\n return question.default\n\n clazz = self.render_factory(question.kind)\n render = clazz(question, terminal=self.terminal, theme=self._theme, show_default=question.show_default)\n if isinstance(\n render, (inquirer.render.console._text.Text, inquirer.render.console._password.Password, inquirer.render.console._path.Path)\n ):\n render.current = ''\n self.clear_eos()\n\n try:\n a = self._event_loop(render)\n if not a and question.default:\n a = question.default\n elif not a and question.name in answers:\n a = answers[question.name]\n return a\n finally:\n print('')\n\n def _print_header(self, render):\n base = render.get_header()\n\n header = base[: self.width - 9] + '...' if len(base) > self.width - 6 else base\n default_value = '{normal} ({default})'.format(default=render.question.default, normal=self.terminal.normal)\n show_default = render.question.default and render.show_default\n header += default_value if show_default else ''\n msg_template = '{t.move_up}{t.clear_eol}{tq.brackets_color}{tq.mark_color}?{tq.brackets_color} {msg}{t.normal}'\n\n escaped_current_value = str(render.get_current_value()).replace('{', '{{').replace('}', '}}')\n self.print_str(\n f'\\n{msg_template} {escaped_current_value}',\n msg=header,\n lf=not render.title_inline,\n tq=self._theme.Question,\n )" }, { "identifier": "ArkISPAuth", "path": "ark_sdk_python/auth/ark_isp_auth.py", "snippet": "class ArkISPAuth(ArkAuth):\n def __perform_identity_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=secret.secret.get_secret_value() if secret else None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, ArkSystemConfig.is_interactive() and method_settings.identity_mfa_interactive, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform [{str(ex)}]')\n raise ArkAuthException from ex\n\n def __perform_identity_refresh_authentication(self, profile: ArkProfile, auth_profile: 
ArkAuthProfile, token: ArkToken) -> ArkToken:\n try:\n method_settings = cast(IdentityArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentity(\n username=auth_profile.username,\n password=None,\n identity_url=method_settings.identity_url,\n mfa_type=method_settings.identity_mfa_method,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n load_cache=True,\n cache_profile=profile,\n )\n identity.refresh_auth_identity(profile, method_settings.identity_mfa_interactive, False)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n token_lifetime = identity.session_details.token_lifetime\n if not token_lifetime:\n token_lifetime = DEFAULT_TOKEN_LIFETIME\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.Identity,\n expires_in=datetime.now() + timedelta(seconds=token_lifetime),\n refresh_token=identity.session_details.refresh_token,\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n raise ArkAuthException('Failed to authenticate to isp via identity') from ex\n\n def __perform_identity_service_user_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret], force: bool\n ) -> ArkToken:\n try:\n if not secret:\n raise ArkException('Token secret is required for identity service user auth')\n method_settings = cast(IdentityServiceUserArkAuthMethodSettings, auth_profile.auth_method_settings)\n identity = ArkIdentityServiceUser(\n username=auth_profile.username,\n token=secret.secret.get_secret_value(),\n app_name=method_settings.identity_authorization_application,\n logger=self._logger,\n cache_authentication=self._cache_authentication,\n )\n identity.auth_identity(profile, force)\n env = AwsEnv(os.environ.get('DEPLOY_ENV', AwsEnv.PROD.value))\n found_env = list(filter(lambda e: ROOT_DOMAIN[e] in identity.identity_url, ROOT_DOMAIN.keys()))\n if found_env:\n env = found_env[0]\n return ArkToken(\n token=identity.session_token,\n username=auth_profile.username,\n endpoint=identity.identity_url,\n token_type=ArkTokenType.JWT,\n auth_method=ArkAuthMethod.IdentityServiceUser,\n expires_in=datetime.now() + timedelta(hours=4),\n metadata={'env': env, 'cookies': codecs.encode(pickle.dumps(identity.session.cookies), 'base64').decode()},\n )\n except Exception as ex:\n self._logger.exception(f'Failed to authenticate to identity security platform with service user [{str(ex)}]')\n raise ArkAuthException from ex\n\n @overrides\n def _perform_authentication(\n self, profile: ArkProfile, auth_profile: ArkAuthProfile, secret: Optional[ArkSecret] = None, force: bool = False\n ) -> ArkToken:\n \"\"\"\n Performs authentication to the identity security platform identity tenant\n Authentication can be done with either a service user or a normal user\n Authentication Methods:\n - Identity, Default\n - IdentityServiceUser\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n secret (Optional[ArkSecret], optional): _description_. Defaults to None.\n force (bool, optional): _description_. 
Defaults to False.\n\n Raises:\n ArkAuthException: _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_authentication(profile, auth_profile, secret, force)\n if auth_profile.auth_method == ArkAuthMethod.IdentityServiceUser:\n return self.__perform_identity_service_user_authentication(profile, auth_profile, secret, force)\n raise ArkAuthException('Given auth method is not supported')\n\n @overrides\n def _perform_refresh_authentication(self, profile: ArkProfile, auth_profile: ArkAuthProfile, token: ArkToken) -> ArkToken:\n \"\"\"\n Refresh for isp tenant is supported only for identity\n\n Args:\n profile (ArkProfile): _description_\n auth_profile (ArkAuthProfile): _description_\n token (ArkToken): _description_\n\n Returns:\n ArkToken: _description_\n \"\"\"\n self._logger.info('Performing refresh authentication to ISP')\n if auth_profile.auth_method in [ArkAuthMethod.Identity, ArkAuthMethod.Default]:\n return self.__perform_identity_refresh_authentication(profile, auth_profile, token)\n return token\n\n @staticmethod\n @overrides\n def authenticator_name() -> str:\n return AUTH_NAME\n\n @staticmethod\n @overrides\n def authenticator_human_readable_name() -> str:\n return AUTH_HUMAN_READABLE_NAME\n\n @staticmethod\n @overrides\n def supported_auth_methods() -> List[ArkAuthMethod]:\n return AUTH_METHODS\n\n @staticmethod\n @overrides\n def default_auth_method() -> Tuple[ArkAuthMethod, ArkAuthMethodSettings]:\n return DEFAULT_AUTH_METHOD, DEFAULT_AUTH_METHOD_SETTINGS" }, { "identifier": "ArkDPABasePoliciesEditorService", "path": "ark_sdk_python/cli_services/dpa/common/ark_dpa_base_policies_editor_service.py", "snippet": "class ArkDPABasePoliciesEditorService(\n ArkService, ABC, Generic[PolicyType, PolicyListItemType, AddPolicyType, UpdatePolicyType, GeneratePolicyType]\n):\n def __init__(\n self,\n policy_type: PolicyType,\n add_policy_type: AddPolicyType,\n update_policy_type: UpdatePolicyType,\n isp_auth: ArkISPAuth,\n policies_family: str,\n tenant_id: str,\n policies_cache_dir: Optional[str] = None,\n profile: Optional[ArkProfile] = None,\n ) -> None:\n super().__init__(isp_auth)\n profile = profile or ArkProfileLoader.load_default_profile()\n self._policies_family = policies_family\n self.__policies_cache_dir = Path(policies_cache_dir or Path.home() / '.ark_cache' / 'profiles' / profile.profile_name / tenant_id)\n if not policies_cache_dir and 'ARK_DPA_POLICIES_EDITOR_FOLDER' in os.environ:\n self.__policies_cache_dir = Path(os.environ['ARK_DPA_POLICIES_EDITOR_FOLDER'])\n self.__policies_cache_dir = self.__policies_cache_dir / policies_family\n self.__policies_cache_dir.mkdir(exist_ok=True, parents=True)\n self.__policy_type = policy_type\n self.__add_policy_type = add_policy_type\n self.__update_policy_type = update_policy_type\n\n @abstractmethod\n def _policy(self, get_policy: ArkDPAGetPolicy) -> PolicyType:\n pass\n\n @abstractmethod\n def _list_policies(self) -> List[PolicyListItemType]:\n pass\n\n @abstractmethod\n def _add_policy(self, add_policy: AddPolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _update_policy(self, update_policy: UpdatePolicyType) -> PolicyType:\n pass\n\n @abstractmethod\n def _delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n pass\n\n @abstractmethod\n def _generate_policy(self, generate_policy: GeneratePolicyType, workspace_policies: List[PolicyType]) 
-> PolicyType:\n pass\n\n def __load_policy_diff(self, workspace_policy: PolicyType) -> Optional[Tuple[PolicyType, PolicyType]]:\n remote_policy = self._policy(ArkDPAGetPolicy(policy_id=str(workspace_policy.policy_id)))\n if remote_policy != workspace_policy:\n return (workspace_policy, remote_policy)\n return None\n\n def __load_policies_diff(self) -> Dict[str, Tuple[PolicyType, PolicyType]]:\n workspace_policies = self.__load_existing_policies_from_workspace()\n with ThreadPoolExecutor() as executor:\n remote_policies = {\n p[0].policy_name: p for p in executor.map(self.__load_policy_diff, workspace_policies.values()) if p is not None\n }\n return remote_policies\n\n def __load_policies_from_workspace_by_suffix(self, suffix: str = '') -> Dict[str, PolicyType]:\n p = Path(self.__policies_cache_dir).glob(f'*.json{suffix}')\n policies_files = [x for x in p if x.is_file() and (x.suffix == suffix or x.suffix == '.json')]\n policies = {}\n for f in policies_files:\n policy = self.__policy_type.parse_file(f)\n policies[policy.policy_name] = policy\n return policies\n\n def __load_removed_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.removed')\n\n def __load_generated_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix('.generated')\n\n def __load_existing_policies_from_workspace(self) -> Dict[str, PolicyType]:\n return self.__load_policies_from_workspace_by_suffix()\n\n def __load_policy_to_workspace(self, policy: PolicyListItemType, override: bool) -> Optional[PolicyType]:\n policy_data = self._policy(ArkDPAGetPolicy(policy_id=policy.policy_id))\n policy_path = Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json')\n if policy_path.exists():\n existing_data = self.__policy_type.parse_raw(policy_path.read_text())\n if existing_data != policy_data:\n if not override:\n return policy_data\n if not policy_data.policy_id:\n policy_data.policy_id = policy.policy_id\n policy_path.write_text(policy_data.json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_data.policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def load_policies(self, load_policies: ArkDPALoadPolicies) -> ArkDPALoadedPolicies:\n \"\"\"\n Loads all remote policies into the local workspace.\n The user is asked whether to overwrite existing policies that were edited either locally or remotely.\n When default overwrite is enabled, existing policies are overwritten without prompts.\n\n Args:\n load_policies (ArkDPALoadPolicies): _description_\n\n Returns:\n ArkDPALoadedPolicies: _description_\n \"\"\"\n policies = self._list_policies()\n policies_to_query: Dict[str, PolicyType] = {}\n with ThreadPoolExecutor() as executor:\n policies_to_query = {\n p.policy_name: p\n for p in executor.map(lambda p: self.__load_policy_to_workspace(p, load_policies.override), policies)\n if p is not None\n }\n # Build the query editor to ask the user\n policies_to_override = []\n if policies_to_query:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'override',\n message=f'Conflicts detected, please choose if you wish to override local {self._policies_family} policies or leave them as is',\n choices=[p.policy_name for p in policies_to_query.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policies_to_override = answers['override']\n for policy_name in policies_to_override:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_path.exists() and policy_name in policies_to_query:\n policy_path.write_text(policies_to_query[policy_name].json(indent=4))\n return ArkDPALoadedPolicies(\n loaded_path=str(self.__policies_cache_dir),\n overall_policies_count=len(policies),\n loaded_policies_count=len(policies) - len(policies_to_query),\n overriden_policies_count=len(policies_to_override),\n untouched_policies_count=len(policies_to_query) - len(policies_to_override),\n )\n\n def edit_policies(self, edit_policies: ArkDPAEditPolicies) -> None:\n \"\"\"\n Edits the set of specified policies one at a time, either via the CLI or the default OS editor.\n Edited policies are only saved locally until they are committed.\n\n Args:\n edit_policies (ArkDPAEditPolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to edit in the workspace, please load the policies or generate a new one'\n )\n policy_names = edit_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to edit?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(f'{name}_edit', message=f'Chosen {self._policies_family} policy [{name}] is about to be edited')\n for name in policy_names\n ],\n render=ArkInquirerRender(),\n answers={f'{name}_edit': workspace_policies[name].json(indent=4) for name in policy_names},\n )\n for name in policy_names:\n policy = self.__policy_type.parse_raw(answers[f'{name}_edit'])\n for path in [\n Path(self.__policies_cache_dir) / (name + '.json'),\n Path(self.__policies_cache_dir) / (name + '.json.generated'),\n ]:\n if path.exists():\n path.write_text(policy.json(indent=4))\n break\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit {self._policies_family} policies, '\n f'you can edit the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def remove_policies(self, remove_policies: ArkDPARemovePolicies) -> None:\n \"\"\"\n Removes one or more policies from the local workspace.\n Until changes are committed, removing a remote policy only appends the `.removed` indication to its name.\n After committing the changes, the policies are deleted both locally and remotely.\n New, uncommitted policies are deleted locally after the user consents.\n\n Args:\n remove_policies (ArkDPARemovePolicies): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n if not workspace_policies:\n raise ArkServiceException(\n f'No {self._policies_family} policies to remove in the workspace, please load the policies or generate a new one'\n )\n policy_names = remove_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to remove?, press space to select',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = 
answers['names']\n for policy_name in policy_names:\n for path in [\n Path(self.__policies_cache_dir) / (policy_name + '.json'),\n Path(self.__policies_cache_dir) / (policy_name + '.json.generated'),\n ]:\n if path.exists():\n if path.suffix == '.json':\n path.rename(Path(self.__policies_cache_dir) / (policy_name + '.json.removed'))\n else:\n answers = inquirer.prompt(\n [\n inquirer.Confirm(\n 'remove',\n message=f'Are you sure you want to remove local {self._policies_family} policy [{policy_name}]?, removing an uncommitted local policy cannot be reverted',\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n if answers['remove']:\n path.unlink(missing_ok=True)\n\n def view_policies(self, view_policies: ArkDPAViewPolicies) -> None:\n \"\"\"\n Allows the user to view one or more policies either together or individually, as defined in the CLI user prompt.\n Policies are viewed in the machine's default editor (both existing policies and newly generated policies).\n\n Args:\n view_policies (ArkDPAViewPolicies): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy_names = view_policies.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to view?',\n choices=[p.policy_name for p in workspace_policies.values()],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n if not policy_names:\n return\n try:\n if view_policies.unified:\n inquirer.prompt(\n [inquirer.Editor('views', f'Show all selected {self._policies_family} policies')],\n answers={\n 'views': '\\n\\n\\n'.join(\n [f'# Policy [{policy_name}]\\n{workspace_policies[policy_name].json(indent=4)}' for policy_name in policy_names]\n )\n },\n render=ArkInquirerRender(),\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_view', f'Show [{policy_name}]') for policy_name in policy_names],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_view': workspace_policies[policy_name].json(indent=4) for policy_name in policy_names},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to view the {self._policies_family} policies, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def reset_policies(self, reset_policy: ArkDPAResetPolicies) -> None:\n \"\"\"\n Resets local workspace policies.\n When all policies are reset, all local policies are overwritten and deleted policies are removed.\n Otherwise, the user can select which policies are reset.\n This function does not alter newly generated uncommitted policies.\n\n Args:\n reset_policy (ArkDPAResetPolicies): _description_\n \"\"\"\n if reset_policy.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to reset all edited {self._policies_family} policies?')]\n )\n if not answers:\n return\n if answers['reset']:\n self.load_policies(ArkDPALoadPolicies(override=True))\n else:\n policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not policies_diff and not removed_policies:\n return\n policy_names = reset_policy.names\n if not policy_names:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to reset?, press space to select',\n choices=[p for p in 
list(policies_diff.keys()) + list(removed_policies.keys())],\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n policy_names = answers['names']\n policy_names = [p for p in policy_names if p in policies_diff or p in removed_policies]\n for policy_name in policy_names:\n policy_path = Path(self.__policies_cache_dir) / (policy_name + '.json')\n if policy_name in policies_diff:\n policy_path.write_text(policies_diff[policy_name][1].json(indent=4))\n elif policy_name in removed_policies:\n policy_path.write_text(removed_policies[policy_name].json(indent=4))\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n\n def generate_policy(self, generate_policy: GeneratePolicyType) -> None:\n \"\"\"\n Generates a new policy from a template and the user's parameters.\n The user is prompted for the parameters when they are not specified in the CLI.\n After the policy's parameters are defined, the policy is generated in memory and can be edited.\n The new policy is saved locally until it is committed.\n\n Args:\n generate_policy (GeneratePolicyType): _description_\n \"\"\"\n workspace_policies = self.__load_existing_policies_from_workspace()\n workspace_policies.update(self.__load_generated_policies_from_workspace())\n policy = self._generate_policy(generate_policy, workspace_policies)\n policy_path = Path(self.__policies_cache_dir) / (policy.policy_name + '.json.generated')\n # Let the user edit the generated policy\n if not generate_policy.disable_edit:\n try:\n answers = inquirer.prompt(\n [\n inquirer.Editor(\n 'policy_editor',\n f'The newly generated {self._policies_family} policy is ready to be edited; once edited, it will be saved to the local workspace',\n )\n ],\n render=ArkInquirerRender(),\n answers={'policy_editor': policy.json(indent=4, exclude_none=True)},\n )\n if not answers:\n return\n policy = self.__policy_type.parse_raw(answers['policy_editor'])\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to edit the {self._policies_family} policy, '\n f'the policy will be saved to [{policy_path}] and can be edited manually [{str(ex)}]'\n )\n policy_path.write_text(policy.json(indent=4))\n\n def policies_diff(self, policies_diff: ArkDPAPoliciesDiff) -> None:\n \"\"\"\n Calculates the diff between the local workspace and remote policies.\n This diff includes uncommitted removed policies. 
A unified or per policy diff can be displayed.\n\n Args:\n policies_diff (ArkDPAPoliciesDiff): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies:\n return\n if policies_diff.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in policies_diff.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in policies_diff.names}\n if not loaded_policies_diff and not removed_policies:\n return\n diffs = {\n policy_name: difflib.unified_diff(\n policy_tuple[1].json(indent=4).splitlines(True),\n policy_tuple[0].json(indent=4).splitlines(True),\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy_tuple in loaded_policies_diff.items()\n }\n diffs.update(\n {\n policy_name: difflib.unified_diff(\n policy.json(indent=4).splitlines(True),\n '',\n fromfile=f'local policy [{policy_name}]',\n tofile=f'remote policy [{policy_name}]',\n n=MAX_LINE_DIFF,\n )\n for policy_name, policy in removed_policies.items()\n }\n )\n try:\n if policies_diff.unified:\n inquirer.prompt(\n [inquirer.Editor('diffs', 'Show all diffs')],\n render=ArkInquirerRender(),\n answers={'diffs': '\\n\\n\\n'.join([''.join(d) for d in diffs.values()])},\n )\n else:\n inquirer.prompt(\n [inquirer.Editor(f'{policy_name}_diff', f'Show [{policy_name}] diff') for policy_name in diffs.keys()],\n render=ArkInquirerRender(),\n answers={f'{policy_name}_diff': ''.join(policy_diffs) for policy_name, policy_diffs in diffs.items()},\n )\n except EditorError as ex:\n self._logger.error(\n f'An error occurred while trying to show {self._policies_family} policies diff, '\n f'you can view the policies at [{self.__policies_cache_dir}] [{str(ex)}]'\n )\n\n def policies_status(self, get_policies_status: ArkDPAGetPoliciesStatus) -> ArkDPAPoliciesStatus:\n \"\"\"\n Gets the status of locally altered policies.\n\n Args:\n get_policies_status (ArkDPAGetPoliciesStatus): _description_\n\n Returns:\n ArkDPAPoliciesStatus: _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if get_policies_status.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in get_policies_status.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in get_policies_status.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in get_policies_status.names}\n return ArkDPAPoliciesStatus(\n modified_policies=list(loaded_policies_diff.keys()),\n removed_policies=list(removed_policies.keys()),\n added_policies=list(generated_policies.keys()),\n )\n\n def commit_policies(self, commit_policies: ArkDPACommitPolicies) -> None:\n \"\"\"\n Commits policies.\n The function first calculates the differences between the local and remote policies to find out which policies were edited, including\n the policies selected for deletion and new, uncommitted policies. 
It also\n allows selecting whether to commit all the edited policies or only specific policies by name.\n\n After all policies are committed, the workspace is reorganized accordingly.\n\n Args:\n commit_policies (ArkDPACommitPolicies): _description_\n \"\"\"\n loaded_policies_diff = self.__load_policies_diff()\n removed_policies = self.__load_removed_policies_from_workspace()\n generated_policies = self.__load_generated_policies_from_workspace()\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n if commit_policies.all:\n answers = inquirer.prompt(\n [inquirer.Confirm('reset', message=f'Are you sure you want to commit all edited {self._policies_family} policies?')]\n )\n if not answers or not answers['reset']:\n return\n else:\n if commit_policies.names:\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in commit_policies.names}\n removed_policies = {k: v for k, v in removed_policies.items() if k in commit_policies.names}\n generated_policies = {k: v for k, v in generated_policies.items() if k in commit_policies.names}\n else:\n answers = inquirer.prompt(\n [\n inquirer.Checkbox(\n 'names',\n f'Which {self._policies_family} policies would you like to commit?, press space to select',\n choices=list(loaded_policies_diff.keys()) + list(removed_policies.keys()) + list(generated_policies.keys()),\n )\n ],\n render=ArkInquirerRender(),\n )\n if not answers:\n return\n loaded_policies_diff = {k: v for k, v in loaded_policies_diff.items() if k in answers['names']}\n removed_policies = {k: v for k, v in removed_policies.items() if k in answers['names']}\n generated_policies = {k: v for k, v in generated_policies.items() if k in answers['names']}\n if not loaded_policies_diff and not removed_policies and not generated_policies:\n return\n with ThreadPoolExecutor() as executor:\n added = executor.map(lambda p: self._add_policy(self.__add_policy_type(**p.dict())), generated_policies.values())\n updated = executor.map(lambda p: self._update_policy(self.__update_policy_type(**p[0].dict())), loaded_policies_diff.values())\n deleted = executor.map(\n lambda p: self._delete_policy(ArkDPADeletePolicy(policy_id=p.policy_id, policy_name=p.policy_name)),\n removed_policies.values(),\n )\n # Loop for exception checking\n added_policies = list(added)\n for _ in itertools.chain(updated, deleted):\n pass\n for policy_name in removed_policies.keys():\n (Path(self.__policies_cache_dir) / (policy_name + '.json.removed')).unlink(missing_ok=True)\n for policy_name in generated_policies.keys():\n for policy in added_policies:\n if policy.policy_name == policy_name:\n (Path(self.__policies_cache_dir) / (policy_name + '.json.generated')).rename(\n (Path(self.__policies_cache_dir) / (policy_name + '.json'))\n )\n (Path(self.__policies_cache_dir) / (policy_name + '.json')).write_text(policy.json(indent=4))" }, { "identifier": "ArkProfile", "path": "ark_sdk_python/models/ark_profile.py", "snippet": "class ArkProfile(ArkModel):\n profile_name: str = Field(default='ark', alias='Profile Name', description='Profile name for storage')\n profile_description: str = Field(default='Default Ark Profile', alias='Profile Description', description='Info about the profile')\n auth_profiles: Dict[str, ArkAuthProfile] = Field(\n description='Authentication profiles configurations, map from name of the authenticator to its profile', default_factory=dict\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('auth_profiles', pre=True)\n def 
validate_auth_profiles(cls, val):\n auth_profiles = {}\n for k, v in val.items():\n auth_profile = ArkAuthProfile.parse_obj(v)\n # Make sure that the settings are parsed with the correct class\n # Due to properties overlapping\n if 'auth_method_settings' in v:\n auth_profile.auth_method_settings = ArkAuthMethodSettingsMap[auth_profile.auth_method].parse_obj(v['auth_method_settings'])\n auth_profiles[k] = auth_profile\n return auth_profiles" }, { "identifier": "ArkDPAVMGeneratePolicy", "path": "ark_sdk_python/models/cli_services/dpa/policies_editor/vm/ark_dpa_vm_generate_policy.py", "snippet": "class ArkDPAVMGeneratePolicy(ArkDPABaseGeneratePolicy):\n providers: Optional[Set[Literal['AWS', 'Azure', 'OnPrem']]] = Field(description='Providers to generate the policy for')\n protocols: Optional[Set[Literal['ssh', 'rdp']]] = Field(description='Protocols to generate the policy for')" }, { "identifier": "ArkProtocolType", "path": "ark_sdk_python/models/common/ark_protocol_type.py", "snippet": "class ArkProtocolType(str, MultiValueEnum):\n SSH = 'ssh', 'SSH'\n SCP = 'scp', 'SCP'\n SFTP = 'sftp', 'SFTP'\n RDP = 'rdp', 'RDP'\n CLI = 'cli', 'CLI'\n CONSOLE = 'console', 'Console'\n HTTPS = 'https', 'HTTPS'\n K8S = 'K8S', 'k8s'\n DB = 'Database', 'database', 'DATABASE'" }, { "identifier": "ArkWorkspaceType", "path": "ark_sdk_python/models/common/ark_workspace_type.py", "snippet": "class ArkWorkspaceType(str, MultiValueEnum):\n AWS = 'aws', 'AWS', 'Aws'\n AZURE = 'azure', 'AZURE', 'Azure'\n ONPREM = 'onprem', 'ON-PREMISE', 'OnPrem'\n DB = 'db', 'DATABASES', 'Databases'\n GCP = 'gcp', 'GCP'\n MYSQL = 'mysql', 'MySQL'\n MARIADB = 'mariadb', 'MariaDB'\n MSSQL = 'mssql', 'MSSQL'\n ORACLE = 'oracle', 'Oracle'\n POSTGRES = 'postgres', 'Postgres'\n FAULT = 'fault', 'FAULT'\n UNKNOWN = 'unknown', 'UNKNOWN', 'Unknown'" }, { "identifier": "ArkServiceConfig", "path": "ark_sdk_python/models/services/ark_service_config.py", "snippet": "class ArkServiceConfig(ArkModel):\n service_name: str = Field(description='Name of the service')\n required_authenticator_names: List[str] = Field(description='Required authenticators for the service to properly work')\n optional_authenticator_names: List[str] = Field(\n description='Optional authenticators for the service for extra capabilities', default_factory=list\n )" }, { "identifier": "ArkDPADeletePolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_delete_policy.py", "snippet": "class ArkDPADeletePolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to delete')\n policy_name: Optional[str] = Field(description='Policy name to delete')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPAGetPolicy", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_get_policy.py", "snippet": "class ArkDPAGetPolicy(ArkModel):\n policy_id: Optional[str] = Field(description='Policy id to get')\n policy_name: Optional[str] = Field(description='Policy name to get')\n\n # pylint: disable=no-self-use,no-self-argument\n @root_validator\n def validate_either(cls, values):\n if 'policy_id' not in values and 'policy_name' not in values:\n raise ValueError('Either policy id or policy name needs to be provided')\n return values" }, { "identifier": "ArkDPARuleStatus", "path": 
"ark_sdk_python/models/services/dpa/policies/common/ark_dpa_rule_status.py", "snippet": "class ArkDPARuleStatus(str, Enum):\n Enabled = 'Enabled'\n Disabled = 'Disabled'\n Draft = 'Draft'\n Expired = 'Expired'" }, { "identifier": "ArkDPAUserData", "path": "ark_sdk_python/models/services/dpa/policies/common/ark_dpa_user_data.py", "snippet": "class ArkDPAUserData(ArkCamelizedModel):\n roles: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Roles allowed for auth rule', default_factory=list)\n groups: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Groups allowed for auth rule', default_factory=list)\n users: Optional[List[Union[str, ArkDPAUserDataAttribute]]] = Field(description='Users allowed for auth rule', default_factory=list)" }, { "identifier": "ArkDPAVMAddPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_add_policy.py", "snippet": "class ArkDPAVMAddPolicy(ArkDPABaseAddPolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(\n description='Workspaces / cloud providers data per type of cloud provider, '\n 'for example for AWS, how to filter ec2 instances to connect to'\n )\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(\n description='Rules describing how and who will be able to connect to the target instances filtered by the cloud providers'\n )\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMAuthorizationRule", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py", "snippet": "class ArkDPAVMAuthorizationRule(ArkDPABaseAuthorizationRule):\n connection_information: ArkDPAVMConnectionInformation = Field(description='Rule information on how access is made')" }, { "identifier": "ArkDPAVMConnectionInformation", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_authorization_rule.py", "snippet": "class ArkDPAVMConnectionInformation(ArkDPABaseConnectionInformation):\n connect_as: ArkDPAVMProvidersConnectionDict = Field(description='In which fashion the connection is made')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('connect_as')\n def validate_connect_as(cls, val):\n for k, v in val.items():\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n for k2 in v.keys():\n if ArkProtocolType(k2) not in [\n ArkProtocolType.SSH,\n ArkProtocolType.RDP,\n ArkProtocolType.SFTP,\n ArkProtocolType.SCP,\n ArkProtocolType.HTTPS,\n ]:\n raise ValueError('Invalid connection type')\n return val" }, { "identifier": "ArkDPAVMConnectionDataType", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_connection_data.py", "snippet": "class ArkDPAVMConnectionMethodData(ArkCamelizedModel):\nclass ArkDPAVMLocalEphemeralUserConnectionMethodData(ArkDPAVMConnectionMethodData):\nclass ArkDPAVMRDPLocalEphemeralUserConnectionData(ArkCamelizedModel):" }, { "identifier": "ArkDPAVMPolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policy.py", 
"snippet": "class ArkDPAVMPolicy(ArkDPABasePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='Cloud providers info of the policy')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='Authorization rules of the policy')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPolicyListItem", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_policy_list_item.py", "snippet": "class ArkDPAVMPolicyListItem(ArkDPABasePolicyListItem):\n platforms: Optional[List[ArkWorkspaceType]] = Field(description='Names of the platforms of the policy')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('platforms')\n def validate_platforms(cls, val):\n if val is not None:\n for plat in val:\n if ArkWorkspaceType(plat) not in [\n ArkWorkspaceType.AWS,\n ArkWorkspaceType.AZURE,\n ArkWorkspaceType.GCP,\n ArkWorkspaceType.ONPREM,\n ]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMAWSProviderData", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_providers.py", "snippet": "class ArkDPAVMAWSProviderData(ArkCamelizedModel):\nclass ArkDPAVMAzureProviderData(ArkCamelizedModel):\nclass ArkDPAVMGCPProviderData(ArkCamelizedModel):\nclass ArkDPAVMFQDNRulesConjunction(str, Enum):\nclass ArkDPAVMFQDNOperator(str, Enum):\nclass ArkDPAVMFQDNRule(ArkCamelizedModel):\nclass ArkDPAVMOnPremProviderData(ArkCamelizedModel):\n AND = 'AND'\n OR = 'OR'\n EXACTLY = 'EXACTLY'\n WILDCARD = 'WILDCARD'\n PREFIX = 'PREFIX'\n SUFFIX = 'SUFFIX'\n CONTAINS = 'CONTAINS'" }, { "identifier": "ArkDPAVMUpdatePolicy", "path": "ark_sdk_python/models/services/dpa/policies/vm/ark_dpa_vm_update_policy.py", "snippet": "class ArkDPAVMUpdatePolicy(ArkDPABaseUpdatePolicy):\n providers_data: Optional[ArkDPAVMProvidersDict] = Field(description='New cloud providers to update')\n user_access_rules: Optional[List[ArkDPAVMAuthorizationRule]] = Field(description='New access rules to update')\n\n # pylint: disable=no-self-use,no-self-argument\n @validator('providers_data', pre=True)\n def validate_providers_data(cls, val):\n if val is not None:\n for k in val.keys():\n val[k]['providerName'] = serialize_dpa_vm_policies_workspace_type(ArkWorkspaceType(k))\n if ArkWorkspaceType(k) not in [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM]:\n raise ValueError('Invalid Platform / Workspace Type')\n return val" }, { "identifier": "ArkDPAVMPoliciesService", "path": "ark_sdk_python/services/dpa/policies/vm/ark_dpa_vm_policies_service.py", "snippet": "class ArkDPAVMPoliciesService(ArkService):\n def __init__(self, isp_auth: ArkISPAuth) -> None:\n super().__init__(isp_auth)\n self.__isp_auth = isp_auth\n self.__client: ArkISPServiceClient = ArkISPServiceClient.from_isp_auth(self.__isp_auth, 'dpa')\n\n @property\n def isp_client(self) -> ArkISPServiceClient:\n return self.__client\n\n def __policy_id_by_name(self, policy_name: str) -> str:\n policies = self.list_policies_by(ArkDPAVMPoliciesFilter(name=policy_name))\n if not policies:\n raise 
ArkServiceException(f'Failed to find vm policy id by name [{policy_name}]')\n return policies[0].policy_id\n\n @staticmethod\n def __serialize_providers_dict(providers_data: ArkDPAVMProvidersDict) -> Dict:\n serialized_providers_data = {}\n for k in list(providers_data.keys()):\n serialized_providers_data[serialize_dpa_vm_policies_workspace_type(k)] = providers_data[k].dict(by_alias=True)\n return serialized_providers_data\n\n @staticmethod\n def __serialize_authorization_rules_dict(authorization_rules: List[Dict]) -> None:\n for rule in authorization_rules:\n for k in list(rule['connectionInformation']['connectAs'].keys()):\n for pk in list(rule['connectionInformation']['connectAs'][k].keys()):\n item = rule['connectionInformation']['connectAs'][k][pk]\n del rule['connectionInformation']['connectAs'][k][pk]\n rule['connectionInformation']['connectAs'][k][serialize_dpa_vm_policies_protocol_type(pk)] = item\n item = rule['connectionInformation']['connectAs'][k]\n del rule['connectionInformation']['connectAs'][k]\n rule['connectionInformation']['connectAs'][serialize_dpa_vm_policies_workspace_type(k)] = item\n\n def add_policy(self, add_policy: ArkDPAVMAddPolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Adds a new VM policy with the specified information.\n\n Args:\n add_policy (ArkDPAVMAddPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n self._logger.info(f'Adding new vm policy [{add_policy.policy_name}]')\n add_policy_dict = add_policy.dict(by_alias=True)\n add_policy_dict['providersData'] = self.__serialize_providers_dict(add_policy.providers_data)\n self.__serialize_authorization_rules_dict(add_policy_dict['userAccessRules'])\n resp: Response = self.__client.post(VM_POLICIES_API, json=add_policy_dict)\n if resp.status_code == HTTPStatus.CREATED:\n try:\n policy_id = resp.json()['policyId']\n return self.policy(ArkDPAGetPolicy(policy_id=policy_id))\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse add vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse add vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to add vm policy [{resp.text}] - [{resp.status_code}]')\n\n def delete_policy(self, delete_policy: ArkDPADeletePolicy) -> None:\n \"\"\"\n Deletes the specified (ID or name) VM policy.\n\n Args:\n delete_policy (ArkDPADeletePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n \"\"\"\n if delete_policy.policy_name and not delete_policy.policy_id:\n delete_policy.policy_id = self.__policy_id_by_name(delete_policy.policy_name)\n self._logger.info(f'Deleting vm policy [{delete_policy.policy_id}]')\n resp: Response = self.__client.delete(VM_POLICY_API.format(policy_id=delete_policy.policy_id))\n if resp.status_code != HTTPStatus.NO_CONTENT:\n raise ArkServiceException(f'Failed to delete vm policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy(self, update_policy: ArkDPAVMUpdatePolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Updates a VM policy.\n\n Args:\n update_policy (ArkDPAVMUpdatePolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if update_policy.policy_name and not update_policy.policy_id:\n update_policy.policy_id = self.__policy_id_by_name(update_policy.policy_name)\n self._logger.info(f'Updating vm policy [{update_policy.policy_id}]')\n update_dict = json.loads(update_policy.json(by_alias=True, 
exclude_none=True, exclude={'new_policy_name', 'policy_name'}))\n if update_policy.new_policy_name:\n update_dict['policyName'] = update_policy.new_policy_name\n else:\n update_dict['policyName'] = update_policy.policy_name\n if update_policy.providers_data:\n update_dict['providersData'] = self.__serialize_providers_dict(update_policy.providers_data)\n if 'userAccessRules' in update_dict:\n self.__serialize_authorization_rules_dict(update_dict['userAccessRules'])\n resp: Response = self.__client.put(VM_POLICY_API.format(policy_id=update_policy.policy_id), json=update_dict)\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPAVMPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse update vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse update vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to update vm policy [{resp.text}] - [{resp.status_code}]')\n\n def update_policy_status(self, update_policy_status: ArkDPAUpdatePolicyStatus) -> ArkDPAVMPolicy:\n \"\"\"\n Updates the status of the specified (by ID) VM policy.\n\n Args:\n update_policy_status (ArkDPAUpdatePolicyStatus): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if update_policy_status.policy_name and not update_policy_status.policy_id:\n update_policy_status.policy_id = self.__policy_id_by_name(update_policy_status.policy_name)\n self._logger.info(f'Updating vm policy status [{update_policy_status.policy_id}]')\n resp: Response = self.__client.put(\n VM_UPDATE_POLICY_STATUS_API.format(policy_id=update_policy_status.policy_id),\n json=update_policy_status.dict(exclude={'policy_id'}),\n )\n if resp.status_code == HTTPStatus.OK:\n return self.policy(ArkDPAGetPolicy(policy_id=update_policy_status.policy_id))\n raise ArkServiceException(f'Failed to update vm policy status [{resp.text}] - [{resp.status_code}]')\n\n def list_policies(self) -> List[ArkDPAVMPolicyListItem]:\n \"\"\"\n Lists all of the tenant's VM policies.\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n List[ArkDPAVMPolicyListItem]: _description_\n \"\"\"\n self._logger.info('Retrieving all vm policies')\n resp: Response = self.__client.get(VM_POLICIES_API)\n if resp.status_code == HTTPStatus.OK:\n try:\n return parse_obj_as(List[ArkDPAVMPolicyListItem], resp.json()['items'])\n except (ValidationError, JSONDecodeError, KeyError) as ex:\n self._logger.exception(f'Failed to parse list vm policies response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse list vm policies response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to list vm policies [{resp.text}] - [{resp.status_code}]')\n\n def list_policies_by(self, policies_filter: ArkDPAVMPoliciesFilter) -> List[ArkDPAVMPolicyListItem]:\n \"\"\"\n Lists VM policies that match the specified filters.\n\n Args:\n policies_filter (ArkDPAVMPoliciesFilter): _description_\n\n Returns:\n List[ArkDPAVMPolicyListItem]: _description_\n \"\"\"\n self._logger.info(f'Retrieving vm policies by filter [{policies_filter}]')\n policies = self.list_policies()\n\n # Filter by statuses\n if policies_filter.statuses:\n policies = [p for p in policies if p.status in policies_filter.statuses]\n\n # Filter by name wildcard\n if policies_filter.name:\n policies = [p for p in policies if fnmatch(p.policy_name, policies_filter.name)]\n\n # Filter by cloud providers\n if 
policies_filter.providers:\n policies = [p for p in policies if all(cp.value in p.platforms for cp in policies_filter.providers)]\n\n return policies\n\n def policy(self, get_policy: ArkDPAGetPolicy) -> ArkDPAVMPolicy:\n \"\"\"\n Retrieves a VM policy by ID.\n\n Args:\n get_policy (ArkDPAGetPolicy): _description_\n\n Raises:\n ArkServiceException: _description_\n\n Returns:\n ArkDPAVMPolicy: _description_\n \"\"\"\n if get_policy.policy_name and not get_policy.policy_id:\n get_policy.policy_id = self.__policy_id_by_name(get_policy.policy_name)\n self._logger.info(f'Retrieving vm policy [{get_policy.policy_id}]')\n resp: Response = self.__client.get(VM_POLICY_API.format(policy_id=get_policy.policy_id))\n if resp.status_code == HTTPStatus.OK:\n try:\n return ArkDPAVMPolicy.parse_obj(resp.json())\n except (ValidationError, JSONDecodeError) as ex:\n self._logger.exception(f'Failed to parse vm policy response [{str(ex)}] - [{resp.text}]')\n raise ArkServiceException(f'Failed to parse vm policy response [{str(ex)}]') from ex\n raise ArkServiceException(f'Failed to retrieve vm policy [{get_policy.policy_id}] [{resp.text}] - [{resp.status_code}]')\n\n def policies_stats(self) -> ArkDPAVMPoliciesStats:\n \"\"\"\n Calculates VM policy statistics.\n\n Returns:\n ArkDPAVMPoliciesStats: _description_\n \"\"\"\n self._logger.info('Calculating vm policies stats')\n policies = self.list_policies()\n policies_stats = ArkDPAVMPoliciesStats.construct()\n policies_stats.policies_count = len(policies)\n\n # Count policies per status\n status_types: Set[ArkDPARuleStatus] = {p.status for p in policies if p.status}\n policies_stats.policies_count_per_status = {st: len([p for p in policies if p.status and p.status == st]) for st in status_types}\n\n # Count policies per platforms\n policies_stats.policies_count_per_provider = {}\n for policy in policies:\n for platform in policy.platforms:\n if platform not in policies_stats.policies_count_per_provider:\n policies_stats.policies_count_per_provider[platform] = 0\n policies_stats.policies_count_per_provider[platform] += 1\n\n return policies_stats\n\n @staticmethod\n @overrides\n def service_config() -> ArkServiceConfig:\n return SERVICE_CONFIG" } ]
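The editor service in the context above renders policy drift with difflib.unified_diff over JSON serializations of the local and remote policy objects. Below is a minimal, self-contained sketch of that technique using only the standard library; the sample policy dicts and the MAX_LINE_DIFF value are hypothetical stand-ins, not the SDK's own objects.

import difflib
import json

MAX_LINE_DIFF = 100000  # assumption: a context window large enough to show the whole document

remote_policy = {"policyName": "demo", "status": "Enabled", "description": "remote state"}
local_policy = {"policyName": "demo", "status": "Draft", "description": "local edit"}

# unified_diff treats the first sequence as "old" and the second as "new";
# splitlines(True) keeps line endings so the diff lines can be joined verbatim.
diff = difflib.unified_diff(
    json.dumps(remote_policy, indent=4).splitlines(True),
    json.dumps(local_policy, indent=4).splitlines(True),
    fromfile="remote policy [demo]",
    tofile="local policy [demo]",
    n=MAX_LINE_DIFF,
)
print("".join(diff))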
from datetime import date, timedelta from typing import Dict, Final, List, Optional from overrides import overrides from ark_sdk_python.args.ark_args_formatter import ArkInquirerRender from ark_sdk_python.auth.ark_isp_auth import ArkISPAuth from ark_sdk_python.cli_services.dpa.common.ark_dpa_base_policies_editor_service import ArkDPABasePoliciesEditorService from ark_sdk_python.models.ark_profile import ArkProfile from ark_sdk_python.models.cli_services.dpa.policies_editor.vm import ArkDPAVMGeneratePolicy from ark_sdk_python.models.common import ArkProtocolType, ArkWorkspaceType from ark_sdk_python.models.services import ArkServiceConfig from ark_sdk_python.models.services.dpa.policies.common import ArkDPADeletePolicy, ArkDPAGetPolicy, ArkDPARuleStatus, ArkDPAUserData from ark_sdk_python.models.services.dpa.policies.vm import ( ArkDPAVMAddPolicy, ArkDPAVMAuthorizationRule, ArkDPAVMAWSProviderData, ArkDPAVMAzureProviderData, ArkDPAVMConnectionDataType, ArkDPAVMConnectionInformation, ArkDPAVMFQDNOperator, ArkDPAVMFQDNRule, ArkDPAVMFQDNRulesConjunction, ArkDPAVMGCPProviderData, ArkDPAVMLocalEphemeralUserConnectionMethodData, ArkDPAVMOnPremProviderData, ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMProvider, ArkDPAVMRDPLocalEphemeralUserConnectionData, ArkDPAVMUpdatePolicy, ) from ark_sdk_python.services.dpa.policies.vm.ark_dpa_vm_policies_service import ArkDPAVMPoliciesService import inquirer
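The import block above pulls in python-inquirer, which drives the interactive confirm/checkbox flows seen in commit_policies earlier in the context. A minimal sketch of that prompt pattern follows; the question names, messages, and choices are illustrative only, not the SDK's.

import inquirer

# Confirm yields {'commit_all': bool}; prompt returns None if the user aborts (Ctrl+C).
answers = inquirer.prompt([inquirer.Confirm('commit_all', message='Commit all edited policies?', default=False)])
if answers and answers['commit_all']:
    print('committing everything')
else:
    # Checkbox collects the selected choices as a list under the question name.
    picked = inquirer.prompt(
        [inquirer.Checkbox('names', message='Which policies would you like to commit? (space selects)', choices=['policy-a', 'policy-b'])]
    )
    print(picked['names'] if picked else [])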
14503
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService( ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMAddPolicy, ArkDPAVMUpdatePolicy, ArkDPAVMGeneratePolicy] ):
SERVICE_CONFIG: Final[ArkServiceConfig] = ArkServiceConfig( service_name='dpa-policies-vm-editor', required_authenticator_names=['isp'], optional_authenticator_names=[] ) DEFAULT_GENERATED_POLICY: Final[ArkDPAVMPolicy] = ArkDPAVMPolicy( policy_name='Default VM Policy', status=ArkDPARuleStatus.Draft, description='Auto generated vm policy', providers_data={}, start_date=date.today().strftime('%Y-%m-%d'), end_date=(date.today() + timedelta(days=7)).strftime('%Y-%m-%d'), user_access_rules=[], ) DEFAULT_GENERATED_AUTHORIZATION_RULE: Final[ArkDPAVMAuthorizationRule] = ArkDPAVMAuthorizationRule( rule_name='Default VM Rule', user_data=ArkDPAUserData(roles=['DpaAdmin'], groups=[], users=[]), connection_information=ArkDPAVMConnectionInformation( connect_as={}, grant_access=2, idle_time=10, days_of_week=[], full_days=True, hours_from='07:00', hours_to='17:00', time_zone='Asia/Jerusalem', ), ) DEFAULT_GENERATED_PROVIDERS: Final[Dict[ArkWorkspaceType, ArkDPAVMProvider]] = { ArkWorkspaceType.AWS: ArkDPAVMAWSProviderData(regions=[], tags=[{'key': 'value'}], vpc_ids=[], account_ids=[]), ArkWorkspaceType.AZURE: ArkDPAVMAzureProviderData( regions=[], tags=[{'key': 'value'}], resource_groups=[], vnet_ids=[], subscriptions=[] ), ArkWorkspaceType.GCP: ArkDPAVMGCPProviderData(regions=[], tags=[{'key': 'value'}], network_ids=[], projects=[]), ArkWorkspaceType.ONPREM: ArkDPAVMOnPremProviderData( fqdn_rules_conjunction=ArkDPAVMFQDNRulesConjunction.OR, fqdn_rules=[ArkDPAVMFQDNRule(operator=ArkDPAVMFQDNOperator.WILDCARD, computername_pattern='*', domain='default.com')], ), } DEFAULT_GENERATED_PROTOCOLS: Final[Dict[ArkProtocolType, ArkDPAVMConnectionDataType]] = { ArkProtocolType.SSH: 'root', ArkProtocolType.RDP: ArkDPAVMRDPLocalEphemeralUserConnectionData( local_ephemeral_user=ArkDPAVMLocalEphemeralUserConnectionMethodData(assign_groups={'Administrators'}) ), } SUPPORTED_SSH_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.GCP, ArkWorkspaceType.ONPREM, ] SUPPORTED_RDP_PROTOCOL_PROVIDERS: Final[ArkWorkspaceType] = [ArkWorkspaceType.AWS, ArkWorkspaceType.AZURE, ArkWorkspaceType.ONPREM] class ArkDPAVMPoliciesEditorService( ArkDPABasePoliciesEditorService[ArkDPAVMPolicy, ArkDPAVMPolicyListItem, ArkDPAVMAddPolicy, ArkDPAVMUpdatePolicy, ArkDPAVMGeneratePolicy] ):
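DEFAULT_GENERATED_PROTOCOLS above is keyed by ArkProtocolType, a str-backed MultiValueEnum, so lookups accept any of a member's aliases ('ssh' or 'SSH'). A small sketch of that behavior, assuming the aenum package's MultiValueEnum (the snippet does not show the enum's import, so this is an assumption); ProtocolType is an illustrative stand-in, not the SDK class.

from aenum import MultiValueEnum

class ProtocolType(str, MultiValueEnum):  # hypothetical stand-in for ArkProtocolType
    SSH = 'ssh', 'SSH'
    RDP = 'rdp', 'RDP'

defaults = {ProtocolType.SSH: 'root'}
# Any listed alias resolves to the same member, so both lookups hit the same key.
assert ProtocolType('SSH') is ProtocolType.SSH
print(defaults[ProtocolType('ssh')])  # -> 'root'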
def __init__(self, isp_auth: ArkISPAuth, policies_cache_dir: Optional[str] = None, profile: Optional[ArkProfile] = None) -> None:
1
2023-11-13 09:24:31+00:00
24k
i-super/Saleor
saleor/graphql/plugins/dataloaders.py
[ { "identifier": "PluginsManager", "path": "saleor/plugins/manager.py", "snippet": "class PluginsManager(PaymentInterface):\n \"\"\"Base manager for handling plugins logic.\"\"\"\n\n plugins_per_channel: dict[str, list[\"BasePlugin\"]] = {}\n global_plugins: list[\"BasePlugin\"] = []\n all_plugins: list[\"BasePlugin\"] = []\n\n @property\n def database(self):\n return (\n settings.DATABASE_CONNECTION_REPLICA_NAME\n if self._allow_replica\n else settings.DATABASE_CONNECTION_DEFAULT_NAME\n )\n\n def _load_plugin(\n self,\n PluginClass: type[\"BasePlugin\"],\n db_configs_map: dict,\n channel: Optional[\"Channel\"] = None,\n requestor_getter=None,\n allow_replica=True,\n ) -> \"BasePlugin\":\n db_config = None\n if PluginClass.PLUGIN_ID in db_configs_map:\n db_config = db_configs_map[PluginClass.PLUGIN_ID]\n plugin_config = db_config.configuration\n active = db_config.active\n channel = db_config.channel\n else:\n plugin_config = PluginClass.DEFAULT_CONFIGURATION\n active = PluginClass.get_default_active()\n\n return PluginClass(\n configuration=plugin_config,\n active=active,\n channel=channel,\n requestor_getter=requestor_getter,\n db_config=db_config,\n allow_replica=allow_replica,\n )\n\n def __init__(self, plugins: list[str], requestor_getter=None, allow_replica=True):\n with opentracing.global_tracer().start_active_span(\"PluginsManager.__init__\"):\n self._allow_replica = allow_replica\n self.all_plugins = []\n self.global_plugins = []\n self.plugins_per_channel = defaultdict(list)\n\n channel_map = self._get_channel_map()\n global_db_configs, channel_db_configs = self._get_db_plugin_configs(\n channel_map\n )\n\n for plugin_path in plugins:\n with opentracing.global_tracer().start_active_span(f\"{plugin_path}\"):\n PluginClass = import_string(plugin_path)\n if not getattr(PluginClass, \"CONFIGURATION_PER_CHANNEL\", False):\n plugin = self._load_plugin(\n PluginClass,\n global_db_configs,\n requestor_getter=requestor_getter,\n allow_replica=allow_replica,\n )\n self.global_plugins.append(plugin)\n self.all_plugins.append(plugin)\n else:\n for channel in channel_map.values():\n channel_configs = channel_db_configs.get(channel, {})\n plugin = self._load_plugin(\n PluginClass,\n channel_configs,\n channel,\n requestor_getter,\n allow_replica,\n )\n self.plugins_per_channel[channel.slug].append(plugin)\n self.all_plugins.append(plugin)\n\n for channel in channel_map.values():\n self.plugins_per_channel[channel.slug].extend(self.global_plugins)\n\n def _get_db_plugin_configs(self, channel_map):\n with opentracing.global_tracer().start_active_span(\"_get_db_plugin_configs\"):\n plugin_manager_configs = PluginConfiguration.objects.using(\n self.database\n ).all()\n channel_configs: defaultdict[Channel, dict] = defaultdict(dict)\n global_configs = {}\n for db_plugin_config in plugin_manager_configs.iterator():\n channel = channel_map.get(db_plugin_config.channel_id)\n if channel is None:\n global_configs[db_plugin_config.identifier] = db_plugin_config\n else:\n db_plugin_config.channel = channel\n channel_configs[channel][\n db_plugin_config.identifier\n ] = db_plugin_config\n\n return global_configs, channel_configs\n\n def __run_method_on_plugins(\n self,\n method_name: str,\n default_value: Any,\n *args,\n channel_slug: Optional[str] = None,\n **kwargs,\n ):\n \"\"\"Try to run a method with the given name on each declared active plugin.\"\"\"\n value = default_value\n plugins = self.get_plugins(channel_slug=channel_slug, active_only=True)\n for plugin in plugins:\n value = 
self.__run_method_on_single_plugin(\n plugin, method_name, value, *args, **kwargs\n )\n return value\n\n def __run_method_on_single_plugin(\n self,\n plugin: Optional[\"BasePlugin\"],\n method_name: str,\n previous_value: Any,\n *args,\n **kwargs,\n ) -> Any:\n \"\"\"Run method_name on plugin.\n\n Method will return value returned from plugin's\n method. If plugin doesn't have own implementation of expected method_name, it\n will return previous_value.\n \"\"\"\n plugin_method = getattr(plugin, method_name, NotImplemented)\n if plugin_method == NotImplemented:\n return previous_value\n returned_value = plugin_method(*args, **kwargs, previous_value=previous_value) # type:ignore\n if returned_value == NotImplemented:\n return previous_value\n return returned_value\n\n def check_payment_balance(self, details: dict, channel_slug: str) -> dict:\n return self.__run_method_on_plugins(\n \"check_payment_balance\", None, details, channel_slug=channel_slug\n )\n\n def change_user_address(\n self,\n address: \"Address\",\n address_type: Optional[str],\n user: Optional[\"User\"],\n save: bool = True,\n ) -> \"Address\":\n default_value = address\n return self.__run_method_on_plugins(\n \"change_user_address\", default_value, address, address_type, user, save\n )\n\n def calculate_checkout_total(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n currency = checkout_info.checkout.currency\n\n default_value = base_calculations.checkout_total(\n checkout_info,\n lines,\n )\n taxed_default_value = TaxedMoney(net=default_value, gross=default_value)\n\n if default_value <= zero_money(currency):\n return quantize_price(\n taxed_default_value,\n currency,\n )\n\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_checkout_total\",\n taxed_default_value,\n checkout_info,\n lines,\n address,\n channel_slug=checkout_info.channel.slug,\n ),\n currency,\n )\n\n def calculate_checkout_subtotal(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n line_totals = [\n self.calculate_checkout_line_total(\n checkout_info,\n lines,\n line_info,\n address,\n )\n for line_info in lines\n ]\n currency = checkout_info.checkout.currency\n total = sum(line_totals, zero_taxed_money(currency))\n return quantize_price(\n total,\n currency,\n )\n\n def calculate_checkout_shipping(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n price = base_calculations.base_checkout_delivery_price(checkout_info, lines)\n default_value = TaxedMoney(price, price)\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_checkout_shipping\",\n default_value,\n checkout_info,\n lines,\n address,\n channel_slug=checkout_info.channel.slug,\n ),\n checkout_info.checkout.currency,\n )\n\n def calculate_order_total(\n self,\n order: \"Order\",\n lines: Iterable[\"OrderLine\"],\n ) -> TaxedMoney:\n currency = order.currency\n default_value = base_order_calculations.base_order_total(order, lines)\n default_value = TaxedMoney(default_value, default_value)\n if default_value <= zero_taxed_money(currency):\n return quantize_price(\n default_value,\n currency,\n )\n\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_order_total\",\n default_value,\n order,\n lines,\n channel_slug=order.channel.slug,\n ),\n currency,\n )\n\n def 
calculate_order_shipping(self, order: \"Order\") -> TaxedMoney:\n shipping_price = order.base_shipping_price\n default_value = quantize_price(\n TaxedMoney(net=shipping_price, gross=shipping_price),\n shipping_price.currency,\n )\n return quantize_price(\n self.__run_method_on_plugins(\n \"calculate_order_shipping\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n ),\n order.currency,\n )\n\n def get_checkout_shipping_tax_rate(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n address: Optional[\"Address\"],\n shipping_price: TaxedMoney,\n ):\n default_value = calculate_tax_rate(shipping_price)\n return self.__run_method_on_plugins(\n \"get_checkout_shipping_tax_rate\",\n default_value,\n checkout_info,\n lines,\n address,\n channel_slug=checkout_info.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def get_order_shipping_tax_rate(self, order: \"Order\", shipping_price: TaxedMoney):\n default_value = calculate_tax_rate(shipping_price)\n return self.__run_method_on_plugins(\n \"get_order_shipping_tax_rate\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def calculate_checkout_line_total(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n checkout_line_info: \"CheckoutLineInfo\",\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n default_value = base_calculations.calculate_base_line_total_price(\n checkout_line_info,\n checkout_info.channel,\n )\n # apply entire order discount\n default_value = base_calculations.apply_checkout_discount_on_checkout_line(\n checkout_info,\n lines,\n checkout_line_info,\n default_value,\n )\n default_value = quantize_price(default_value, checkout_info.checkout.currency)\n default_taxed_value = TaxedMoney(net=default_value, gross=default_value)\n line_total = self.__run_method_on_plugins(\n \"calculate_checkout_line_total\",\n default_taxed_value,\n checkout_info,\n lines,\n checkout_line_info,\n address,\n channel_slug=checkout_info.channel.slug,\n )\n\n return quantize_price(line_total, checkout_info.checkout.currency)\n\n def calculate_order_line_total(\n self,\n order: \"Order\",\n order_line: \"OrderLine\",\n variant: \"ProductVariant\",\n product: \"Product\",\n ) -> OrderTaxedPricesData:\n default_value = base_order_calculations.base_order_line_total(order_line)\n currency = order_line.currency\n\n line_total = self.__run_method_on_plugins(\n \"calculate_order_line_total\",\n default_value,\n order,\n order_line,\n variant,\n product,\n channel_slug=order.channel.slug,\n )\n\n line_total.price_with_discounts = quantize_price(\n line_total.price_with_discounts, currency\n )\n line_total.undiscounted_price = quantize_price(\n line_total.undiscounted_price, currency\n )\n return line_total\n\n def calculate_checkout_line_unit_price(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n checkout_line_info: \"CheckoutLineInfo\",\n address: Optional[\"Address\"],\n ) -> TaxedMoney:\n quantity = checkout_line_info.line.quantity\n default_value = base_calculations.calculate_base_line_unit_price(\n checkout_line_info, checkout_info.channel\n )\n # apply entire order discount\n total_value = base_calculations.apply_checkout_discount_on_checkout_line(\n checkout_info,\n lines,\n checkout_line_info,\n default_value * quantity,\n )\n default_taxed_value = TaxedMoney(\n net=total_value / quantity, gross=default_value\n )\n unit_price = self.__run_method_on_plugins(\n 
\"calculate_checkout_line_unit_price\",\n default_taxed_value,\n checkout_info,\n lines,\n checkout_line_info,\n address,\n channel_slug=checkout_info.channel.slug,\n )\n return quantize_price(unit_price, checkout_info.checkout.currency)\n\n def calculate_order_line_unit(\n self,\n order: \"Order\",\n order_line: \"OrderLine\",\n variant: \"ProductVariant\",\n product: \"Product\",\n ) -> OrderTaxedPricesData:\n default_value = OrderTaxedPricesData(\n undiscounted_price=TaxedMoney(\n order_line.undiscounted_base_unit_price,\n order_line.undiscounted_base_unit_price,\n ),\n price_with_discounts=TaxedMoney(\n order_line.base_unit_price,\n order_line.base_unit_price,\n ),\n )\n currency = order_line.currency\n line_unit = self.__run_method_on_plugins(\n \"calculate_order_line_unit\",\n default_value,\n order,\n order_line,\n variant,\n product,\n channel_slug=order.channel.slug,\n )\n line_unit.price_with_discounts = quantize_price(\n line_unit.price_with_discounts, currency\n )\n line_unit.undiscounted_price = quantize_price(\n line_unit.undiscounted_price, currency\n )\n return line_unit\n\n def get_checkout_line_tax_rate(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Iterable[\"CheckoutLineInfo\"],\n checkout_line_info: \"CheckoutLineInfo\",\n address: Optional[\"Address\"],\n price: TaxedMoney,\n ) -> Decimal:\n default_value = calculate_tax_rate(price)\n return self.__run_method_on_plugins(\n \"get_checkout_line_tax_rate\",\n default_value,\n checkout_info,\n lines,\n checkout_line_info,\n address,\n channel_slug=checkout_info.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def get_order_line_tax_rate(\n self,\n order: \"Order\",\n product: \"Product\",\n variant: \"ProductVariant\",\n address: Optional[\"Address\"],\n unit_price: TaxedMoney,\n ) -> Decimal:\n default_value = calculate_tax_rate(unit_price)\n return self.__run_method_on_plugins(\n \"get_order_line_tax_rate\",\n default_value,\n order,\n product,\n variant,\n address,\n channel_slug=order.channel.slug,\n ).quantize(Decimal(\".0001\"))\n\n def get_tax_rate_type_choices(self) -> list[TaxType]:\n default_value: list = []\n return self.__run_method_on_plugins(\"get_tax_rate_type_choices\", default_value)\n\n def show_taxes_on_storefront(self) -> bool:\n default_value = False\n return self.__run_method_on_plugins(\"show_taxes_on_storefront\", default_value)\n\n def get_taxes_for_checkout(self, checkout_info, lines) -> Optional[TaxData]:\n return self.__run_plugin_method_until_first_success(\n \"get_taxes_for_checkout\",\n checkout_info,\n lines,\n channel_slug=checkout_info.channel.slug,\n )\n\n def get_taxes_for_order(self, order: \"Order\") -> Optional[TaxData]:\n return self.__run_plugin_method_until_first_success(\n \"get_taxes_for_order\", order, channel_slug=order.channel.slug\n )\n\n def preprocess_order_creation(\n self,\n checkout_info: \"CheckoutInfo\",\n lines: Optional[Iterable[\"CheckoutLineInfo\"]] = None,\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"preprocess_order_creation\",\n default_value,\n checkout_info,\n lines,\n channel_slug=checkout_info.channel.slug,\n )\n\n def customer_created(self, customer: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"customer_created\", default_value, customer)\n\n def customer_deleted(self, customer: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"customer_deleted\", default_value, customer, webhooks=webhooks\n )\n\n def customer_updated(self, customer: \"User\", 
webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"customer_updated\", default_value, customer, webhooks=webhooks\n )\n\n def customer_metadata_updated(self, customer: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"customer_metadata_updated\", default_value, customer, webhooks=webhooks\n )\n\n def collection_created(self, collection: \"Collection\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_created\", default_value, collection\n )\n\n def collection_updated(self, collection: \"Collection\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_updated\", default_value, collection\n )\n\n def collection_deleted(self, collection: \"Collection\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_deleted\", default_value, collection, webhooks=webhooks\n )\n\n def collection_metadata_updated(self, collection: \"Collection\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"collection_metadata_updated\", default_value, collection\n )\n\n def product_created(self, product: \"Product\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_created\", default_value, product, webhooks=webhooks\n )\n\n def product_updated(self, product: \"Product\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_updated\", default_value, product, webhooks=webhooks\n )\n\n def product_deleted(self, product: \"Product\", variants: list[int], webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_deleted\", default_value, product, variants, webhooks=webhooks\n )\n\n def product_media_created(self, media: \"ProductMedia\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_media_created\", default_value, media\n )\n\n def product_media_updated(self, media: \"ProductMedia\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_media_updated\", default_value, media\n )\n\n def product_media_deleted(self, media: \"ProductMedia\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_media_deleted\", default_value, media\n )\n\n def product_metadata_updated(self, product: \"Product\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_metadata_updated\", default_value, product\n )\n\n def product_variant_created(self, product_variant: \"ProductVariant\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_variant_created\", default_value, product_variant, webhooks=webhooks\n )\n\n def product_variant_updated(self, product_variant: \"ProductVariant\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_variant_updated\", default_value, product_variant, webhooks=webhooks\n )\n\n def product_variant_deleted(self, product_variant: \"ProductVariant\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_variant_deleted\", default_value, product_variant, webhooks=webhooks\n )\n\n def product_variant_out_of_stock(self, stock: \"Stock\", webhooks=None):\n default_value = None\n self.__run_method_on_plugins(\n \"product_variant_out_of_stock\", default_value, stock, webhooks=webhooks\n )\n\n def product_variant_back_in_stock(self, stock: \"Stock\", webhooks=None):\n default_value = None\n self.__run_method_on_plugins(\n 
\"product_variant_back_in_stock\", default_value, stock, webhooks=webhooks\n )\n\n def product_variant_stock_updated(self, stock: \"Stock\", webhooks=None):\n default_value = None\n self.__run_method_on_plugins(\n \"product_variant_stock_updated\", default_value, stock, webhooks=webhooks\n )\n\n def product_variant_metadata_updated(self, product_variant: \"ProductVariant\"):\n default_value = None\n self.__run_method_on_plugins(\n \"product_variant_metadata_updated\", default_value, product_variant\n )\n\n def product_export_completed(self, export: \"ExportFile\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"product_export_completed\", default_value, export\n )\n\n def order_created(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_created\", default_value, order, channel_slug=order.channel.slug\n )\n\n def event_delivery_retry(self, event_delivery: \"EventDelivery\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"event_delivery_retry\", default_value, event_delivery\n )\n\n def order_confirmed(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_confirmed\", default_value, order, channel_slug=order.channel.slug\n )\n\n def draft_order_created(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"draft_order_created\", default_value, order, channel_slug=order.channel.slug\n )\n\n def draft_order_updated(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"draft_order_updated\", default_value, order, channel_slug=order.channel.slug\n )\n\n def draft_order_deleted(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"draft_order_deleted\", default_value, order, channel_slug=order.channel.slug\n )\n\n def sale_created(self, sale: \"Promotion\", current_catalogue):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_created\", default_value, sale, current_catalogue\n )\n\n def sale_deleted(self, sale: \"Promotion\", previous_catalogue, webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_deleted\", default_value, sale, previous_catalogue, webhooks=webhooks\n )\n\n def sale_updated(self, sale: \"Promotion\", previous_catalogue, current_catalogue):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_updated\", default_value, sale, previous_catalogue, current_catalogue\n )\n\n def sale_toggle(self, sale: \"Promotion\", catalogue):\n default_value = None\n return self.__run_method_on_plugins(\n \"sale_toggle\", default_value, sale, catalogue\n )\n\n def promotion_created(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_created\", default_value, promotion\n )\n\n def promotion_updated(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_updated\", default_value, promotion\n )\n\n def promotion_deleted(self, promotion: \"Promotion\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_deleted\", default_value, promotion, webhooks=webhooks\n )\n\n def promotion_started(self, promotion: \"Promotion\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_started\", default_value, promotion\n )\n\n def promotion_ended(self, promotion: \"Promotion\"):\n default_value = None\n return 
self.__run_method_on_plugins(\"promotion_ended\", default_value, promotion)\n\n def promotion_rule_created(self, promotion_rule: \"PromotionRule\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_rule_created\", default_value, promotion_rule\n )\n\n def promotion_rule_updated(self, promotion_rule: \"PromotionRule\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_rule_updated\", default_value, promotion_rule\n )\n\n def promotion_rule_deleted(self, promotion_rule: \"PromotionRule\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"promotion_rule_deleted\", default_value, promotion_rule\n )\n\n def invoice_request(\n self, order: \"Order\", invoice: \"Invoice\", number: Optional[str]\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"invoice_request\",\n default_value,\n order,\n invoice,\n number,\n channel_slug=order.channel.slug,\n )\n\n def invoice_delete(self, invoice: \"Invoice\"):\n default_value = None\n channel_slug = invoice.order.channel.slug if invoice.order else None\n return self.__run_method_on_plugins(\n \"invoice_delete\",\n default_value,\n invoice,\n channel_slug=channel_slug,\n )\n\n def invoice_sent(self, invoice: \"Invoice\", email: str):\n default_value = None\n channel_slug = invoice.order.channel.slug if invoice.order else None\n return self.__run_method_on_plugins(\n \"invoice_sent\",\n default_value,\n invoice,\n email,\n channel_slug=channel_slug,\n )\n\n def order_fully_paid(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_fully_paid\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_paid(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_paid\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_fully_refunded(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_fully_refunded\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n )\n\n def order_refunded(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_refunded\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_updated(self, order: \"Order\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_updated\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n webhooks=webhooks,\n )\n\n def order_cancelled(self, order: \"Order\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_cancelled\",\n default_value,\n order,\n channel_slug=order.channel.slug,\n webhooks=webhooks,\n )\n\n def order_expired(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_expired\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_fulfilled(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_fulfilled\", default_value, order, channel_slug=order.channel.slug\n )\n\n def order_metadata_updated(self, order: \"Order\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"order_metadata_updated\", default_value, order\n )\n\n def order_bulk_created(self, orders: list[\"Order\"]):\n default_value = None\n return self.__run_method_on_plugins(\"order_bulk_created\", default_value, orders)\n\n def fulfillment_created(\n self, fulfillment: \"Fulfillment\", notify_customer: 
Optional[bool] = True\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_created\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n notify_customer=notify_customer,\n )\n\n def fulfillment_canceled(self, fulfillment: \"Fulfillment\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_canceled\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n )\n\n def fulfillment_approved(\n self, fulfillment: \"Fulfillment\", notify_customer: Optional[bool] = True\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_approved\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n notify_customer=notify_customer,\n )\n\n def fulfillment_metadata_updated(self, fulfillment: \"Fulfillment\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"fulfillment_metadata_updated\", default_value, fulfillment\n )\n\n def tracking_number_updated(self, fulfillment: \"Fulfillment\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"tracking_number_updated\",\n default_value,\n fulfillment,\n channel_slug=fulfillment.order.channel.slug,\n )\n\n def checkout_created(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_created\",\n default_value,\n checkout,\n channel_slug=checkout.channel.slug,\n )\n\n def checkout_updated(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_updated\",\n default_value,\n checkout,\n channel_slug=checkout.channel.slug,\n )\n\n def checkout_fully_paid(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_fully_paid\",\n default_value,\n checkout,\n channel_slug=checkout.channel.slug,\n )\n\n def checkout_metadata_updated(self, checkout: \"Checkout\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"checkout_metadata_updated\", default_value, checkout\n )\n\n def page_created(self, page: \"Page\"):\n default_value = None\n return self.__run_method_on_plugins(\"page_created\", default_value, page)\n\n def page_updated(self, page: \"Page\"):\n default_value = None\n return self.__run_method_on_plugins(\"page_updated\", default_value, page)\n\n def page_deleted(self, page: \"Page\"):\n default_value = None\n return self.__run_method_on_plugins(\"page_deleted\", default_value, page)\n\n def page_type_created(self, page_type: \"PageType\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"page_type_created\", default_value, page_type\n )\n\n def page_type_updated(self, page_type: \"PageType\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"page_type_updated\", default_value, page_type\n )\n\n def page_type_deleted(self, page_type: \"PageType\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"page_type_deleted\", default_value, page_type\n )\n\n def permission_group_created(self, group: \"Group\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"permission_group_created\", default_value, group\n )\n\n def permission_group_updated(self, group: \"Group\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"permission_group_updated\", default_value, group\n )\n\n def permission_group_deleted(self, group: \"Group\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"permission_group_deleted\", default_value, 
group\n )\n\n def transaction_charge_requested(\n self, payment_data: \"TransactionActionData\", channel_slug: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_charge_requested\",\n default_value,\n payment_data,\n channel_slug=channel_slug,\n )\n\n def transaction_refund_requested(\n self, payment_data: \"TransactionActionData\", channel_slug: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_refund_requested\",\n default_value,\n payment_data,\n channel_slug=channel_slug,\n )\n\n def transaction_cancelation_requested(\n self, payment_data: \"TransactionActionData\", channel_slug: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_cancelation_requested\",\n default_value,\n payment_data,\n channel_slug=channel_slug,\n )\n\n def payment_gateway_initialize_session(\n self,\n amount: Decimal,\n payment_gateways: Optional[list[\"PaymentGatewayData\"]],\n source_object: Union[\"Order\", \"Checkout\"],\n ) -> list[\"PaymentGatewayData\"]:\n default_value = None\n return self.__run_method_on_plugins(\n \"payment_gateway_initialize_session\",\n default_value,\n amount,\n payment_gateways,\n source_object,\n channel_slug=source_object.channel.slug,\n )\n\n def transaction_initialize_session(\n self,\n transaction_session_data: \"TransactionSessionData\",\n ) -> \"TransactionSessionResult\":\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_initialize_session\",\n default_value,\n transaction_session_data,\n channel_slug=transaction_session_data.source_object.channel.slug,\n )\n\n def transaction_process_session(\n self,\n transaction_session_data: \"TransactionSessionData\",\n ) -> \"TransactionSessionResult\":\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_process_session\",\n default_value,\n transaction_session_data,\n channel_slug=transaction_session_data.source_object.channel.slug,\n )\n\n def transaction_item_metadata_updated(self, transaction_item: \"TransactionItem\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"transaction_item_metadata_updated\", default_value, transaction_item\n )\n\n def account_confirmed(self, user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"account_confirmed\", default_value, user)\n\n def account_confirmation_requested(\n self, user: \"User\", channel_slug: str, token: str, redirect_url: Optional[str]\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_confirmation_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def account_change_email_requested(\n self,\n user: \"User\",\n channel_slug: str,\n token: str,\n redirect_url: str,\n new_email: str,\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_change_email_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n new_email=new_email,\n )\n\n def account_email_changed(\n self,\n user: \"User\",\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_email_changed\",\n default_value,\n user,\n )\n\n def account_set_password_requested(\n self,\n user: \"User\",\n channel_slug: str,\n token: str,\n redirect_url: str,\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_set_password_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def 
account_delete_requested(\n self, user: \"User\", channel_slug: str, token: str, redirect_url: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"account_delete_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def account_deleted(self, user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"account_deleted\", default_value, user)\n\n def address_created(self, address: \"Address\"):\n default_value = None\n return self.__run_method_on_plugins(\"address_created\", default_value, address)\n\n def address_updated(self, address: \"Address\"):\n default_value = None\n return self.__run_method_on_plugins(\"address_updated\", default_value, address)\n\n def address_deleted(self, address: \"Address\"):\n default_value = None\n return self.__run_method_on_plugins(\"address_deleted\", default_value, address)\n\n def app_installed(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_installed\", default_value, app)\n\n def app_updated(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_updated\", default_value, app)\n\n def app_deleted(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_deleted\", default_value, app)\n\n def app_status_changed(self, app: \"App\"):\n default_value = None\n return self.__run_method_on_plugins(\"app_status_changed\", default_value, app)\n\n def attribute_created(self, attribute: \"Attribute\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_created\", default_value, attribute\n )\n\n def attribute_updated(self, attribute: \"Attribute\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_updated\", default_value, attribute, webhooks=webhooks\n )\n\n def attribute_deleted(self, attribute: \"Attribute\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_deleted\", default_value, attribute, webhooks=webhooks\n )\n\n def attribute_value_created(self, attribute_value: \"AttributeValue\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_value_created\", default_value, attribute_value, webhooks=webhooks\n )\n\n def attribute_value_updated(self, attribute_value: \"AttributeValue\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_value_updated\", default_value, attribute_value\n )\n\n def attribute_value_deleted(self, attribute_value: \"AttributeValue\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"attribute_value_deleted\", default_value, attribute_value, webhooks=webhooks\n )\n\n def category_created(self, category: \"Category\"):\n default_value = None\n return self.__run_method_on_plugins(\"category_created\", default_value, category)\n\n def category_updated(self, category: \"Category\"):\n default_value = None\n return self.__run_method_on_plugins(\"category_updated\", default_value, category)\n\n def category_deleted(self, category: \"Category\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"category_deleted\", default_value, category, webhooks=webhooks\n )\n\n def channel_created(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\"channel_created\", default_value, channel)\n\n def channel_updated(self, channel: \"Channel\", webhooks=None):\n default_value = None\n return 
self.__run_method_on_plugins(\n \"channel_updated\", default_value, channel, webhooks=webhooks\n )\n\n def channel_deleted(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\"channel_deleted\", default_value, channel)\n\n def channel_status_changed(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"channel_status_changed\", default_value, channel\n )\n\n def channel_metadata_updated(self, channel: \"Channel\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"channel_metadata_updated\", default_value, channel\n )\n\n def gift_card_created(self, gift_card: \"GiftCard\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_created\", default_value, gift_card, webhooks=webhooks\n )\n\n def gift_card_updated(self, gift_card: \"GiftCard\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_updated\", default_value, gift_card\n )\n\n def gift_card_deleted(self, gift_card: \"GiftCard\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_deleted\", default_value, gift_card, webhooks=webhooks\n )\n\n def gift_card_sent(self, gift_card: \"GiftCard\", channel_slug: str, email: str):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_sent\",\n default_value,\n gift_card,\n channel_slug,\n email,\n )\n\n def gift_card_status_changed(self, gift_card: \"GiftCard\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_status_changed\", default_value, gift_card, webhooks=webhooks\n )\n\n def gift_card_metadata_updated(self, gift_card: \"GiftCard\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_metadata_updated\", default_value, gift_card\n )\n\n def gift_card_export_completed(self, export: \"ExportFile\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"gift_card_export_completed\", default_value, export\n )\n\n def menu_created(self, menu: \"Menu\"):\n default_value = None\n return self.__run_method_on_plugins(\"menu_created\", default_value, menu)\n\n def menu_updated(self, menu: \"Menu\"):\n default_value = None\n return self.__run_method_on_plugins(\"menu_updated\", default_value, menu)\n\n def menu_deleted(self, menu: \"Menu\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_deleted\", default_value, menu, webhooks=webhooks\n )\n\n def menu_item_created(self, menu_item: \"MenuItem\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_item_created\", default_value, menu_item\n )\n\n def menu_item_updated(self, menu_item: \"MenuItem\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_item_updated\", default_value, menu_item\n )\n\n def menu_item_deleted(self, menu_item: \"MenuItem\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"menu_item_deleted\", default_value, menu_item, webhooks=webhooks\n )\n\n def shipping_price_created(self, shipping_method: \"ShippingMethod\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_price_created\", default_value, shipping_method\n )\n\n def shipping_price_updated(self, shipping_method: \"ShippingMethod\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_price_updated\", default_value, shipping_method\n )\n\n def shipping_price_deleted(self, shipping_method: \"ShippingMethod\", 
webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_price_deleted\", default_value, shipping_method, webhooks=webhooks\n )\n\n def shipping_zone_created(self, shipping_zone: \"ShippingZone\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_created\", default_value, shipping_zone\n )\n\n def shipping_zone_updated(self, shipping_zone: \"ShippingZone\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_updated\", default_value, shipping_zone\n )\n\n def shipping_zone_deleted(self, shipping_zone: \"ShippingZone\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_deleted\", default_value, shipping_zone, webhooks=webhooks\n )\n\n def shipping_zone_metadata_updated(self, shipping_zone: \"ShippingZone\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shipping_zone_metadata_updated\", default_value, shipping_zone\n )\n\n def staff_created(self, staff_user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"staff_created\", default_value, staff_user)\n\n def staff_updated(self, staff_user: \"User\"):\n default_value = None\n return self.__run_method_on_plugins(\"staff_updated\", default_value, staff_user)\n\n def staff_deleted(self, staff_user: \"User\", webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"staff_deleted\", default_value, staff_user, webhooks=webhooks\n )\n\n def staff_set_password_requested(\n self, user: \"User\", channel_slug: str, token: str, redirect_url: str\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"staff_set_password_requested\",\n default_value,\n user,\n channel_slug,\n token=token,\n redirect_url=redirect_url,\n )\n\n def thumbnail_created(\n self,\n thumbnail: \"Thumbnail\",\n ):\n default_value = None\n return self.__run_method_on_plugins(\n \"thumbnail_created\", default_value, thumbnail\n )\n\n def warehouse_created(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_created\", default_value, warehouse\n )\n\n def warehouse_updated(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_updated\", default_value, warehouse\n )\n\n def warehouse_deleted(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_deleted\", default_value, warehouse\n )\n\n def warehouse_metadata_updated(self, warehouse: \"Warehouse\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"warehouse_metadata_updated\", default_value, warehouse\n )\n\n def voucher_created(self, voucher: \"Voucher\", code: str):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_created\", default_value, voucher, code\n )\n\n def voucher_updated(self, voucher: \"Voucher\", code: str):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_updated\", default_value, voucher, code\n )\n\n def voucher_deleted(self, voucher: \"Voucher\", code: str, webhooks=None):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_deleted\", default_value, voucher, code, webhooks=webhooks\n )\n\n def voucher_metadata_updated(self, voucher: \"Voucher\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"voucher_metadata_updated\", default_value, voucher\n )\n\n def voucher_code_export_completed(self, export: \"ExportFile\"):\n 
default_value = None\n return self.__run_method_on_plugins(\n \"voucher_code_export_completed\", default_value, export\n )\n\n def shop_metadata_updated(self, shop: \"SiteSettings\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"shop_metadata_updated\", default_value, shop\n )\n\n def initialize_payment(\n self, gateway, payment_data: dict, channel_slug: str\n ) -> Optional[\"InitializedPaymentResponse\"]:\n method_name = \"initialize_payment\"\n default_value = None\n gtw = self.get_plugin(gateway, channel_slug)\n if not gtw:\n return None\n\n return self.__run_method_on_single_plugin(\n gtw,\n method_name,\n previous_value=default_value,\n payment_data=payment_data,\n )\n\n def authorize_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"authorize_payment\", payment_information, channel_slug=channel_slug\n )\n\n def capture_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"capture_payment\", payment_information, channel_slug=channel_slug\n )\n\n def refund_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"refund_payment\", payment_information, channel_slug=channel_slug\n )\n\n def void_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"void_payment\", payment_information, channel_slug=channel_slug\n )\n\n def confirm_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"confirm_payment\", payment_information, channel_slug=channel_slug\n )\n\n def process_payment(\n self, gateway: str, payment_information: \"PaymentData\", channel_slug: str\n ) -> \"GatewayResponse\":\n return self.__run_payment_method(\n gateway, \"process_payment\", payment_information, channel_slug=channel_slug\n )\n\n def token_is_required_as_payment_input(\n self, gateway: str, channel_slug: str\n ) -> bool:\n method_name = \"token_is_required_as_payment_input\"\n default_value = True\n gtw = self.get_plugin(gateway, channel_slug=channel_slug)\n if gtw is not None:\n return self.__run_method_on_single_plugin(\n gtw,\n method_name,\n previous_value=default_value,\n )\n return default_value\n\n def get_client_token(\n self,\n gateway,\n token_config: \"TokenConfig\",\n channel_slug: str,\n ) -> str:\n method_name = \"get_client_token\"\n default_value = None\n gtw = self.get_plugin(gateway, channel_slug=channel_slug)\n return self.__run_method_on_single_plugin(\n gtw, method_name, default_value, token_config=token_config\n )\n\n def list_payment_sources(\n self,\n gateway: str,\n customer_id: str,\n channel_slug: str,\n ) -> list[\"CustomerSource\"]:\n default_value: list = []\n gtw = self.get_plugin(gateway, channel_slug=channel_slug)\n if gtw is not None:\n return self.__run_method_on_single_plugin(\n gtw, \"list_payment_sources\", default_value, customer_id=customer_id\n )\n raise Exception(f\"Payment plugin {gateway} is inaccessible!\")\n\n def list_stored_payment_methods(\n self, list_stored_payment_methods_data: \"ListStoredPaymentMethodsRequestData\"\n ) -> list[\"PaymentMethodData\"]:\n default_value: list = []\n return 
self.__run_method_on_plugins(\n \"list_stored_payment_methods\",\n default_value,\n list_stored_payment_methods_data,\n )\n\n def stored_payment_method_request_delete(\n self,\n request_delete_data: \"StoredPaymentMethodRequestDeleteData\",\n ) -> \"StoredPaymentMethodRequestDeleteResponseData\":\n default_response = StoredPaymentMethodRequestDeleteResponseData(\n result=StoredPaymentMethodRequestDeleteResult.FAILED_TO_DELIVER,\n error=\"Payment method request delete failed to deliver.\",\n )\n response = self.__run_method_on_plugins(\n \"stored_payment_method_request_delete\",\n default_response,\n request_delete_data,\n )\n return response\n\n def payment_gateway_initialize_tokenization(\n self,\n request_data: \"PaymentGatewayInitializeTokenizationRequestData\",\n ) -> \"PaymentGatewayInitializeTokenizationResponseData\":\n default_response = PaymentGatewayInitializeTokenizationResponseData(\n result=PaymentGatewayInitializeTokenizationResult.FAILED_TO_DELIVER,\n error=\"Payment gateway initialize tokenization failed to deliver.\",\n data=None,\n )\n\n response = self.__run_method_on_plugins(\n \"payment_gateway_initialize_tokenization\",\n default_response,\n request_data,\n )\n return response\n\n def payment_method_initialize_tokenization(\n self,\n request_data: \"PaymentMethodProcessTokenizationRequestData\",\n ) -> \"PaymentMethodTokenizationResponseData\":\n default_response = PaymentMethodTokenizationResponseData(\n result=PaymentMethodTokenizationResult.FAILED_TO_DELIVER,\n error=\"Payment method initialize tokenization failed to deliver.\",\n data=None,\n )\n\n response = self.__run_method_on_plugins(\n \"payment_method_initialize_tokenization\",\n default_response,\n request_data,\n )\n return response\n\n def payment_method_process_tokenization(\n self,\n request_data: \"PaymentMethodProcessTokenizationRequestData\",\n ) -> \"PaymentMethodTokenizationResponseData\":\n default_response = PaymentMethodTokenizationResponseData(\n result=PaymentMethodTokenizationResult.FAILED_TO_DELIVER,\n error=\"Payment method process tokenization failed to deliver.\",\n data=None,\n )\n\n response = self.__run_method_on_plugins(\n \"payment_method_process_tokenization\",\n default_response,\n request_data,\n )\n return response\n\n def translation_created(self, translation: \"Translation\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"translation_created\", default_value, translation\n )\n\n def translation_updated(self, translation: \"Translation\"):\n default_value = None\n return self.__run_method_on_plugins(\n \"translation_updated\", default_value, translation\n )\n\n def get_plugins(\n self, channel_slug: Optional[str] = None, active_only=False\n ) -> list[\"BasePlugin\"]:\n \"\"\"Return list of plugins for a given channel.\"\"\"\n if channel_slug:\n plugins = self.plugins_per_channel[channel_slug]\n else:\n plugins = self.all_plugins\n\n if active_only:\n plugins = [plugin for plugin in plugins if plugin.active]\n return plugins\n\n def list_payment_gateways(\n self,\n currency: Optional[str] = None,\n checkout_info: Optional[\"CheckoutInfo\"] = None,\n checkout_lines: Optional[Iterable[\"CheckoutLineInfo\"]] = None,\n channel_slug: Optional[str] = None,\n active_only: bool = True,\n ) -> list[\"PaymentGateway\"]:\n channel_slug = checkout_info.channel.slug if checkout_info else channel_slug\n plugins = self.get_plugins(channel_slug=channel_slug, active_only=active_only)\n payment_plugins = [\n plugin for plugin in plugins if \"process_payment\" in 
type(plugin).__dict__\n ]\n\n # if currency is given return only gateways which support given currency\n gateways = []\n for plugin in payment_plugins:\n gateways.extend(\n plugin.get_payment_gateways(\n currency=currency,\n checkout_info=checkout_info,\n checkout_lines=checkout_lines,\n previous_value=None,\n )\n )\n return gateways\n\n def list_shipping_methods_for_checkout(\n self,\n checkout: \"Checkout\",\n channel_slug: Optional[str] = None,\n active_only: bool = True,\n ) -> list[\"ShippingMethodData\"]:\n channel_slug = channel_slug if channel_slug else checkout.channel.slug\n plugins = self.get_plugins(channel_slug=channel_slug, active_only=active_only)\n shipping_plugins = [\n plugin\n for plugin in plugins\n if hasattr(plugin, \"get_shipping_methods_for_checkout\")\n ]\n\n shipping_methods = []\n for plugin in shipping_plugins:\n shipping_methods.extend(\n # https://github.com/python/mypy/issues/9975\n getattr(plugin, \"get_shipping_methods_for_checkout\")(checkout, None)\n )\n return shipping_methods\n\n def get_shipping_method(\n self,\n shipping_method_id: str,\n checkout: Optional[\"Checkout\"] = None,\n channel_slug: Optional[str] = None,\n ):\n if checkout:\n methods = {\n method.id: method\n for method in self.list_shipping_methods_for_checkout(\n checkout=checkout, channel_slug=channel_slug\n )\n }\n return methods.get(shipping_method_id)\n return None\n\n def list_external_authentications(self, active_only: bool = True) -> list[dict]:\n auth_basic_method = \"external_obtain_access_tokens\"\n plugins = self.get_plugins(active_only=active_only)\n return [\n {\"id\": plugin.PLUGIN_ID, \"name\": plugin.PLUGIN_NAME}\n for plugin in plugins\n if auth_basic_method in type(plugin).__dict__\n ]\n\n def __run_payment_method(\n self,\n gateway: str,\n method_name: str,\n payment_information: \"PaymentData\",\n channel_slug: str,\n **kwargs,\n ) -> \"GatewayResponse\":\n default_value = None\n plugin = self.get_plugin(gateway, channel_slug)\n if plugin is not None:\n resp = self.__run_method_on_single_plugin(\n plugin,\n method_name,\n previous_value=default_value,\n payment_information=payment_information,\n **kwargs,\n )\n if resp is not None:\n return resp\n\n raise Exception(\n f\"Payment plugin {gateway} for {method_name}\"\n \" payment method is inaccessible!\"\n )\n\n def __run_plugin_method_until_first_success(\n self,\n method_name: str,\n *args,\n channel_slug: Optional[str] = None,\n ):\n plugins = self.get_plugins(channel_slug=channel_slug)\n for plugin in plugins:\n result = self.__run_method_on_single_plugin(\n plugin, method_name, None, *args\n )\n if result is not None:\n return result\n return None\n\n def _get_all_plugin_configs(self):\n with opentracing.global_tracer().start_active_span(\"_get_all_plugin_configs\"):\n if not hasattr(self, \"_plugin_configs\"):\n plugin_configurations = PluginConfiguration.objects.prefetch_related(\n \"channel\"\n ).all()\n self._plugin_configs_per_channel: defaultdict[\n Channel, dict\n ] = defaultdict(dict)\n self._global_plugin_configs = {}\n for pc in plugin_configurations:\n channel = pc.channel\n if channel is None:\n self._global_plugin_configs[pc.identifier] = pc\n else:\n self._plugin_configs_per_channel[channel][pc.identifier] = pc\n return self._global_plugin_configs, self._plugin_configs_per_channel\n\n # FIXME these methods should be more generic\n\n def assign_tax_code_to_object_meta(self, obj: \"TaxClass\", tax_code: Optional[str]):\n default_value = None\n return self.__run_method_on_plugins(\n 
\"assign_tax_code_to_object_meta\", default_value, obj, tax_code\n )\n\n def get_tax_code_from_object_meta(\n self, obj: Union[\"Product\", \"ProductType\", \"TaxClass\"]\n ) -> TaxType:\n default_value = TaxType(code=\"\", description=\"\")\n return self.__run_method_on_plugins(\n \"get_tax_code_from_object_meta\", default_value, obj\n )\n\n def save_plugin_configuration(\n self, plugin_id, channel_slug: Optional[str], cleaned_data: dict\n ):\n if channel_slug:\n plugins = self.get_plugins(channel_slug=channel_slug)\n channel = (\n Channel.objects.using(self.database).filter(slug=channel_slug).first()\n )\n if not channel:\n return None\n else:\n channel = None\n plugins = self.global_plugins\n\n for plugin in plugins:\n if plugin.PLUGIN_ID == plugin_id:\n plugin_configuration, _ = PluginConfiguration.objects.using(\n self.database\n ).get_or_create(\n identifier=plugin_id,\n channel=channel,\n defaults={\"configuration\": plugin.configuration},\n )\n configuration = plugin.save_plugin_configuration(\n plugin_configuration, cleaned_data\n )\n configuration.name = plugin.PLUGIN_NAME\n configuration.description = plugin.PLUGIN_DESCRIPTION\n plugin.active = configuration.active\n plugin.configuration = configuration.configuration\n return configuration\n\n def get_plugin(\n self, plugin_id: str, channel_slug: Optional[str] = None\n ) -> Optional[\"BasePlugin\"]:\n plugins = self.get_plugins(channel_slug=channel_slug)\n for plugin in plugins:\n if plugin.check_plugin_id(plugin_id):\n return plugin\n return None\n\n def webhook_endpoint_without_channel(\n self, request: SaleorContext, plugin_id: str\n ) -> HttpResponse:\n # This should be removed in 3.0.0-a.25 as we want to give a possibility to have\n # no downtime between RCs\n split_path = request.path.split(plugin_id, maxsplit=1)\n path = None\n if len(split_path) == 2:\n path = split_path[1]\n\n default_value = HttpResponseNotFound()\n plugin = self.get_plugin(plugin_id)\n if not plugin:\n return default_value\n return self.__run_method_on_single_plugin(\n plugin, \"webhook\", default_value, request, path\n )\n\n def webhook(\n self, request: SaleorContext, plugin_id: str, channel_slug: Optional[str] = None\n ) -> HttpResponse:\n split_path = request.path.split(plugin_id, maxsplit=1)\n path = None\n if len(split_path) == 2:\n path = split_path[1]\n\n default_value = HttpResponseNotFound()\n plugin = self.get_plugin(plugin_id, channel_slug=channel_slug)\n if not plugin:\n return default_value\n\n if not plugin.active:\n return default_value\n\n if plugin.CONFIGURATION_PER_CHANNEL and not channel_slug:\n return HttpResponseNotFound(\n \"Incorrect endpoint. 
Use /plugins/channel/<channel_slug>/\"\n f\"{plugin.PLUGIN_ID}/\"\n )\n\n return self.__run_method_on_single_plugin(\n plugin, \"webhook\", default_value, request, path\n )\n\n def notify(\n self,\n event: \"NotifyEventTypeChoice\",\n payload: dict,\n channel_slug: Optional[str] = None,\n plugin_id: Optional[str] = None,\n ):\n default_value = None\n if plugin_id:\n plugin = self.get_plugin(plugin_id, channel_slug=channel_slug)\n return self.__run_method_on_single_plugin(\n plugin=plugin,\n method_name=\"notify\",\n previous_value=default_value,\n event=event,\n payload=payload,\n )\n return self.__run_method_on_plugins(\n \"notify\", default_value, event, payload, channel_slug=channel_slug\n )\n\n def external_obtain_access_tokens(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> ExternalAccessTokens:\n \"\"\"Obtain access tokens from authentication plugin.\"\"\"\n default_value = ExternalAccessTokens()\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_obtain_access_tokens\", default_value, data, request\n )\n\n def external_authentication_url(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> dict:\n \"\"\"Handle authentication request.\"\"\"\n default_value = {} # type: ignore\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_authentication_url\", default_value, data, request\n )\n\n def external_refresh(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> ExternalAccessTokens:\n \"\"\"Handle authentication refresh request.\"\"\"\n default_value = ExternalAccessTokens()\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_refresh\", default_value, data, request\n )\n\n def authenticate_user(self, request: SaleorContext) -> Optional[\"User\"]:\n \"\"\"Authenticate user which should be assigned to the request.\"\"\"\n default_value = None\n return self.__run_method_on_plugins(\"authenticate_user\", default_value, request)\n\n def external_logout(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> dict:\n \"\"\"Logout the user.\"\"\"\n default_value: dict[str, str] = {}\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_logout\", default_value, data, request\n )\n\n def external_verify(\n self, plugin_id: str, data: dict, request: SaleorContext\n ) -> tuple[Optional[\"User\"], dict]:\n \"\"\"Verify the provided authentication data.\"\"\"\n default_data: dict[str, str] = dict()\n default_user: Optional[\"User\"] = None\n default_value = default_user, default_data\n plugin = self.get_plugin(plugin_id)\n return self.__run_method_on_single_plugin(\n plugin, \"external_verify\", default_value, data, request\n )\n\n def excluded_shipping_methods_for_order(\n self,\n order: \"Order\",\n available_shipping_methods: list[\"ShippingMethodData\"],\n ) -> list[ExcludedShippingMethod]:\n return self.__run_method_on_plugins(\n \"excluded_shipping_methods_for_order\",\n [],\n order,\n available_shipping_methods,\n channel_slug=order.channel.slug,\n )\n\n def excluded_shipping_methods_for_checkout(\n self,\n checkout: \"Checkout\",\n available_shipping_methods: list[\"ShippingMethodData\"],\n ) -> list[ExcludedShippingMethod]:\n return self.__run_method_on_plugins(\n \"excluded_shipping_methods_for_checkout\",\n [],\n checkout,\n available_shipping_methods,\n channel_slug=checkout.channel.slug,\n )\n\n def perform_mutation(\n self, 
mutation_cls: Mutation, root, info: ResolveInfo, data: dict\n ) -> Optional[Union[ExecutionResult, GraphQLError]]:\n \"\"\"Invoke before each mutation is executed.\n\n This allows to trigger specific logic before the mutation is executed\n but only once the permissions are checked.\n\n Returns one of:\n - null if the execution shall continue\n - graphql.GraphQLError\n - graphql.execution.ExecutionResult\n \"\"\"\n return self.__run_method_on_plugins(\n \"perform_mutation\",\n default_value=None,\n mutation_cls=mutation_cls,\n root=root,\n info=info,\n data=data,\n )\n\n def is_event_active_for_any_plugin(\n self, event: str, channel_slug: Optional[str] = None\n ) -> bool:\n \"\"\"Check if any plugin supports defined event.\"\"\"\n plugins = (\n self.plugins_per_channel[channel_slug] if channel_slug else self.all_plugins\n )\n only_active_plugins = [plugin for plugin in plugins if plugin.active]\n return any([plugin.is_event_active(event) for plugin in only_active_plugins])\n\n def _get_channel_map(self):\n return {\n channel.pk: channel\n for channel in Channel.objects.using(self.database).all().iterator()\n }" }, { "identifier": "get_plugins_manager", "path": "saleor/plugins/manager.py", "snippet": "def get_plugins_manager(\n requestor_getter: Optional[Callable[[], \"Requestor\"]] = None,\n allow_replica=True,\n) -> PluginsManager:\n with opentracing.global_tracer().start_active_span(\"get_plugins_manager\"):\n return PluginsManager(settings.PLUGINS, requestor_getter, allow_replica)" }, { "identifier": "EmailTemplate", "path": "saleor/plugins/models.py", "snippet": "class EmailTemplate(models.Model):\n plugin_configuration = models.ForeignKey(\n PluginConfiguration, related_name=\"email_templates\", on_delete=models.CASCADE\n )\n name = models.CharField(max_length=255)\n value = models.TextField()\n\n def __str__(self):\n return self.name" }, { "identifier": "get_app_promise", "path": "saleor/graphql/app/dataloaders.py", "snippet": "def get_app_promise(context: SaleorContext) -> Promise[Optional[App]]:\n if hasattr(context, \"app\"):\n app = context.app\n if isinstance(app, LazyObject):\n app = unwrap_lazy(app)\n return Promise.resolve(app)\n\n return promise_app(context)" }, { "identifier": "SaleorContext", "path": "saleor/graphql/core/context.py", "snippet": "class SaleorContext(HttpRequest):\n _cached_user: Optional[User]\n decoded_auth_token: Optional[dict[str, Any]]\n allow_replica: bool = True\n dataloaders: dict[str, \"DataLoader\"]\n app: Optional[App]\n user: Optional[User] # type: ignore[assignment]\n requestor: Union[App, User, None]\n request_time: datetime.datetime" }, { "identifier": "DataLoader", "path": "saleor/graphql/core/dataloaders.py", "snippet": "class DataLoader(BaseLoader, Generic[K, R]):\n context_key: str\n context: SaleorContext\n database_connection_name: str\n\n def __new__(cls, context: SaleorContext):\n key = cls.context_key\n if key is None:\n raise TypeError(f\"Data loader {cls} does not define a context key\")\n if not hasattr(context, \"dataloaders\"):\n context.dataloaders = {}\n if key not in context.dataloaders:\n context.dataloaders[key] = super().__new__(cls)\n loader = context.dataloaders[key]\n assert isinstance(loader, cls)\n return loader\n\n def __init__(self, context: SaleorContext) -> None:\n if getattr(self, \"context\", None) != context:\n self.context = context\n self.database_connection_name = get_database_connection_name(context)\n super().__init__()\n\n def batch_load_fn( # pylint: disable=method-hidden\n self, keys: Iterable[K]\n ) -> 
Promise[list[R]]:\n with opentracing.global_tracer().start_active_span(\n self.__class__.__name__\n ) as scope:\n span = scope.span\n span.set_tag(opentracing.tags.COMPONENT, \"dataloaders\")\n results = self.batch_load(keys)\n if not isinstance(results, Promise):\n return Promise.resolve(results)\n return results\n\n def batch_load(self, keys: Iterable[K]) -> Union[Promise[list[R]], list[R]]:\n raise NotImplementedError()" } ]
from collections import defaultdict from functools import partial, wraps from promise import Promise from ...plugins.manager import PluginsManager, get_plugins_manager from ...plugins.models import EmailTemplate from ..app.dataloaders import get_app_promise from ..core import SaleorContext from ..core.dataloaders import DataLoader
17359
class EmailTemplatesByPluginConfigurationLoader(DataLoader): """Loads email templates by plugin configuration ID.""" context_key = "email_template_by_plugin_configuration" def batch_load(self, keys): email_templates = EmailTemplate.objects.using( self.database_connection_name ).filter(plugin_configuration_id__in=keys) config_to_template = defaultdict(list) for et in email_templates: config_to_template[et.plugin_configuration_id].append(et) return [config_to_template[key] for key in keys] class PluginManagerByRequestorDataloader(DataLoader): context_key = "plugin_manager_by_requestor" def batch_load(self, keys): allow_replica = getattr(self.context, "allow_replica", True)
class EmailTemplatesByPluginConfigurationLoader(DataLoader): """Loads email templates by plugin configuration ID.""" context_key = "email_template_by_plugin_configuration" def batch_load(self, keys): email_templates = EmailTemplate.objects.using( self.database_connection_name ).filter(plugin_configuration_id__in=keys) config_to_template = defaultdict(list) for et in email_templates: config_to_template[et.plugin_configuration_id].append(et) return [config_to_template[key] for key in keys] class PluginManagerByRequestorDataloader(DataLoader): context_key = "plugin_manager_by_requestor" def batch_load(self, keys): allow_replica = getattr(self.context, "allow_replica", True)
return [get_plugins_manager(lambda: key, allow_replica) for key in keys]
1
2023-11-13 05:00:35+00:00
24k
kampta/asic
train.py
[ { "identifier": "Logger", "path": "commons/logger.py", "snippet": "class Logger(SummaryWriter):\n\n def __init__(self, results_path, log_to_tb=False, log_to_wandb=True):\n super().__init__(results_path)\n self.results_path = results_path\n self.log_to_tb = log_to_tb\n self.log_to_wandb = log_to_wandb\n\n def _log_image_grid(self, images, logging_name, prefix, itr, range=(-1, 1),\n scale_each=False, nrow=None, **kwargs):\n nrow = max(1, int(len(images) ** 0.5+0.5)) if nrow is None else nrow\n if type(images[0]) is torch.Tensor:\n ndarr = images2grid(images, return_as_PIL=True, nrow=nrow,\n normalize=True, value_range=range,\n scale_each=scale_each, **kwargs)\n grid = Image.fromarray(ndarr)\n grid.save(f\"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png\")\n if self.log_to_wandb:\n wandb.log({logging_name: wandb.Image(grid)}, step=itr)\n else:\n grid = concat_v(*images)\n grid.save(f\"{self.results_path}/{logging_name}_{str(itr).zfill(7)}.png\")\n if self.log_to_wandb:\n wandb.log({logging_name: [wandb.Image(im) for im in images]}, step=itr)\n\n if self.log_to_tb:\n self.add_image(f\"{prefix}/{logging_name}\", ndarr, itr,\n dataformats='HWC')\n\n def log_image_grid(self, images, logging_name, itr, imgs_to_show,\n log_mean_img=True, mean_range=None, range=(-1, 1),\n scale_each=False, num_heads=1, nrow=None, **kwargs):\n self._log_image_grid(images[:imgs_to_show], logging_name, \"grids\", itr,\n range=range, scale_each=scale_each, nrow=nrow, **kwargs)\n if log_mean_img: # Log average images:\n images = images.reshape(images.size(0) // num_heads, num_heads,\n *images.size()[1:])\n self._log_image_grid(images.mean(dim=0), f'mean_{logging_name}',\n \"means\", itr, range=mean_range,\n scale_each=True, nrow=nrow)\n\n def add_scalar(self, tag, scalar_value, global_step=None, **kwargs):\n if self.log_to_wandb:\n wandb.log({tag: scalar_value}, step=global_step)\n return super().add_scalar(tag, scalar_value, global_step, **kwargs)\n\n def add_scalars(self, main_tag, tag_scalar_dict, global_step=None, **kwargs):\n if self.log_to_wandb:\n wandb.log(tag_scalar_dict, step=global_step)\n return super().add_scalars(main_tag, tag_scalar_dict, global_step, **kwargs)" }, { "identifier": "log_visuals", "path": "commons/logger.py", "snippet": "@torch.inference_mode()\ndef log_visuals(canon, stn, dset, train_idx, writer, vis_sample=2,\n vis_denseres=32):\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n pseudo_kps = dset.pseudo_kps\n parts = dset.parts\n vis_sample = min(vis_sample, len(dset))\n res = dset.img_size\n has_gt_kp = dset.kps is not None\n has_fixed_pairs = dset.fixed_pairs is not None # SPair\n\n # Run full test dataloader (assuming small dataset)\n all_imgs = dset.imgs\n all_masks = dset.masks\n all_kps = dset.kps\n all_flows, _ = stn(all_imgs)\n\n if has_gt_kp:\n kps_cols = torch.from_numpy(get_colors(all_kps.size(1))).float()\n kps_cols = map_minmax(kps_cols, 0, 1, -1, 1).to(device).unsqueeze(0)\n\n parts_cols = torch.from_numpy(get_colors(dset.num_parts+1)).float()\n parts_cols = map_minmax(parts_cols, 0, 1, -1, 1).to(device)\n parts_cols[-1] = 0\n\n # Text logging\n text_kp, text_kp_col = load_text_points('CVPR')\n text_kp = text_kp.to(device).unsqueeze(0)\n text_kp_col = text_kp_col.to(device).unsqueeze(0)\n\n pairs = sample_tuples(len(dset), count=vis_sample, seed=0)\n src_idx, trg_idx = pairs[:, 0], pairs[:, 1]\n\n # Log only once during the training\n if train_idx == 0:\n # Log images and the mask\n writer.log_image_grid(all_imgs[:vis_sample], 'img', train_idx,\n 
vis_sample, nrow=vis_sample)\n writer.log_image_grid(all_imgs[:vis_sample]*all_masks[:vis_sample],\n 'img_mask', train_idx, vis_sample, nrow=vis_sample)\n\n # Log neural best buddies (sparse)\n kp1 = pseudo_kps[src_idx, trg_idx]\n kp2 = pseudo_kps[trg_idx, src_idx]\n kp_vis = kp1[..., -1] * kp2[..., -1]\n kp1, kp2 = kp1[..., :2], kp2[..., :2]\n colors = map_minmax(get_dense_colors(kp1), 0, 1, -1, 1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n\n writer.log_image_grid(stacked, 'kp_pseudo_gt', train_idx, 2*vis_sample,\n log_mean_img=False, nrow=2)\n\n # Log parts\n parts_img = parts_cols[parts[:vis_sample]].permute(0, 3, 1, 2)\n writer.log_image_grid(parts_img, 'parts', train_idx, vis_sample,\n nrow=vis_sample, log_mean_img=False)\n\n # Log groundtruth kp\n if has_gt_kp:\n kp1, kp2 = all_kps[src_idx], all_kps[trg_idx]\n kp_vis = kp1[..., -1] * kp2[..., -1]\n kp1, kp2 = kp1[..., :2], kp2[..., :2]\n\n colors = kps_cols.expand(vis_sample, -1, -1)\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp2, sigma=3., opacity=1.0, colors=colors,\n alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n writer.log_image_grid(stacked, 'kp_gt', train_idx, 2*vis_sample,\n log_mean_img=False, nrow=2)\n\n # Log kp and top predictions by STN (if kp are available)\n if has_gt_kp:\n kp1 = all_kps[src_idx][..., :2]\n kp_vis = all_kps[src_idx][..., 2]\n\n kp_pred = stn.transfer_points(\n kp1, src_idx, trg_idx, all_flows, mask=all_masks, res=res, is_flow=True)\n colors = kps_cols.expand(vis_sample, -1, -1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp1, sigma=3., opacity=1.0,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n blend_trg = splat_points(\n all_imgs[trg_idx], kp_pred.float(), sigma=3., opacity=1.0,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n stacked = torch.stack([blend_src, blend_trg], dim=1).flatten(0, 1)\n writer.log_image_grid(stacked, 'kp_pred_sparse', train_idx,\n 2*vis_sample, log_mean_img=False, nrow=2)\n\n # Log current canon image\n canon_grid = canon.get_grid(vis_sample)\n if canon_grid.size(1) > 3:\n canon_grid = canon_grid[:, :3]\n scale_factor = res / canon_grid.size(-1)\n canon_grid = F.interpolate(\n canon_grid, scale_factor=scale_factor, mode='bilinear')\n writer.log_image_grid(canon_grid, 'canon', train_idx, 1, log_mean_img=False)\n\n # Log dense correspondences\n kp, kp_vis, kp_col_dense = load_fg_points(all_masks[src_idx],\n resolution=vis_denseres)\n kp_pred, kp_canon = stn.transfer_points(\n kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,\n return_canon=True, is_flow=True)\n colors = map_minmax(kp_col_dense, 0, 1, -1, 1)\n\n blend_src = splat_points(\n all_imgs[src_idx], kp, sigma=4., opacity=0.75,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n blend_trg = splat_points(\n all_imgs[trg_idx], kp_pred.float(), sigma=4., opacity=0.75,\n colors=colors, alpha_channel=kp_vis.unsqueeze(-1))\n\n blend_canon = splat_points(\n torch.ones_like(canon_grid) * -1, kp_canon, sigma=1.3, opacity=1.0,\n colors=colors, 
alpha_channel=kp_vis.unsqueeze(-1))\n stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\\\n flatten(0, 1)\n writer.log_image_grid(\n stacked, 'kp_pred_dense', train_idx, 3*vis_sample,\n log_mean_img=False, nrow=3)\n\n # # Log dense correspondences with text\n # text_kp = text_kp.expand(vis_sample, -1, -1)\n # text_kp_col = text_kp_col.expand(vis_sample, -1, -1)\n # kp_pred, kp_canon = stn.transfer_points(\n # text_kp, src_idx, trg_idx, all_flows, mask=all_masks, res=res,\n # return_canon=True, is_flow=True)\n\n # blend_src = splat_points(all_imgs[src_idx], text_kp, sigma=0.7, opacity=1.,\n # colors=text_kp_col)\n\n # blend_trg = splat_points(all_imgs[trg_idx], kp_pred.float(), sigma=0.7,\n # opacity=1., colors=text_kp_col)\n\n # blend_canon = splat_points(torch.ones_like(canon_grid) * -1, kp_canon,\n # sigma=0.7, opacity=1., colors=text_kp_col)\n\n # stacked = torch.stack([blend_src, blend_canon, blend_trg], dim=1).\\\n # flatten(0, 1)\n # writer.log_image_grid(\n # stacked, 'kp_pred_text', train_idx, 3*vis_sample,\n # log_mean_img=False, nrow=3)\n\n # Log dense mapping from canonical space to Image space\n wheel = color_wheel_fast_smooth(res).permute(2, 0, 1).unsqueeze(0).to(device)\n colors = wheel.expand(vis_sample, -1, -1, -1)\n flow, _ = stn(all_imgs[src_idx])\n colors = F.grid_sample(colors, flow, padding_mode='border',\n align_corners=True)\n colors = map_minmax(colors, 0, 1, -1, 1)\n alpha = 0.5\n blend_img = alpha * all_imgs[src_idx] * (1-all_masks[src_idx]) + \\\n (all_imgs[src_idx] * alpha + colors * (1-alpha)) * all_masks[src_idx]\n blend_img = torch.cat([wheel, blend_img, wheel, colors* all_masks[src_idx]])\n writer.log_image_grid(blend_img, 'canon_map', train_idx, len(blend_img),\n log_mean_img=False, nrow=len(blend_img)//2)\n\n # Log keypoints from Image space to canonical space\n if has_gt_kp:\n canon_corrs = stn.transfer_forward(all_flows, all_kps[..., :2], res, is_flow=True)\n canon_corrs = stn.unnormalize(canon_corrs, res, res)\n canon_vis = all_kps[..., -1]\n num_kp = canon_vis.size(-1)\n N = canon_vis.size(0)\n colors = kps_cols.permute(1, 0, 2).expand(-1, N, -1).to(device)\n heatmaps = splat_points(\n torch.ones(num_kp, 3, res, res, device=device) * -1,\n canon_corrs.permute(1, 0, 2), sigma=6., opacity=1.,\n colors=colors, alpha_channel=canon_vis.permute(1, 0).unsqueeze(-1))\n writer.log_image_grid(heatmaps, 'kp_heatmaps', train_idx,\n num_kp, padding=2, pad_value=1.)\n\n # Log parts from Image space to canonical space\n # Splat one part at a time to canonical\n # TODO: splat all at once\n num_parts = dset.num_parts\n part_kp_canons = []\n part_kp_vis = [] \n for part in range(num_parts):\n part_masks = (parts == part).float().unsqueeze(1)\n kp, kp_vis, _ = load_fg_points(part_masks, resolution=vis_denseres)\n kp_canon = stn.transfer_forward(all_flows, kp[..., :2], res, is_flow=True)\n kp_canon = stn.unnormalize(kp_canon, res, res)\n part_kp_canons.append(kp_canon.reshape(-1, 2))\n part_kp_vis.append(kp_vis.reshape(-1))\n\n part_kp_canons = torch.stack(part_kp_canons)\n part_kp_vis = torch.stack(part_kp_vis)\n colors = parts_cols[:-1].unsqueeze(1).expand(-1, part_kp_vis.size(1), -1)\n heatmaps = splat_points(\n torch.ones(num_parts, 3, res, res, device=device) * -1,\n part_kp_canons, sigma=2., opacity=1.,\n colors=colors, alpha_channel=part_kp_vis.unsqueeze(-1))\n writer.log_image_grid(heatmaps, 'part_heatmaps', train_idx,\n num_parts, padding=2, pad_value=1.)\n\n # Compute PCKs\n N = all_imgs.size(0)\n transfer_fn = stn.transfer_points\n pck_pairs = 
None\n if has_gt_kp:\n # First compute PCK for all 2-pairs\n if has_fixed_pairs:\n tuples = dset.fixed_pairs\n if dset.thresholds is not None:\n thresholds = [torch.from_numpy(dset.thresholds)[tuples[:, 1]]]\n else:\n thresholds = None\n else:\n tuples = sample_tuples(N)\n thresholds = None\n print(f\"First computing 2-point PCK for {len(tuples)} pairs\")\n gt_corrs, pred_corrs, vis = pck_loop(\n tuples, all_kps, transfer_fn, all_flows, all_masks, res,\n return_canon=False, is_flow=True)\n pck_pairs = compute_pck(pred_corrs, gt_corrs, vis, thresholds,\n img_size=res)\n\n # Compute k-cycle PCK\n pck_cycles = []\n if not has_gt_kp:\n kp, kp_vis, kp_col_dense = load_fg_points(all_masks,\n resolution=vis_denseres)\n ignore_idx = kp_vis.sum(dim=0) == 0\n all_kps = torch.cat([kp[:, ~ignore_idx], kp_vis[:, ~ignore_idx].unsqueeze(-1)], dim=2)\n ignore_interim = True\n else:\n ignore_interim = False\n\n for k in [2, 3, 4]:\n tuples = sample_tuples(N, k=k, count=200)\n if has_fixed_pairs and dset.thresholds is not None:\n thresholds = torch.from_numpy(dset.thresholds[tuples[:, 1:]])\n thresholds = thresholds.reshape(-1)\n else:\n thresholds = None\n print(f\"Next computing {k}-cycle PCK for {len(tuples)} tuples\")\n gt_corrs, pred_corrs, vis = pck_loop(\n tuples, all_kps, transfer_fn, all_flows, all_masks, res,\n return_canon=False, is_flow=True, ignore_interim=ignore_interim)\n pck = compute_pck(pred_corrs, gt_corrs, vis, thresholds, img_size=res)\n pck_cycles.append(pck)\n\n return pck_pairs, pck_cycles" }, { "identifier": "get_rank", "path": "commons/distributed.py", "snippet": "def get_rank():\n if not dist.is_available():\n return 0\n\n if not dist.is_initialized():\n return 0\n\n return dist.get_rank()" }, { "identifier": "setup_distributed", "path": "commons/distributed.py", "snippet": "def setup_distributed():\n local_rank = int(os.environ['LOCAL_RANK']) if 'LOCAL_RANK' in os.environ else 0\n n_gpu = int(os.environ['WORLD_SIZE']) if 'WORLD_SIZE' in os.environ else 1\n is_distributed = n_gpu > 1\n if is_distributed:\n torch.cuda.set_device(local_rank)\n dist.init_process_group(backend=\"nccl\", init_method=\"env://\")\n synchronize()\n return is_distributed" }, { "identifier": "reduce_loss_dict", "path": "commons/distributed.py", "snippet": "def reduce_loss_dict(loss_dict):\n world_size = get_world_size()\n\n if world_size < 2:\n return loss_dict\n\n with torch.no_grad():\n keys = []\n losses = []\n\n for k in sorted(loss_dict.keys()):\n keys.append(k)\n losses.append(loss_dict[k])\n\n losses = torch.stack(losses, 0)\n dist.reduce(losses, dst=0)\n\n if dist.get_rank() == 0:\n losses /= world_size\n\n reduced_losses = {k: v for k, v in zip(keys, losses)}\n\n return reduced_losses" }, { "identifier": "get_world_size", "path": "commons/distributed.py", "snippet": "def get_world_size():\n if not dist.is_available():\n return 1\n\n if not dist.is_initialized():\n return 1\n\n return dist.get_world_size()" }, { "identifier": "primary", "path": "commons/distributed.py", "snippet": "def primary():\n if not dist.is_available():\n return True\n\n if not dist.is_initialized():\n return True\n\n return get_rank() == 0" }, { "identifier": "sample_tuples", "path": "commons/utils.py", "snippet": "def sample_tuples(N, k=1, count=None, seed=None):\n\n if seed is not None:\n np.random.seed(seed)\n\n if count is None: # return all possible (k+1) permutations\n # (N!/(N-k)!) 
x k array\n samples = np.array(list(permutations(range(N), k+1)))\n\n elif k == 1:\n p1 = np.random.choice(N, count)\n p2 = np.random.choice(N, count)\n return np.stack([p1, p2], axis=1)\n\n elif count == -1:\n samples = np.array(list(permutations(range(N), k)))\n samples = np.concatenate([samples, samples[:, 0].reshape(-1, 1)], axis=1)\n\n else: # sample count number of permutations\n # count x k array\n samples = np.zeros((count, k+1), dtype=int)\n for i in range(count):\n samples[i, :k] = np.random.choice(N, k, replace=False)\n # Force the last column to be same as the first column\n samples[:, k] = samples[:, 0]\n\n return samples" }, { "identifier": "CUBDataset", "path": "datasets/cub.py", "snippet": "class CUBDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, cls_idx=1,\n flow_dir=None, num_parts=0,\n mask_threshold=1, use_coseg_masks=False, padding_mode='border'):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cls_idx = cls_idx\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n self.fixed_pairs = None\n self.thresholds = None\n self.border = True if padding_mode=='border' else False\n\n os.makedirs(data_dir, exist_ok=True)\n download_cub(data_dir)\n download_cub_metadata(data_dir)\n\n self.files, self.bboxes, self.kps, self.masks = load_acsm_data(\n data_dir, size=img_size, split=split, cls_idx=cls_idx)\n\n imgs = []\n for i in range(len(self.files)):\n img = Image.open(self.files[i]).convert('RGB')\n img = cub_crop(img, self.img_size, self.bboxes[i], border=self.border)\n imgs.append(torch.from_numpy(np.array(img)).permute(2, 0, 1))\n self.imgs = torch.stack(imgs) / 127.5 - 1.0 # normalize (-1, 1)\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) > mask_threshold).float()\n\n self.parts = None\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n self.pseudo_kps = None\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "InMemoryDataset", "path": "datasets/in_memory.py", "snippet": "class InMemoryDataset(Dataset):\n def __init__(self, data_dir, img_size=256, flow_dir=None,\n num_parts=0, mask_threshold=1, use_coseg_masks=False,\n every_k=1):\n\n self.img_size = img_size\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n transforms.Resize(img_size),\n transforms.CenterCrop(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n files = []\n imgs = []\n for base_dir, dirnames, filenames in os.walk(data_dir):\n if len(dirnames) > 0:\n continue\n for f in sorted(filenames):\n if not f.lower().endswith(('.png', '.jpg', '.jpeg')):\n continue\n filename = Path(base_dir) / f\n files.append(filename)\n img = Image.open(filename).convert('RGB')\n imgs.append(transform(img))\n \n self.files = files[::every_k]\n self.imgs = torch.stack(imgs[::every_k])\n\n self.kps = None\n self.fixed_pairs = None\n self.thresholds = None\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{self.files[i].stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{self.files[i].stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n\n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "SpairDataset", "path": "datasets/spair.py", "snippet": "class SpairDataset(Dataset):\n def __init__(self, data_dir, split='test', img_size=256, spair_cat='cat',\n flow_dir=None, padding_mode='edge', num_parts=0,\n mask_threshold=1, use_coseg_masks=False):\n super().__init__()\n self.img_size = img_size\n self.split = split\n self.cat = spair_cat\n self.padding_mode = padding_mode\n self.flow_dir = flow_dir\n self.num_parts = num_parts\n self.mask_threshold = mask_threshold\n\n normalize = transforms.Normalize(mean=[0.5, 0.5, 0.5],\n std=[0.5, 0.5, 0.5])\n transform = transforms.Compose([\n SquarePad(padding_mode),\n transforms.Resize(img_size),\n transforms.ToTensor(),\n normalize,\n ])\n\n os.makedirs(data_dir, exist_ok=True)\n spair_dir = download_spair(data_dir)\n\n self.files, self.kps, fixed_pairs, thresholds = load_spair_data(\n spair_dir, size=img_size, split=split, category=spair_cat)\n imgs = [transform(Image.open(self.files[i]).convert('RGB'))\n for i in range(len(self))]\n self.imgs = torch.stack(imgs)\n self.fixed_pairs = np.array(fixed_pairs)\n self.thresholds = np.array(thresholds)\n\n self.masks = torch.ones(len(self), 1, img_size, img_size)\n self.pseudo_kps = None\n self.parts = None\n\n # Load masks\n if flow_dir is not None:\n if use_coseg_masks:\n mask_dir = Path(flow_dir) / 'masks_coseg'\n else:\n mask_dir = Path(flow_dir) / 'masks'\n assert mask_dir.exists(), f\"{mask_dir} doesn't exist\"\n masks = []\n for i in range(0, len(self)):\n fname = mask_dir / f'{Path(self.files[i]).stem}.png'\n mask = np.array(Image.open(fname).convert('L'))\n masks.append(mask)\n self.masks = torch.from_numpy(np.stack(masks) >= mask_threshold).float()\n\n # Load parts\n if flow_dir is not None:\n parts_str = 'parts' if num_parts <=0 else f'parts_num{num_parts}'\n parts_dir = Path(flow_dir) / f'{parts_str}'\n if parts_dir.exists():\n parts = []\n for i in range(0, len(self)):\n fname = parts_dir / f'parts_s2_{Path(self.files[i]).stem}.npy'\n part = np.load(fname)\n parts.append(part)\n parts = np.stack(parts)\n num_parts = int(np.max(parts[~np.isnan(parts)])) + 1\n parts[np.isnan(parts)] = num_parts\n\n self.parts = torch.from_numpy(parts.astype(np.int64))\n else:\n print(f\"{parts_dir} doesn't exist. Parts won't load.\")\n self.num_parts = num_parts\n # self.parts = F.one_hot(parts, num_classes=num_parts+1).bool()\n \n # Load pseudo keypoints\n if flow_dir is not None:\n nbb_dir = Path(flow_dir) / 'nbb'\n if nbb_dir.exists():\n self.pseudo_kps = load_nbb(nbb_dir, self.files, self.parts)\n max_matches = self.pseudo_kps.shape[2]\n print(f'Max #matches between an image pair: {max_matches}')\n else:\n print(f\"{nbb_dir} doesn't exist. 
Pseudo kps won't load.\")\n\n def __len__(self):\n return len(self.files)" }, { "identifier": "Augmentor", "path": "datasets/utils.py", "snippet": "class Augmentor(nn.Module):\n def __init__(self, jitter=[0.4, 0.4, 0.2, 0.1], jitter_prob=0.8,\n gray_prob=0.2, solar_prob=0.2, tps_scale=0.4):\n super().__init__()\n self.color_transform = K.AugmentationSequential(\n # https://github.com/facebookresearch/dino/blob/main/main_dino.py#L424\n K.ColorJitter(brightness=jitter[0], contrast=jitter[1],\n saturation=jitter[2], hue=jitter[3], p=jitter_prob),\n K.RandomGrayscale(p=gray_prob),\n K.RandomGaussianBlur((3, 3), (0.1, 2.0), p=0.1),\n K.RandomSolarize(0.1, 0.1, p=solar_prob),\n )\n\n self.perspective_transform = K.RandomPerspective(0.5, p=1.)\n self.affine_transform = K.RandomAffine(30, scale=(0.7, 1.1),\n padding_mode='border', p=1.0)\n self.elastic_transform = K.RandomElasticTransform(\n p=1.0, sigma=(16., 16.), alpha=(3, 3), padding_mode='border')\n\n # TPS doesn't support transforming points\n # Using it only for dense equivariance loss\n self.tps_transform = K.RandomThinPlateSpline(scale=tps_scale, p=1.)\n\n def forward(self, x):\n pass\n\n @torch.no_grad()\n def forward_color(self, img):\n return self.color_transform(img)\n\n @torch.no_grad()\n def forward_tps(self, img, fixed=False):\n if fixed:\n img_t = self.tps_transform(img, params=self.tps_transform._params)\n else:\n img_t = self.tps_transform(img)\n return img_t\n \n @torch.no_grad()\n def forward_geom(self, img, fixed=False):\n if fixed:\n img_t = self.elastic_transform(\n self.affine_transform(img, params=self.affine_transform._params),\n params=self.elastic_transform._params)\n else:\n img_t = self.elastic_transform(self.affine_transform(img))\n return img_t\n\n\n @torch.no_grad()\n def forward_perspective(self, img, fixed=False):\n if fixed:\n img_t = self.perspective_transform(img, params=self.perspective_transform._params)\n else:\n img_t = self.perspective_transform(img)\n return img_t\n\n @torch.no_grad()\n def forward_perspective_kp(self, kp):\n return kornia.geometry.transform_points(\n self.perspective_transform.transform_matrix, kp)" }, { "identifier": "accumulate", "path": "models/utils.py", "snippet": "def accumulate(model1, model2, decay=0.999):\n par1 = dict(model1.named_parameters())\n par2 = dict(model2.named_parameters())\n\n for k in par1.keys():\n par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)" }, { "identifier": "requires_grad", "path": "models/utils.py", "snippet": "def requires_grad(model, flag=True):\n for p in model.parameters():\n p.requires_grad = flag" }, { "identifier": "Canonical", "path": "models/canonical.py", "snippet": "class Canonical(nn.Module):\n def __init__(self, size, std=0.1, clamp=True):\n super().__init__()\n mean = torch.zeros(size)\n std = torch.ones(size) * std\n self.grid = nn.Parameter(torch.normal(mean=mean, std=std),\n requires_grad=True)\n norm_class = Normalize()\n norm_class.apply(self.grid)\n if clamp:\n clamp_class = Clamp()\n clamp_class.apply(self.grid)\n\n def get_grid(self, N):\n return self.grid.expand(N, -1, -1, -1)\n\n def unwarp(self, flow, sample_res=256):\n N = flow.size(0)\n if sample_res is not None and sample_res != flow.size(1):\n scale_factor = sample_res / flow.size(1)\n sample_flow = F.interpolate(\n flow.permute(0, 3, 1, 2), scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n else:\n sample_flow = flow\n warped_img = F.grid_sample(\n self.get_grid(N), sample_flow,\n padding_mode='border', align_corners=True)\n return 
warped_img\n\n def forward(self, x):\n return x" }, { "identifier": "CanonicalMLP", "path": "models/canonical.py", "snippet": "class CanonicalMLP(nn.Module):\n def __init__(self, input_dim=2, output_dim=3, hidden_dim=256,\n use_positional=True, positional_dim=10,\n skip_layers=[4, 7], num_layers=8, resolution=256,\n use_tanh=True, apply_softmax=False):\n super().__init__()\n self.use_tanh = use_tanh\n self.resolution = resolution\n self.apply_softmax = apply_softmax\n self.output_dim = output_dim\n if apply_softmax:\n self.softmax= nn.Softmax()\n if use_positional:\n encoding_dimensions = 2 * input_dim * positional_dim\n self.b = nn.Parameter(\n torch.tensor([(2 ** j) * np.pi\n for j in range(positional_dim)], requires_grad = False))\n else:\n encoding_dimensions = input_dim\n\n self.hidden = nn.ModuleList()\n for i in range(num_layers):\n if i == 0:\n input_dims = encoding_dimensions\n elif i in skip_layers:\n input_dims = hidden_dim + encoding_dimensions\n else:\n input_dims = hidden_dim\n\n if i == num_layers - 1:\n # last layer\n self.hidden.append(nn.Linear(input_dims, output_dim, bias=True))\n else:\n self.hidden.append(nn.Linear(input_dims, hidden_dim, bias=True))\n\n self.skip_layers = skip_layers\n self.num_layers = num_layers\n\n self.positional_dim = positional_dim\n self.use_positional = use_positional\n\n def get_grid(self, N, device='cuda'):\n resolution = self.resolution\n indsy = torch.linspace(0, resolution-1, resolution, device=device)\n indsx = torch.linspace(0, resolution-1, resolution, device=device)\n\n # Keep (x, y) indexing to make it consistent with the flow\n points = torch.stack(\n torch.meshgrid(indsx, indsy, indexing='xy'), dim=-1).reshape(-1, 2)\n\n with torch.no_grad():\n grid = self(points)\n\n grid = grid.reshape(1, resolution, resolution, self.output_dim)\n grid = grid.permute(0, 3, 1, 2)\n return grid.expand(N, -1, -1, -1)\n\n def unwarp(self, flow, sample_res=256):\n N = flow.size(0)\n # Output of flow model is usually normalized between -1 and 1\n # So we need to first scale it up to self.resolution\n flow = map_minmax(flow, -1, 1, 0, self.resolution-1)\n\n # Resize flow if computed at a lower resolution\n if sample_res is not None and sample_res != flow.size(1):\n scale_factor = sample_res / flow.size(1)\n sample_flow = F.interpolate(\n flow.permute(0, 3, 1, 2), scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n else:\n sample_flow = flow\n\n # Unwarp\n warped_img = self(sample_flow.reshape(-1, 2))\n warped_img = warped_img.reshape(N, sample_res, sample_res, -1)\n warped_img = warped_img.permute(0, 3, 1, 2)\n return warped_img\n\n def forward(self, x):\n if self.use_positional:\n if self.b.device != x.device:\n self.b = self.b.to(x.device)\n pos = positionalEncoding_vec(x, self.b)\n x = pos\n\n input = x.detach().clone()\n for i, layer in enumerate(self.hidden):\n if i > 0:\n x = F.relu(x)\n if i in self.skip_layers:\n x = torch.cat((x, input), 1)\n x = layer(x)\n\n if self.use_tanh:\n x = torch.tanh(x)\n\n if self.apply_softmax:\n x = self.softmax(x)\n return x" }, { "identifier": "Asic", "path": "models/asic.py", "snippet": "class Asic(nn.Module):\n def __init__(self, in_ch, in_size, mf=1., bilinear=False,\n padding_mode='zeros', use_tanh=False):\n super().__init__()\n self.model = UNet(in_ch, 2, mf=mf, bilinear=bilinear)\n self.size = in_size\n self.register_buffer('identity_flow', self.get_identity_flow())\n self.padding_mode = padding_mode\n self.use_tanh = use_tanh\n\n def get_identity_flow(self):\n return F.affine_grid(\n 
torch.eye(2, 3).unsqueeze(0), (1, 1, self.size, self.size),\n align_corners=True).permute(0, 3, 1, 2).contiguous()\n\n def forward(self, x):\n if self.use_tanh:\n flow = torch.tanh(self.model(x))\n delta_flow = flow - self.identity_flow\n else:\n delta_flow = self.model(x) # (N, 2, H, W)\n flow = self.identity_flow + delta_flow\n\n flow = flow.permute(0, 2, 3, 1)\n delta_flow = delta_flow.permute(0, 2, 3, 1)\n return flow, delta_flow\n\n @torch.no_grad()\n def transfer_points(self, src_kps, src_idx, trg_idx, img, mask=None,\n res=None, return_canon=False, is_flow=False):\n # src_kps are N x P x 2 (in xy format)\n\n # Compute flow from images\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n\n # Step 1: Map the points in src to the canonical space\n max_batch_size = 2\n if src_kps.size(0) > max_batch_size:\n N = len(src_kps)\n points_canon = []\n for start_idx in range(0, N, max_batch_size):\n end_idx = min(start_idx+max_batch_size, N)\n\n points_canon_batch = self.transfer_forward(\n flow[src_idx[start_idx:end_idx]],\n src_kps[start_idx:end_idx], res=res, is_flow=True)\n points_canon.append(points_canon_batch)\n points_canon = torch.cat(points_canon, dim=0)\n else:\n points_canon = self.transfer_forward(flow[src_idx], src_kps,\n res=res, is_flow=True)\n # points_canon = torch.clamp(points_canon, min=-1, max=1)\n\n # Step 2: Map the points in the canonical space to trg\n # This is a memory intensive step, so do a single image at a time\n # if the number of points are large\n if src_kps.size(1) > 256 or src_kps.size(0) > max_batch_size:\n N = len(src_kps)\n points_transfered = []\n for start_idx in range(0, N, max_batch_size):\n end_idx = min(start_idx+max_batch_size, N)\n points_transfered_single = self.transfer_reverse(\n flow[[trg_idx[start_idx:end_idx]]],\n points_canon[start_idx:end_idx], res=res,\n mask=mask[trg_idx[start_idx:end_idx]], is_flow=True)\n points_transfered.append(points_transfered_single)\n points_transfered = torch.cat(points_transfered, dim=0)\n else:\n points_transfered = self.transfer_reverse(\n flow[trg_idx], points_canon, res=res, mask=mask[trg_idx],\n is_flow=True)\n\n if return_canon:\n points_canon = self.unnormalize(points_canon, res, res)\n return points_transfered, points_canon\n else:\n return points_transfered\n\n def transfer_forward(self, img, points, res=None, is_flow=False):\n\n # TODO: currently points generated by load_fg_points are not\n # scaled properly. 
Take a look\n # TODO: Also double check normalize and unnormalize logic\n # points are N x P x 2 (in xy format)\n # assume that the flow is also xy format\n points = self.normalize(points, res, res)\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n flow_grid = flow.permute(0, 3, 1, 2)\n points_transfered = F.grid_sample(\n flow_grid, points.unsqueeze(2).float(),\n padding_mode='border', align_corners=True)\n points_transfered = points_transfered.squeeze(3).permute(0, 2, 1)\n\n return points_transfered\n\n def transfer_reverse(self, img, points, res=None, mask=None, is_flow=False):\n N = points.size(0)\n num_points = points.size(1)\n # points are N x P x 2 (in xy format)\n points = points\n if is_flow:\n flow = img\n else:\n flow, _ = self(img)\n if flow.size(1) != res:\n scale_factor = res/flow.size(1)\n flow = F.interpolate(\n flow.permute(0, 3, 1, 2),\n scale_factor=scale_factor,\n mode='bilinear').permute(0, 2, 3, 1)\n # From (N, H, W, 2) to (N, H, W, 1, 1, 2)\n flow_reshaped = flow.unsqueeze(-2).unsqueeze(-2)\n\n # From (N, num_points, 2) to (N, 1, 1, num_points, 2, 1)\n points = points.unsqueeze(1).unsqueeze(1).unsqueeze(-1)\n\n # (N, H, W, num_points)\n similarities = (flow_reshaped @ points)[..., 0, 0]\n distances = points.pow(2).squeeze(-1).sum(dim=-1) + \\\n flow_reshaped.pow(2).sum(dim=-1).squeeze(-1) - 2 * similarities\n\n if mask is not None:\n distances[mask.squeeze(1)<0.1] = float('inf')\n\n nearest_neighbors = distances.reshape(\n N, flow_reshaped.size(1) * flow_reshaped.size(2),\n num_points).argmin(dim=1)\n points_transfered = unravel_index(\n nearest_neighbors, (flow_reshaped.size(1), flow_reshaped.size(2)))\n return points_transfered\n\n @staticmethod\n def normalize(points, res, out_res):\n return points.div(out_res - 1).add(-0.5).mul(2).mul((res - 1) / res)\n\n @staticmethod\n def unnormalize(points, res, out_res):\n return points.div((res - 1) / res).div(2).add(0.5).mul(out_res - 1)" }, { "identifier": "total_variation_loss", "path": "losses/reg_losses.py", "snippet": "def total_variation_loss(delta_flow, reduce_batch=True):\n # flow should be size (N, H, W, 2)\n reduce_dims = (0, 1, 2, 3) if reduce_batch else (1, 2, 3)\n distance_fn = lambda a: torch.where(a <= 1.0, 0.5 * a.pow(2), a - 0.5).mean(dim=reduce_dims)\n # assert delta_flow.size(-1) == 2\n diff_y = distance_fn((delta_flow[:, :-1, :, :] - delta_flow[:, 1:, :, :]).abs())\n diff_x = distance_fn((delta_flow[:, :, :-1, :] - delta_flow[:, :, 1:, :]).abs())\n loss = diff_x + diff_y\n return loss" }, { "identifier": "get_perceptual_loss", "path": "thirdparty/lpips/lpips.py", "snippet": "def get_perceptual_loss(loss_fn, device):\n if loss_fn == 'vgg_ssl':\n download_model('simclr_vgg_phase150') # Download the weights\n loss_fn_vgg = LPIPS(net='vgg', lpips=False, pnet_rand=True, pretrained_weights='pretrained/simclr_vgg_phase150.pt').to(device)\n loss_fn = lambda x,y: loss_fn_vgg(x, y) / 18.0\n elif loss_fn == 'lpips':\n download_lpips() # Download LPIPS weights\n loss_fn = LPIPS(net='vgg').to(device)\n else:\n raise NotImplementedError\n return loss_fn" }, { "identifier": "LossCorrsSparse", "path": "losses/matching_losses.py", "snippet": "class LossCorrsSparse(nn.Module):\n def __init__(self, extractor=None, flow_size=256, T=1.0):\n super().__init__()\n self.extractor = extractor\n self.flow_size = flow_size\n self.T = T\n self.dist_fn = nn.PairwiseDistance(p=2)\n self.loss_fn = nn.CrossEntropyLoss(reduction='none')\n\n def forward(self, src_flow, trg_flow, src_kp, trg_kp, kp_vis, kp_wt):\n N = 
src_flow.size(0)\n res = src_flow.size(1)\n top_k = kp_vis.shape[1]\n # bb1_canon - N x 2 x top_k x 1\n # bb2_canon - N x 2 x 1 x top_k\n # Sample flow values using the pseudo GT from the flow_grid\n src_kp_canon = F.grid_sample(\n src_flow.permute(0, 3, 1, 2),\n map_minmax(src_kp.unsqueeze(2), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n trg_kp_canon = F.grid_sample(\n trg_flow.permute(0, 3, 1, 2),\n map_minmax(trg_kp.unsqueeze(1), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n\n # dists - N x top_k x top_k\n dists1 = self.dist_fn(src_kp_canon, trg_kp_canon.detach()) * (-1/self.T)\n dists2 = self.dist_fn(src_kp_canon.detach(), trg_kp_canon) * (-1/self.T)\n labels = torch.arange(top_k, dtype=torch.long, device='cuda')\n labels = labels.unsqueeze(0).repeat(N, 1)\n labels[~kp_vis] = -100\n \n loss = self.loss_fn(dists1, labels) + self.loss_fn(dists2, labels)\n loss *= kp_wt\n return loss.sum() / kp_vis.sum()\n\n def forward_eq(self, src_flow, trg_flow, src_kp, trg_kp, kp_vis):\n N = src_flow.size(0)\n res = src_flow.size(1)\n top_k = kp_vis.shape[1]\n # bb1_canon - N x 2 x top_k x 1\n # bb2_canon - N x 2 x 1 x top_k\n # Sample flow values using the pseudo GT from the flow_grid\n src_kp_canon = F.grid_sample(\n src_flow.permute(0, 3, 1, 2),\n map_minmax(src_kp.unsqueeze(2), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n trg_kp_canon = F.grid_sample(\n trg_flow.permute(0, 3, 1, 2),\n map_minmax(trg_kp.unsqueeze(1), 0, res-1, -1, 1), mode='bilinear',\n padding_mode='zeros', align_corners=True).permute(0, 2, 3, 1)\n\n # dists - N x top_k x top_k\n dists1 = self.dist_fn(src_kp_canon, trg_kp_canon.detach()) * (-1/self.T)\n dists2 = self.dist_fn(src_kp_canon.detach(), trg_kp_canon) * (-1/self.T)\n labels = torch.arange(top_k, dtype=torch.long, device='cuda')\n labels = labels.unsqueeze(0).repeat(N, 1)\n labels[~kp_vis] = -100\n return self.loss_fn(dists1, labels).mean() + self.loss_fn(dists2, labels).mean()" }, { "identifier": "DecayingCosineAnnealingWarmRestarts", "path": "thirdparty/gangealing/annealing.py", "snippet": "class DecayingCosineAnnealingWarmRestarts(_LRScheduler):\n r\"\"\"Set the learning rate of each parameter group using a cosine annealing\n schedule, where :math:`\\eta_{max}` is set to the initial lr,\n :math:`T_{cur}` is the number of epochs since the last restart and\n :math:`T_{i}` is the number of epochs between two warm restarts in SGDR:\n .. math::\n \\eta_t = \\eta_{min} + \\frac{1}{2}(\\eta_{max} - \\eta_{min})\\left(1 +\n \\cos\\left(\\frac{T_{cur}}{T_{i}}\\pi\\right)\\right)\n When :math:`T_{cur}=T_{i}`, set :math:`\\eta_t = \\eta_{min}`.\n When :math:`T_{cur}=0` after restart, set :math:`\\eta_t=\\eta_{max}`.\n It has been proposed in\n `SGDR: Stochastic Gradient Descent with Warm Restarts`_.\n Args:\n optimizer (Optimizer): Wrapped optimizer.\n T_0 (int): Number of iterations for the first restart.\n T_mult (int, optional): A factor increases :math:`T_{i}` after a\n restart. Default: 1.\n eta_min (float, optional): Minimum learning rate. Default: 0.\n last_epoch (int, optional): The index of last epoch. Default: -1.\n .. 
_SGDR\\: Stochastic Gradient Descent with Warm Restarts:\n https://arxiv.org/abs/1608.03983\n \"\"\"\n\n def __init__(self, optimizer, T_0, decay=0.9, T_mult=1, eta_min=0,\n last_epoch=-1):\n if T_0 <= 0 or not isinstance(T_0, int):\n raise ValueError(f\"Expected positive integer T_0, but got {T_0}\")\n if T_mult < 1 or not isinstance(T_mult, int):\n raise ValueError(f\"Expected integer T_mult >= 1, but got {T_mult}\")\n self.T_0 = T_0\n self.T_i = T_0\n self.T_mult = T_mult\n self.eta_min = eta_min\n self.decay = decay\n self.cur_decay = 1.0\n\n super(DecayingCosineAnnealingWarmRestarts, self).__init__(optimizer,\n last_epoch)\n\n self.T_cur = self.last_epoch\n\n def get_lr(self):\n if not self._get_lr_called_within_step:\n warnings.warn(\"To get the last learning rate computed by the \"\n \"scheduler, use `get_last_lr()`.\", UserWarning)\n\n return [self.cur_decay * (self.eta_min + (base_lr - self.eta_min) *\n (1 + math.cos(math.pi * self.T_cur / self.T_i)) / 2)\n for base_lr in self.base_lrs]\n\n def step(self, epoch=None):\n \"\"\"Step could be called after every batch update\"\"\"\n\n if epoch is None and self.last_epoch < 0:\n epoch = 0\n\n if epoch is None:\n epoch = self.last_epoch + 1\n self.T_cur = self.T_cur + 1\n if self.T_cur >= self.T_i:\n self.T_cur = self.T_cur - self.T_i\n self.T_i = self.T_i * self.T_mult\n else:\n if epoch < 0:\n raise ValueError(f\"Expected non-negative epoch, got {epoch}\")\n if epoch >= self.T_0:\n if self.T_mult == 1:\n self.T_cur = epoch % self.T_0\n n = int(epoch // self.T_0)\n else:\n n = int(math.log((epoch / self.T_0 * (self.T_mult - 1)\n + 1), self.T_mult))\n self.T_cur = epoch - self.T_0 * (self.T_mult ** n - 1) / \\\n (self.T_mult - 1)\n self.T_i = self.T_0 * self.T_mult ** (n)\n else:\n self.T_i = self.T_0\n self.T_cur = epoch\n n = 0\n self.cur_decay = self.decay ** n\n self.last_epoch = math.floor(epoch)\n\n class _enable_get_lr_call:\n\n def __init__(self, o):\n self.o = o\n\n def __enter__(self):\n self.o._get_lr_called_within_step = True\n return self\n\n def __exit__(self, type, value, traceback):\n self.o._get_lr_called_within_step = False\n return self\n\n with _enable_get_lr_call(self):\n for param_group, lr in zip(self.optimizer.param_groups,\n self.get_lr()):\n param_group['lr'] = lr\n\n self._last_lr = [group['lr'] for group in self.optimizer.param_groups]" }, { "identifier": "lr_cycle_iters", "path": "thirdparty/gangealing/annealing.py", "snippet": "def lr_cycle_iters(anneal_psi, period, iter, tm):\n zero_lr_iters = [anneal_psi - 1]\n num_cycles = int(math.log((iter - anneal_psi) / period, tm))\n for n in range(num_cycles):\n step = zero_lr_iters[-1] + period * tm ** n\n zero_lr_iters.append(int(step))\n print(f'Learning Rate Cycles: {zero_lr_iters}')\n return zero_lr_iters" } ]
import argparse import torch import numpy as np import json import os import torch.nn.functional as F import wandb from torch import nn, optim from tqdm import tqdm from pathlib import Path from commons.logger import Logger, log_visuals from commons.distributed import get_rank, setup_distributed, reduce_loss_dict,\ get_world_size, primary from commons.utils import sample_tuples from datasets.cub import CUBDataset from datasets.in_memory import InMemoryDataset from datasets.spair import SpairDataset from datasets.utils import Augmentor from models.utils import accumulate, requires_grad from models.canonical import Canonical, CanonicalMLP from models.asic import Asic from losses.reg_losses import total_variation_loss from thirdparty.lpips.lpips import get_perceptual_loss from losses.matching_losses import LossCorrsSparse from thirdparty.gangealing.annealing import DecayingCosineAnnealingWarmRestarts,\ lr_cycle_iters
17457
N = args.batch pairs = sample_tuples(len(train_dset), count=N // 2) src_idx, trg_idx = pairs[:, 0], pairs[:, 1] all_idx = np.concatenate([src_idx, trg_idx]) batch_imgs = all_imgs[all_idx] batch_parts = all_parts[all_idx] if args.use_nbb_parts: batch_masks = (batch_parts != num_parts).unsqueeze(1).float() batch_masks_resized = resize_fn(batch_masks) else: batch_masks = all_masks[all_idx] batch_masks_resized = resize_fn(batch_masks) kp1 = pseudo_kps[src_idx, trg_idx][:, :loss_topk] # (N/2, K, 4) kp2 = pseudo_kps[trg_idx, src_idx][:, :loss_topk] # (N/2, K, 4) batch_kps_vis = kp1[..., 2] > 0 # (N/2, K) batch_kps_wt = torch.ones_like(batch_kps_vis).float() # (N/2, K) batch_kps = torch.cat([kp1, kp2])[..., :2] # (N, K, 2) if args.use_nbb_parts: nbb_parts_vis = (kp1[..., 3] != args.num_parts) * (kp2[..., 3] != args.num_parts) batch_kps_wt *= nbb_parts_vis # Map the images to the canonical space flow, delta_flow = stn(batch_imgs) unwarped = canon.unwarp(flow, args.unwarp_size) # NBB weight if args.nbb_weight > 0.: nbb_loss = nbb_loss_fn(flow[:N//2], flow[N//2:], batch_kps[:N//2], batch_kps[N//2:], batch_kps_vis, batch_kps_wt) if args.equi_weight > 0.: # Apply tps transformations if args.disable_tps: batch_imgs_t = aug.forward_geom(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_geom(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_geom(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) else: batch_imgs_t = aug.forward_tps(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_tps(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_tps(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) batch_masks_t = torch.where(batch_masks_t > 0.5, 1., 0.) batch_masks_t_resized = resize_fn(batch_masks_t) vis = batch_masks_t * batch_masks # Flow of tps image flow_ft, _ = stn(batch_imgs_t) unwarped_ft = canon.unwarp(flow_ft, args.unwarp_size) equi_loss = F.l1_loss(flow_ft, flow_tf.detach(), reduction='none') \ + F.l1_loss(flow_tf, flow_ft.detach(), reduction='none') equi_loss = (equi_loss * vis.squeeze(1).unsqueeze(-1)).mean() if args.mask_weight > 0: unwarped_mask = unwarped[:, [3]] mask_loss = F.binary_cross_entropy_with_logits(unwarped_mask, batch_masks_resized) if args.equi_weight > 0.: unwarped_ft_mask = unwarped_ft[:, [3]] mask_loss = 0.5 * mask_loss + \ 0.5 * F.binary_cross_entropy_with_logits( unwarped_ft_mask, batch_masks_t_resized) # Get Total Variation Loss on flow if args.flow_tv_weight > 0: flow_tv_loss = total_variation_loss(delta_flow) # Reconstruction loss if args.rec_weight > 0: unwarped = unwarped * batch_masks_resized resized_img = resize_fn(batch_imgs) * batch_masks_resized rec_loss = loss_fn(unwarped[:, :3], resized_img).mean() if args.equi_weight > 0.: unwarped_ft = unwarped_ft * batch_masks_t_resized resized_img = resize_fn(batch_imgs_t) * batch_masks_t_resized rec_loss = 0.5*rec_loss + 0.5 * loss_fn(unwarped_ft[:, :3], resized_img).mean() # Parts Loss if args.parts_weight > 0.: # Calculate the centroid of each part part_centroids = torch.zeros(num_parts+1, 2, dtype=torch.float, device=device) part_centroids.index_add_(0, batch_parts.reshape(-1), flow.reshape(-1, 2)) part_counts = torch.bincount(batch_parts.reshape(-1)).float() part_centroids = (part_centroids/part_counts.unsqueeze(-1)).detach() # Compute the loss as the distance of the centroid from the flows parts_loss = F.l1_loss(flow, part_centroids[batch_parts], reduction='none') parts_loss = (parts_loss * batch_masks.squeeze(1).unsqueeze(-1)).mean() loss_dict = {"p": 
rec_loss, "ftv": flow_tv_loss, "nbb": nbb_loss, "equi": equi_loss, "mask": mask_loss, 'parts': parts_loss} canon.zero_grad() stn.zero_grad() full_stn_loss = args.rec_weight * rec_loss + \ args.flow_tv_weight * flow_tv_loss + \ args.nbb_weight * nbb_loss + args.equi_weight * equi_loss + \ args.mask_weight * mask_loss + args.parts_weight * parts_loss full_stn_loss.backward() t_optim.step() epoch = max(0, i / args.period) t_sched.step(epoch) if args.canon_lr > 0: canon_optim.step() canon_sched.step(epoch) if args.stn_ema:
def save_state_dict(ckpt_name, c_module, t_module, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, args, step, add_step_to_name=False): ckpt_dict = { "canon": c_module.state_dict(), "t": t_module.state_dict(), "c_ema": c_ema.state_dict(), "t_ema": t_ema.state_dict(), "t_optim": t_optim.state_dict(), "t_sched": t_sched.state_dict(), "canon_optim": canon_optim.state_dict() if canon_optim is not None else None, "canon_sched": canon_sched.state_dict() if canon_sched is not None else None, "args": args, "iter": step } torch.save(ckpt_dict, f'{results_path}/{ckpt_name}.pt') if add_step_to_name: torch.save(ckpt_dict, f'{results_path}/{ckpt_name}_{step:07d}.pt') def count_parameters(model): return sum(p.numel() for p in model.parameters() if p.requires_grad) def base_training_argparse(): parser = argparse.ArgumentParser(description="Training") # Main training arguments: parser.add_argument("--exp-name", type=str, required=True, help="Name for experiment run (used for logging)") parser.add_argument("--results", type=str, default='logs', help='path to the results directory') parser.add_argument("--seed", default=0, type=int, help='Random seed for this experiment') parser.add_argument("--dset", type=str, default='cub', choices=["cub", "spair"]) parser.add_argument("--img_dir", type=str, required=True, help="Path to real data") parser.add_argument("--flow_dir", type=str, default='processed_data', help="Path to preprocessed flows") parser.add_argument("--mask_threshold", type=int, default=1, help="Threshold for masking") parser.add_argument("--mask_bbox_pad", type=int, default=4, help="Crop with some padding") parser.add_argument("--img_size", default=256, type=int, help='resolution of real images') parser.add_argument("--iter", type=int, default=20000, help="total training iterations") parser.add_argument("--batch", type=int, default=20, help="batch size per-GPU") parser.add_argument("--num_workers", type=int, default=2, help="num workers for dataloader") # Dataset hyperparameters: parser.add_argument("--cub_idx", type=int, default=1, help="cub category") parser.add_argument("--split", default='test', choices=['test', 'val'], help='splits for training and validation') parser.add_argument("--use_coseg_masks", action='store_true') parser.add_argument("--num_parts", default=4, type=int) parser.add_argument("--spair_cat", default='cat', help="cub category") # Loss hyperparameters: parser.add_argument("--loss_fn", type=str, default='vgg_ssl', choices=['lpips', 'vgg_ssl'], help="The perceptual loss to use.") parser.add_argument("--rec_weight", type=float, default=1., help='weight for reconstruction loss') parser.add_argument("--nbb_weight", type=float, default=30., help='weight for nbb loss') parser.add_argument("--flow_tv_weight", default=15000.0, type=float, help="""Loss weighting of the Total Variation smoothness regularizer on the residual flow""") parser.add_argument("--equi_weight", default=1.0, type=float, help='Loss weighting for equivariance') parser.add_argument("--sparse_topk", type=int, default=None, help='number of sparse correspondences for loss') parser.add_argument("--sparse_temp", type=float, default=1, help='temperature for sparse loss') parser.add_argument("--mask_weight", default=0.1, type=float, help="""Loss weighting of the mask""") parser.add_argument("--parts_weight", default=10.0, type=float, help="""Loss weighting of the Parts Mask""") parser.add_argument("--use_nbb_parts", action='store_true') # Augmentation hyperparameters parser.add_argument("--jitter", default=[0.4, 
0.4, 0.2, 0.1], type=float, nargs='+', help='augmentation mode') parser.add_argument("--jitter_prob", default=0.8, type=float) parser.add_argument("--gray_prob", default=0.2, type=float) parser.add_argument("--solar_prob", default=0.2, type=float) parser.add_argument("--tps_scale", default=0.4, type=float) # Canonical space parser.add_argument("--unwarp_size", type=int, default=128, help="resolution for unwarping") # Learned Grid hyperparameters parser.add_argument("--canon_size", type=int, default=256, help="resolution of canonical space") parser.add_argument("--clamp", action='store_true', help="clamp values of canonical space (-1, 1)") # MLP Hyperparams parser.add_argument("--use_mlp", action='store_true') parser.add_argument("--mlp_hidden_dim", type=int, default=256, help="number of hidden units per layer") parser.add_argument("--mlp_num_layers", type=int, default=8, help="number of layers") parser.add_argument("--mlp_skip_layers", type=int, nargs='+', default=[4, 7], help="skip layers") # Model hyperparameters: parser.add_argument("--canon_lr", type=float, default=0.003, help="base learning rate of canonical space") parser.add_argument("--canon_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_ema", action='store_true', help='Enable ema for canonical space') parser.add_argument("--stn_lr", type=float, default=0.003, help="base learning rate of SpatialTransformer") parser.add_argument("--flow_ssl", action='store_true', help="""If specified, apply STN on SSL features)""") parser.add_argument("--channel_multiplier", default=0.5, type=float, help='channel multiplier for smaller models') parser.add_argument("--bilinear", action='store_true', help='Apply bilinear upsample/downsample') parser.add_argument("--padding_mode", default='border', choices=['border', 'zeros', 'reflection'], type=str, help="""Padding algorithm for when the STN samples beyond image boundaries""") parser.add_argument("--use_tanh", action='store_true', help='Use tanh activation at the flow output') parser.add_argument("--disable_tps", action='store_true', help='disable tps transformations') # Backbone parameters parser.add_argument("--bb", default='dino_vits8', choices=['dino_vits8', 'dino_vits16', 'dino_vitb8', 'dino_vitb16', 'vit_small_patch8_224', 'vit_small_patch16_224', 'vit_base_patch16_224'], help='backbone models') parser.add_argument('--bb_stride', default=2, type=int, help="stride.") # Visualization hyperparameters: parser.add_argument("--vis_every", type=int, default=500, help="""frequency with which visualizations are generated during training""") parser.add_argument("--vis_denseres", type=int, default=32, help='number of sparse correspondences to visualize') parser.add_argument("--ckpt_every", type=int, default=10000, help='frequency of checkpointing during training') parser.add_argument("--log_every", default=25, type=int, help='How frequently to log data to TensorBoard') parser.add_argument("--n_sample", type=int, default=4, help="""number of images (real and fake) to generate visuals for""") parser.add_argument("--disable_wandb", action='store_true', help='Disable wandb for debugging') # Learning Rate scheduler hyperparameters: parser.add_argument("--period", default=10000, type=float, help="""Period for cosine learning rate scheduler (measured in gradient steps)""") parser.add_argument("--decay", default=0.9, type=float, help="""Decay factor for the cosine learning rate scheduler""") parser.add_argument("--tm", default=2, type=int, help="""Period multiplier for 
the cosine learning rate scheduler""") return parser def train(args, train_dset, canon, stn, c_ema, t_ema, canon_optim, canon_sched, t_optim, t_sched, loss_fn, nbb_loss_fn, device, writer): # Record modules to make saving checkpoints easier: if args.distributed: t_module = stn.module c_module = canon.module else: t_module = stn c_module = canon # Initialize Spatial Transformation Generator (Thin Plate Spline) aug = Augmentor(jitter=args.jitter, jitter_prob=args.jitter_prob, gray_prob=args.gray_prob, solar_prob=args.solar_prob, tps_scale=args.tps_scale).to(device) # A model checkpoint will be saved whenever the learning rate is zero: zero_lr_iters = lr_cycle_iters(0, args.period, args.iter, args.tm) early_ckpt_iters = set(zero_lr_iters) early_vis_iters = {100} early_vis_iters.update(early_ckpt_iters) # Initialize various training variables and constants: rec_loss = torch.tensor(0.0, device='cuda') flow_tv_loss = torch.tensor(0.0, device='cuda') nbb_loss = torch.tensor(0.0, device='cuda') equi_loss = torch.tensor(0.0, device='cuda') mask_loss = torch.tensor(0.0, device='cuda') parts_loss = torch.tensor(0.0, device='cuda') accum = 0.5 ** (32 / (10 * 1000)) # Resize function for perceptual loss if args.unwarp_size != args.img_size: scale_factor = args.unwarp_size / args.img_size resize_fn = nn.Upsample(scale_factor=scale_factor, mode='bilinear', align_corners=True) else: resize_fn = nn.Identity() # Pre-load on GPU # Assuming ~30 images of size 256x256, takes up ~23 MB device memory has_gt_kp = train_dset.kps is not None all_imgs = train_dset.imgs = train_dset.imgs.to(device) # / 127.5 - 1.0 all_masks = train_dset.masks = train_dset.masks.unsqueeze(1).to(device) all_parts = train_dset.parts = train_dset.parts.to(device) if has_gt_kp: all_kps = train_dset.kps = train_dset.kps.to(device) # Pseudo GT pseudo_kps = train_dset.pseudo_kps = torch.from_numpy(train_dset.pseudo_kps).to(device) num_parts = train_dset.num_parts loss_topk = pseudo_kps.shape[2] if args.sparse_topk is None else min(args.sparse_topk, pseudo_kps.shape[2]) # Progress bar for monitoring training: pbar = range(args.start_iter, args.iter) if primary(): pbar = tqdm(pbar, initial=args.start_iter, dynamic_ncols=True, smoothing=0.2) pck_pairs, pck_cycles = log_visuals( c_ema, t_ema, train_dset, 0, writer, vis_sample=args.n_sample, vis_denseres=args.vis_denseres) best_pck_pairs = pck_pairs best_pck_cycles = pck_cycles requires_grad(stn, True) requires_grad(canon, True) for idx in pbar: # main training loop i = idx + args.start_iter + 1 #################################### # TRAIN STN and CANON # #################################### N = args.batch pairs = sample_tuples(len(train_dset), count=N // 2) src_idx, trg_idx = pairs[:, 0], pairs[:, 1] all_idx = np.concatenate([src_idx, trg_idx]) batch_imgs = all_imgs[all_idx] batch_parts = all_parts[all_idx] if args.use_nbb_parts: batch_masks = (batch_parts != num_parts).unsqueeze(1).float() batch_masks_resized = resize_fn(batch_masks) else: batch_masks = all_masks[all_idx] batch_masks_resized = resize_fn(batch_masks) kp1 = pseudo_kps[src_idx, trg_idx][:, :loss_topk] # (N/2, K, 4) kp2 = pseudo_kps[trg_idx, src_idx][:, :loss_topk] # (N/2, K, 4) batch_kps_vis = kp1[..., 2] > 0 # (N/2, K) batch_kps_wt = torch.ones_like(batch_kps_vis).float() # (N/2, K) batch_kps = torch.cat([kp1, kp2])[..., :2] # (N, K, 2) if args.use_nbb_parts: nbb_parts_vis = (kp1[..., 3] != args.num_parts) * (kp2[..., 3] != args.num_parts) batch_kps_wt *= nbb_parts_vis # Map the images to the canonical space flow, delta_flow = 
stn(batch_imgs) unwarped = canon.unwarp(flow, args.unwarp_size) # NBB weight if args.nbb_weight > 0.: nbb_loss = nbb_loss_fn(flow[:N//2], flow[N//2:], batch_kps[:N//2], batch_kps[N//2:], batch_kps_vis, batch_kps_wt) if args.equi_weight > 0.: # Apply tps transformations if args.disable_tps: batch_imgs_t = aug.forward_geom(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_geom(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_geom(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) else: batch_imgs_t = aug.forward_tps(aug.forward_color(batch_imgs)) batch_masks_t = aug.forward_tps(batch_masks, fixed=True) # Apply tps to flow flow_tf = aug.forward_tps(flow.permute(0, 3, 1, 2), fixed=True).permute(0, 2, 3, 1) batch_masks_t = torch.where(batch_masks_t > 0.5, 1., 0.) batch_masks_t_resized = resize_fn(batch_masks_t) vis = batch_masks_t * batch_masks # Flow of tps image flow_ft, _ = stn(batch_imgs_t) unwarped_ft = canon.unwarp(flow_ft, args.unwarp_size) equi_loss = F.l1_loss(flow_ft, flow_tf.detach(), reduction='none') \ + F.l1_loss(flow_tf, flow_ft.detach(), reduction='none') equi_loss = (equi_loss * vis.squeeze(1).unsqueeze(-1)).mean() if args.mask_weight > 0: unwarped_mask = unwarped[:, [3]] mask_loss = F.binary_cross_entropy_with_logits(unwarped_mask, batch_masks_resized) if args.equi_weight > 0.: unwarped_ft_mask = unwarped_ft[:, [3]] mask_loss = 0.5 * mask_loss + \ 0.5 * F.binary_cross_entropy_with_logits( unwarped_ft_mask, batch_masks_t_resized) # Get Total Variation Loss on flow if args.flow_tv_weight > 0: flow_tv_loss = total_variation_loss(delta_flow) # Reconstruction loss if args.rec_weight > 0: unwarped = unwarped * batch_masks_resized resized_img = resize_fn(batch_imgs) * batch_masks_resized rec_loss = loss_fn(unwarped[:, :3], resized_img).mean() if args.equi_weight > 0.: unwarped_ft = unwarped_ft * batch_masks_t_resized resized_img = resize_fn(batch_imgs_t) * batch_masks_t_resized rec_loss = 0.5*rec_loss + 0.5 * loss_fn(unwarped_ft[:, :3], resized_img).mean() # Parts Loss if args.parts_weight > 0.: # Calculate the centroid of each part part_centroids = torch.zeros(num_parts+1, 2, dtype=torch.float, device=device) part_centroids.index_add_(0, batch_parts.reshape(-1), flow.reshape(-1, 2)) part_counts = torch.bincount(batch_parts.reshape(-1)).float() part_centroids = (part_centroids/part_counts.unsqueeze(-1)).detach() # Compute the loss as the distance of the centroid from the flows parts_loss = F.l1_loss(flow, part_centroids[batch_parts], reduction='none') parts_loss = (parts_loss * batch_masks.squeeze(1).unsqueeze(-1)).mean() loss_dict = {"p": rec_loss, "ftv": flow_tv_loss, "nbb": nbb_loss, "equi": equi_loss, "mask": mask_loss, 'parts': parts_loss} canon.zero_grad() stn.zero_grad() full_stn_loss = args.rec_weight * rec_loss + \ args.flow_tv_weight * flow_tv_loss + \ args.nbb_weight * nbb_loss + args.equi_weight * equi_loss + \ args.mask_weight * mask_loss + args.parts_weight * parts_loss full_stn_loss.backward() t_optim.step() epoch = max(0, i / args.period) t_sched.step(epoch) if args.canon_lr > 0: canon_optim.step() canon_sched.step(epoch) if args.stn_ema:
accumulate(t_ema, t_module, accum)
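The gold completion here is the EMA step: accumulate, quoted in the context above, blends every parameter of the EMA copy toward the live model, p_ema <- decay * p_ema + (1 - decay) * p. A minimal runnable sketch with throwaway linear layers (the models and sizes are placeholders):

import torch
from torch import nn

def accumulate(model1, model2, decay=0.999):
    # In-place EMA update of model1's parameters toward model2's.
    par1 = dict(model1.named_parameters())
    par2 = dict(model2.named_parameters())
    for k in par1.keys():
        par1[k].data.mul_(decay).add_(par2[k].data, alpha=1 - decay)

t_ema, t_module = nn.Linear(4, 4), nn.Linear(4, 4)
accum = 0.5 ** (32 / (10 * 1000))  # the half-life constant used in train()
accumulate(t_ema, t_module, accum)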
12
2023-11-14 16:43:16+00:00
24k
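Taken together, the fields above form one next-line completion example: given the truncated in-file code plus the retrieved cross-file snippets, a model must reproduce the held-out line. A minimal sketch of how a prediction for such a record could be scored, assuming exact match and character-level edit similarity as the metrics (difflib is used here purely for illustration):

import difflib

def score(prediction: str, reference: str) -> dict:
    pred, ref = prediction.strip(), reference.strip()
    return {
        "exact_match": pred == ref,
        "edit_similarity": difflib.SequenceMatcher(None, pred, ref).ratio(),
    }

# Scoring a hypothetical model output against the gold line of this record.
print(score("accumulate(t_ema, t_module, accum)",
            "accumulate(t_ema, t_module, accum)"))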
tyang816/ProtSSN
src/data.py
[ { "identifier": "CathDataset", "path": "src/dataset/cath_dataset.py", "snippet": "class CathDataset(InMemoryDataset):\n r\"\"\"\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset.\n raw_dir (string, optional): Root directory where the\n original dataset stored(default: :obj:`None`)\n\n num_residue_type (int, optional): The number of amino acid types.\n (default: obj:'20')\n micro_radius (int, optional): The radius of micro-environment\n centered on the mask node. (default: obj:'20')\n c_alpha_max_neighbors (int, optional): The number of maximum\n connected nodes. (default: obj:'10')\n cutoff (int, optional): The maximum connected nodes distance\n (default: obj:'30')\n seq_dist_cut (int, optional): one-hot encoding the sequence distance\n edge attribute\n (default: obj:)\n [0.25,0.5,0.75,0.9,0.95,0.98,0.99]\n [ 2. 3. 13. 63. 127. 247. 347.]\n num_val (int, optional): The number of validation samples in case of \"random\" split. (default: 500)\n num_test (int, optional): The number of test samples in case of \"random\" split. (default: 1000)\n\n # use_localdatastet (bool) (bool,optional): If :obj:'True', online dataset\n # will be downloaded. If not, local pdb files will be used\n # (default: obj:'True')\n\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. 
(default: :obj:`None`)\n \"\"\"\n\n splits = ['train', 'val', 'test']\n allowable_features = {\n 'possible_atomic_num_list': list(range(1, 119)) + ['misc'],\n 'possible_chirality_list': [\n 'CHI_UNSPECIFIED',\n 'CHI_TETRAHEDRAL_CW',\n 'CHI_TETRAHEDRAL_CCW',\n 'CHI_OTHER'\n ],\n 'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],\n 'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],\n 'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],\n 'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],\n 'possible_hybridization_list': [\n 'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'\n ],\n 'possible_is_aromatic_list': [False, True],\n 'possible_is_in_ring3_list': [False, True],\n 'possible_is_in_ring4_list': [False, True],\n 'possible_is_in_ring5_list': [False, True],\n 'possible_is_in_ring6_list': [False, True],\n 'possible_is_in_ring7_list': [False, True],\n 'possible_is_in_ring8_list': [False, True],\n 'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS',\n 'MET',\n 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV',\n 'MEU',\n 'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'],\n 'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*',\n 'OD',\n 'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],\n 'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2',\n 'CH2',\n 'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O',\n 'OD1',\n 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],\n }\n\n def __init__(self, root: str,\n split: str = 'train',\n num_residue_type: int = 20,\n micro_radius: int = 20,\n c_alpha_max_neighbors: int = 10,\n cutoff: int = 30,\n seq_dist_cut: int = 64,\n use_micro: bool = False,\n use_angle: bool = False,\n use_omega: bool = False,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n pre_filter: Optional[Callable] = None,\n divide_num: int = 1,\n divide_idx: int = 0,\n set_length: int = 500,\n num_val: int = 10,\n is_normalize: bool = True,\n normalize_file: str = None,\n p: float = 0.5,\n use_sasa: bool =False,\n use_bfactor: bool = False,\n use_dihedral: bool = False,\n use_coordinate: bool = False,\n use_denoise: bool = False,\n noise_type: str = 'wild',\n temperature = 1.0\n ):\n self.p=p\n self.use_sasa=use_sasa\n self.use_bfactor=use_bfactor\n self.use_dihedral=use_dihedral\n self.use_coordinate=use_coordinate\n self.use_denoise=use_denoise\n self.noise_type = noise_type\n self.temperature = temperature\n \n self.split = split\n assert self.split in self.splits\n\n self.num_residue_type = num_residue_type\n self.micro_radius = micro_radius\n self.c_alpha_max_neighbors = c_alpha_max_neighbors\n self.seq_dist_cut = seq_dist_cut\n self.use_micro = use_micro\n self.use_angle = use_angle\n self.use_omega = use_omega\n self.cutoff = cutoff\n\n self.num_val = num_val\n self.divide_num = divide_num\n self.divide_idx = divide_idx\n self.set_length = set_length\n\n self.is_normalize = is_normalize\n self.normalize_file = normalize_file\n\n self.wrong_proteins = ['1kp0A01', '2atcA02']\n\n self.sr = ShrakeRupley(probe_radius=1.4, # in A. 
Default is 1.40 roughly the radius of a water molecule.\n n_points=100) # resolution of the surface of each atom. Default is 100. A higher number of points results in more precise measurements, but slows down the calculation.\n self.periodic_table = GetPeriodicTable()\n self.biopython_parser = PDBParser()\n\n super().__init__(root, transform, pre_transform, pre_filter)\n self.dataset = torch.load(self.processed_paths[self.splits.index(self.split)])\n # self.data, self.slices = torch.load(\n # self.processed_paths[self.splits.index(self.split)])\n # self.nums_amino_cum = self.slices['x']\n\n @property\n def raw_file_names(self) -> str:\n raw_file_names = os.path.join('data', 'cath', \"dompdb\")\n if not os.path.exists(raw_file_names):\n os.mkdir(raw_file_names)\n return raw_file_names\n\n @property\n def raw_dir(self) -> str:\n if not os.path.exists(self.root):\n os.mkdir(self.root)\n raw_dir = os.path.join(self.root, 'raw')\n if not os.path.exists(raw_dir):\n os.mkdir(raw_dir)\n return raw_dir\n\n @property\n def saved_graph_dir(self) -> str:\n dir_root = os.path.join(self.root)\n if not os.path.exists(dir_root):\n os.mkdir(dir_root)\n dir_name = os.path.join(dir_root, 'graph_seq')\n if not os.path.exists(dir_name):\n os.mkdir(dir_name)\n if not self.set_length:\n self.set_length = len(os.listdir(dir_name))\n return dir_name\n\n @property\n def saved_amino_cum(self) -> str:\n amino_cum_name = os.path.join(\n self.root, 'amino_cum.pt')\n return amino_cum_name\n\n @property\n def processed_dir(self) -> str:\n return os.path.join(self.root, 'processed_seq')\n\n @property\n def processed_file_names(self) -> str:\n return ['train.pt', 'val.pt']\n\n\n def write_info(self):\n written_filename = os.path.join(self.root, 'wrong_protein_names.txt')\n file = open(written_filename, \"w+\")\n for protein_name in self.wrong_proteins:\n file.writelines(protein_name + '\\n')\n file.close()\n\n def process(self):\n #generate graph data and save in graph dir\n self.generate_protein_graph()\n # self.write_info()\n\n filenames = os.listdir(self.saved_graph_dir)\n protein_length = len(filenames)\n if self.set_length:\n protein_length = min(protein_length, self.set_length)\n\n if not self.normalize_file:\n self.normalize_file = get_stat(self.saved_graph_dir)\n\n random.shuffle(filenames)\n train_list = [f for f in filenames if \"_\" in f or \"-\" in f]\n filenames = [f for f in filenames if \"_\" not in f or \"-\" not in f]\n train_list.extend(filenames[:-self.num_val])\n filenames_list = [train_list, filenames[-self.num_val:]]\n \n for k in range(2):####split train,val,test\n data_list = []\n\n ###move special name to test set\n special_name_list = [\"p53-dimer.pdb.pt\"]\n for special_name in special_name_list:\n if special_name in filenames_list[0]:\n filenames_list[0].remove(special_name)\n filenames_list[1].append(special_name)\n for i in tqdm(range(len(filenames_list[k]))):\n file = filenames_list[k][i]\n try:\n graph1 = torch.load(os.path.join(self.saved_graph_dir, file))##load processed graph data torch pt file\n except:\n print(file)\n continue\n del graph1['distances']\n del graph1['edge_dist']\n del graph1['mu_r_norm']\n del graph1['seq']\n data_list.append(graph1)\n if self.is_normalize:\n normalize_transform = NormalizeProtein(filename=self.normalize_file)\n data_list = [d for d in data_list if normalize_transform(d)]\n if self.pre_filter is not None:\n data_list = [d for d in data_list if self.pre_filter(d)]\n if self.pre_transform is not None:\n data_list = [self.pre_transform(d) for d in 
data_list]\n\n torch.save(data_list, self.processed_paths[k])\n\n def generate_protein_graph(self):\n names = os.listdir(self.raw_file_names)\n print(names)\n names.sort()\n n = int(np.ceil(len(names) / self.divide_num))\n names = names[n * self.divide_idx:min(len(names), n * (self.divide_idx + 1))]\n for idx, name in enumerate(tqdm(names)):\n saved_graph_filename = os.path.join(self.saved_graph_dir, name + '.pt')\n if os.path.exists(saved_graph_filename):\n continue\n protein_filename = os.path.join(self.raw_file_names, name)\n if (name in self.wrong_proteins) or (not protein_filename):\n continue\n try:\n rec, rec_coords, c_alpha_coords, n_coords, c_coords,seq = self.get_receptor_inference(protein_filename)\n except:\n continue\n if rec !=False:\n if len(seq)>len(c_alpha_coords):\n del seq[-(len(seq)-len(c_alpha_coords)):]\n #meet \"dna\" data will remove the file and rec will be false\n # print(self.c_alpha_max_neighbors)\n rec_graph = self.get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords, rec_coords,seq)\n if not rec_graph:\n self.wrong_proteins.append(name)\n continue\n torch.save(rec_graph, saved_graph_filename)\n\n def rec_residue_featurizer(self, rec, chain_id, one_hot=True, add_feature=None):\n count = 0\n flag_sasa=1\n try:\n self.sr.compute(rec, level=\"R\")\n except:\n flag_sasa=0\n for i, chain in enumerate(rec.get_chains()):\n if i != chain_id:\n continue\n num_res = len(list(chain.get_residues()))#len([_ for _ in rec.get_residues()])\n num_feature = 2\n if add_feature.any():\n num_feature += add_feature.shape[1]\n res_feature = torch.zeros(num_res, self.num_residue_type + num_feature)\n for i, residue in enumerate(chain.get_residues()):\n if flag_sasa==0:\n residue.sasa=0\n sasa = residue.sasa\n for atom in residue:\n if atom.name == 'CA':\n bfactor = atom.bfactor\n assert not np.isinf(bfactor)\n assert not np.isnan(bfactor)\n assert not np.isinf(sasa)\n assert not np.isnan(sasa)\n\n residx = safe_index(\n self.allowable_features['possible_amino_acids'], residue.get_resname())\n res_feat_1 = one_hot_res(\n residx, num_residue_type=self.num_residue_type) if one_hot else [residx]\n if not res_feat_1:\n return False\n res_feat_1.append(sasa)\n res_feat_1.append(bfactor)\n if num_feature > 2:\n res_feat_1.extend(list(add_feature[count, :]))\n res_feature[count, :] = torch.tensor(res_feat_1, dtype=torch.float32)\n count += 1\n # print(\"numnodes:\", num_res, count,len(list(chain.get_residues())))\n for k in range(self.num_residue_type, self.num_residue_type + 2):\n mean = res_feature[:, k].mean()\n std = res_feature[:, k].std()\n res_feature[:, k] = (res_feature[:, k] -mean) / (std + 0.000000001)\n return res_feature\n\n def get_node_features(self, n_coords, c_coords, c_alpha_coords, coord_mask, with_coord_mask=True, use_angle=False,\n use_omega=False):\n num_res = n_coords.shape[0]\n if use_omega:\n num_angle_type = 3\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res - 1):\n # These angles are called φ (phi) which involves the backbone atoms C-N-Cα-C\n angles[i, 0] = dihedral(\n c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i + 1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(\n n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i + 1])\n angles[i, 2] = dihedral(\n c_alpha_coords[i], c_coords[i], n_coords[i + 1], c_alpha_coords[i + 1])\n else:\n num_angle_type = 2\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res - 1):\n # These angles are called φ (phi) which involves the backbone atoms 
C-N-Cα-C\n angles[i, 0] = dihedral(\n c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i + 1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(\n n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i + 1])\n if use_angle:\n node_scalar_features = angles\n else:\n node_scalar_features = np.zeros((num_res, num_angle_type * 2))\n for i in range(num_angle_type):\n node_scalar_features[:, 2 * i] = np.sin(angles[:, i])\n node_scalar_features[:, 2 * i + 1] = np.cos(angles[:, i])\n\n if with_coord_mask:\n node_scalar_features = torch.cat([\n node_scalar_features,\n coord_mask.float().unsqueeze(-1)\n ], dim=-1)\n node_vector_features = None\n return node_scalar_features, node_vector_features\n\n def get_calpha_graph(self, rec, c_alpha_coords, n_coords, c_coords, coords, seq):\n chain_id = 0\n scalar_feature, vec_feature = self.get_node_features(n_coords, c_coords, c_alpha_coords, coord_mask=None, with_coord_mask=False, use_angle=self.use_angle, use_omega=self.use_omega)\n # Extract 3D coordinates and n_i,u_i,v_i\n # vectors of representative residues ################\n residue_representatives_loc_list = []\n n_i_list = []\n u_i_list = []\n v_i_list = []\n for i, chain in enumerate(rec.get_chains()):\n if i != chain_id:\n continue\n for i, residue in enumerate(chain.get_residues()):\n n_coord = n_coords[i]\n c_alpha_coord = c_alpha_coords[i]\n c_coord = c_coords[i]\n u_i = (n_coord - c_alpha_coord) / \\\n np.linalg.norm(n_coord - c_alpha_coord)\n t_i = (c_coord - c_alpha_coord) / \\\n np.linalg.norm(c_coord - c_alpha_coord)\n n_i = np.cross(u_i, t_i) / \\\n np.linalg.norm(np.cross(u_i, t_i)) # main chain\n v_i = np.cross(n_i, u_i)\n assert (math.fabs(\n np.linalg.norm(v_i) - 1.) < 1e-5), \"protein utils protein_to_graph_dips, v_i norm larger than 1\"\n n_i_list.append(n_i)\n u_i_list.append(u_i)\n v_i_list.append(v_i)\n residue_representatives_loc_list.append(c_alpha_coord)\n\n residue_representatives_loc_feat = np.stack(residue_representatives_loc_list, axis=0) # (N_res, 3)\n n_i_feat = np.stack(n_i_list, axis=0)\n u_i_feat = np.stack(u_i_list, axis=0)\n v_i_feat = np.stack(v_i_list, axis=0)\n num_residues = len(c_alpha_coords)\n if num_residues <= 1:\n raise ValueError(f\"rec contains only 1 residue!\")\n ################### Build the k-NN graph ##############################\n assert num_residues == residue_representatives_loc_feat.shape[0]\n assert residue_representatives_loc_feat.shape[1] == 3\n distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)\n\n src_list = []\n dst_list = []\n dist_list = []\n mean_norm_list = []\n for i in range(num_residues):\n dst = list(np.where(distances[i, :] < self.cutoff)[0])\n dst.remove(i)\n if self.c_alpha_max_neighbors != None and len(dst) > self.c_alpha_max_neighbors:\n dst = list(np.argsort(distances[i, :]))[\n 1: self.c_alpha_max_neighbors + 1]\n if len(dst) == 0:\n # choose second because first is i itself\n dst = list(np.argsort(distances[i, :]))[1:2]\n log(\n f'The c_alpha_cutoff {self.cutoff} was too small for one c_alpha such that it had no neighbors. 
So we connected it to the closest other c_alpha')\n assert i not in dst\n src = [i] * len(dst)\n src_list.extend(src)\n dst_list.extend(dst)\n valid_dist = list(distances[i, dst])\n dist_list.extend(valid_dist)\n valid_dist_np = distances[i, dst]\n sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))\n weights = softmax(- valid_dist_np.reshape((1, -1))** 2 / sigma, axis=1) # (sigma_num, neigh_num)\n # print(weights) why weight??\n assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01\n diff_vecs = residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :] # (neigh_num, 3)\n mean_vec = weights.dot(diff_vecs) # (sigma_num, 3)\n denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1)) # (sigma_num,)\n mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator # (sigma_num,)\n mean_norm_list.append(mean_vec_ratio_norm)\n assert len(src_list) == len(dst_list)\n assert len(dist_list) == len(dst_list)\n residue_representatives_loc_feat = torch.from_numpy(residue_representatives_loc_feat.astype(np.float32))\n x = self.rec_residue_featurizer(rec, chain_id, one_hot=True, add_feature=scalar_feature)\n if isinstance(x, bool) and (not x):\n return False\n ######key part to generate graph!!!!!main\n graph = Data(\n x=x,## 26 feature 20+sasa+b factor+ two face angle\n pos=residue_representatives_loc_feat,\n edge_attr=self.get_edge_features(src_list, dst_list, dist_list, divisor=4), ##edge features\n edge_index=torch.tensor([src_list, dst_list]),\n edge_dist=torch.tensor(dist_list),\n distances=torch.tensor(distances),\n mu_r_norm=torch.from_numpy(np.array(mean_norm_list).astype(np.float32)),\n seq = seq) ##about density capture\n # Loop over all edges of the graph and build the various p_ij, q_ij, k_ij, t_ij pairs\n edge_feat_ori_list = []\n for i in range(len(dist_list)):\n src = src_list[i]\n dst = dst_list[i]\n # place n_i, u_i, v_i as lines in a 3x3 basis matrix\n basis_matrix = np.stack(\n (n_i_feat[dst, :], u_i_feat[dst, :], v_i_feat[dst, :]), axis=0)\n p_ij = np.matmul(basis_matrix,residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :])\n q_ij = np.matmul(basis_matrix, n_i_feat[src, :]) # shape (3,)\n k_ij = np.matmul(basis_matrix, u_i_feat[src, :])\n t_ij = np.matmul(basis_matrix, v_i_feat[src, :])\n s_ij = np.concatenate((p_ij, q_ij, k_ij, t_ij), axis=0) # shape (12,)\n edge_feat_ori_list.append(s_ij)\n\n edge_feat_ori_feat = np.stack(edge_feat_ori_list, axis=0) # shape (num_edges, 4, 3)\n edge_feat_ori_feat = torch.from_numpy(edge_feat_ori_feat.astype(np.float32))\n graph.edge_attr = torch.cat([graph.edge_attr, edge_feat_ori_feat], axis=1) # (num_edges, 17)\n # graph = self.remove_node(graph, graph.x.shape[0]-1)###remove the last node, can not calculate the two face angle\n # self.get_calpha_graph_single(graph, 6)\n return graph\n\n def remove_node(self, graph, node_idx):\n new_graph = Data.clone(graph)\n # delete node\n new_graph.x = torch.cat(\n [new_graph.x[:node_idx, :], new_graph.x[node_idx + 1:, :]])\n new_graph.pos = torch.cat(\n [new_graph.pos[:node_idx, :], new_graph.pos[node_idx + 1:, :]])\n new_graph.mu_r_norm = torch.cat(\n [new_graph.mu_r_norm[:node_idx, :], new_graph.mu_r_norm[node_idx + 1:, :]])\n\n # delete edge\n keep_edge = (torch.sum(new_graph.edge_index == node_idx, dim=0) == 0)\n new_graph.edge_index = new_graph.edge_index[:, keep_edge]\n new_graph.edge_attr = new_graph.edge_attr[keep_edge, :]\n return new_graph\n\n def get_edge_features(self, src_list, dst_list, dist_list, divisor=4):\n 
seq_edge = torch.absolute(torch.tensor(\n src_list) - torch.tensor(dst_list)).reshape(-1, 1)\n seq_edge = torch.where(seq_edge > self.seq_dist_cut,\n self.seq_dist_cut, seq_edge)\n seq_edge = F.one_hot(\n seq_edge, num_classes=self.seq_dist_cut + 1).reshape((-1, self.seq_dist_cut + 1))\n contact_sig = torch.where(torch.tensor(\n dist_list) <= 8, 1, 0).reshape(-1, 1)\n # avg distance = 7. So divisor = (4/7)*7 = 4\n dist_fea = self.distance_featurizer(dist_list, divisor=divisor)\n return torch.concat([seq_edge, dist_fea, contact_sig], dim=-1)\n\n def get_receptor_inference(self, rec_path):\n chain_id=0\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n structure = self.biopython_parser.get_structure('random_id', rec_path)\n rec = structure[0]##len(structure)=1\n head = self.biopython_parser.get_header()['head']\n if head.find('dna') > -1:\n return False, False, False, False, False,False\n coords = []\n c_alpha_coords = []\n n_coords = []\n c_coords = []\n valid_chain_ids = []\n lengths = []\n seq = []\n for i, chain in enumerate(rec):\n print(\"chain num\",i,chain_id,chain)\n if i != chain_id:##select chain A:i=0 or B:i=1\n continue\n chain_coords = [] # num_residues, num_atoms, 3\n chain_c_alpha_coords = []\n chain_n_coords = []\n chain_c_coords = []\n count = 0\n invalid_res_ids = []\n for res_idx, residue in enumerate(chain):\n if residue.get_resname() == 'HOH':\n invalid_res_ids.append(residue.get_id())\n continue\n residue_coords = []\n c_alpha, n, c = None, None, None\n for atom in residue:\n if atom.name == 'CA':\n c_alpha = list(atom.get_vector())\n seq.append(str(residue).split(\" \")[1])\n if atom.name == 'N':\n n = list(atom.get_vector())\n if atom.name == 'C':\n c = list(atom.get_vector())\n residue_coords.append(list(atom.get_vector()))\n # only append residue if it is an amino acid and not some weired molecule that is part of the complex\n if c_alpha != None and n != None and c != None:\n chain_c_alpha_coords.append(c_alpha)\n chain_n_coords.append(n)\n chain_c_coords.append(c)\n chain_coords.append(np.array(residue_coords))\n count += 1\n else:\n invalid_res_ids.append(residue.get_id())\n for res_id in invalid_res_ids:\n chain.detach_child(res_id)\n lengths.append(count)\n coords.append(chain_coords)\n c_alpha_coords.append(np.array(chain_c_alpha_coords))\n n_coords.append(np.array(chain_n_coords))\n c_coords.append(np.array(chain_c_coords))\n if len(chain_coords) > 0:\n valid_chain_ids.append(chain.get_id())\n valid_coords = []\n valid_c_alpha_coords = []\n valid_n_coords = []\n valid_c_coords = []\n valid_lengths = []\n invalid_chain_ids = []\n for i, chain in enumerate(rec):\n # print(\"chain:\",i,chain, len(valid_coords), len(valid_chain_ids), len(coords), coords[0][0].shape, len(coords[0]))\n if i != chain_id:\n continue\n if chain.get_id() in valid_chain_ids:\n valid_coords.append(coords[0])\n valid_c_alpha_coords.append(c_alpha_coords[0])\n valid_n_coords.append(n_coords[0])\n valid_c_coords.append(c_coords[0])\n valid_lengths.append(lengths[0])\n else:\n invalid_chain_ids.append(chain.get_id())\n # list with n_residues arrays: [n_atoms, 3]\n coords = [item for sublist in valid_coords for item in sublist]\n if len(valid_c_alpha_coords) == 0:\n return False, False, False, False, False,False\n c_alpha_coords = np.concatenate(valid_c_alpha_coords, axis=0) # [n_residues, 3]\n n_coords = np.concatenate(valid_n_coords, axis=0) # [n_residues, 3]\n c_coords = np.concatenate(valid_c_coords, axis=0) # [n_residues, 3]\n\n for 
invalid_id in invalid_chain_ids:\n rec.detach_child(invalid_id)\n\n assert len(c_alpha_coords) == len(n_coords)\n assert len(c_alpha_coords) == len(c_coords)\n assert sum(valid_lengths) == len(c_alpha_coords)\n return rec, coords, c_alpha_coords, n_coords, c_coords,seq\n\n def len(self):\n return len(self.dataset)\n\n def get_statistic_info(self):\n node_num = torch.zeros(self.length_total)\n edge_num = torch.zeros(self.length_total)\n for i in tqdm(range(self.length_total)):\n graph = self.get(i)\n node_num[i] = graph.x.shape[0]\n edge_num[i] = graph.edge_index.shape[1]\n num_node_min = torch.min(node_num)\n num_node_max = torch.max(node_num)\n num_node_avg = torch.mean(node_num)\n num_edge_min = torch.min(edge_num)\n num_edge_max = torch.max(edge_num)\n num_edge_avg = torch.mean(edge_num)\n print(f'Graph Num: {self.length_total}')\n print(\n f'Min Nodes: {num_node_min:.2f} Max Nodes: {num_node_max:.2f}. Avg Nodes: {num_node_avg:.2f}')\n print(\n f'Min Edges: {num_edge_min:.2f} Max Edges: {num_edge_max:.2f}. Avg Edges: {num_edge_avg:.2f}')\n\n def _get_noise(self, token_len: int, prob: List=[]):\n prob = prob if prob else [0.08, 0.05, 0.04, 0.06, 0.01, 0.04, 0.07, 0.07, 0.02, 0.06, 0.1, 0.06,\n 0.02, 0.04, 0.04, 0.06, 0.05, 0.01, 0.03, 0.07]\n multant_pos = ((torch.rand(token_len) <= self.p)).nonzero().flatten()\n if len(multant_pos) == 0:\n return None, None\n multant_trg = torch.multinomial(torch.tensor(prob), len(multant_pos), replacement=True)\n return multant_pos, multant_trg\n \n \n def _token_rep_noise(self, data, multant_pos, multant_trg, rep_noise_type='window_3'):\n num_classes = 20\n multant_rep = data.token_rep.clone()\n for mut_pos, mut_trg in zip(multant_pos, multant_trg):\n mut_trg_ = F.one_hot(mut_trg, num_classes=num_classes)\n if rep_noise_type == 'mean':\n trg_rep = data.token_rep[(data.x[:,:20] == mut_trg_).sum(1) == num_classes].mean(0)\n if torch.isnan(trg_rep).sum() > 0:\n continue\n multant_rep[mut_pos] = trg_rep\n elif \"window\" in rep_noise_type:\n window_size = int(rep_noise_type.split(\"_\")[-1])\n start_pos = mut_pos - math.ceil(window_size/2)\n end_pos = start_pos + window_size\n if end_pos > len(data.token_rep):\n start_pos = mut_pos - window_size\n trg_rep = data.token_rep[start_pos:].mean(0)\n elif start_pos < 0:\n end_pos = window_size\n trg_rep = data.token_rep[:end_pos].mean(0)\n else:\n trg_rep = data.token_rep[start_pos:end_pos].mean(0)\n multant_rep[mut_pos] = trg_rep\n return multant_rep\n\n def get(self, idx):\n # idx_protein = idx\n # idx_x0, idx_x1 = self.slices['x'][idx_protein], self.slices['x'][idx_protein + 1]\n # idx_edge0, idx_edge1 = self.slices['edge_index'][idx_protein], self.slices['edge_index'][idx_protein + 1]\n \n # data = Data(\n # x=self.data.x[idx_x0:idx_x1, :],\n # pos=self.data.pos[idx_x0:idx_x1, :],\n # edge_index=self.data.edge_index[:, idx_edge0:idx_edge1],\n # edge_attr=self.data.edge_attr[idx_edge0:idx_edge1, :],\n # lenth=idx_x1-idx_x0\n # )\n data = self.dataset[idx]\n\n token_len = data.x.shape[0]\n data.y = data.x[:token_len, :self.num_residue_type].argmax(1)\n multant_pos, multant_trg = self._get_noise(token_len=token_len)\n if multant_pos is not None:\n noisey = data.x[:, :20].argmax(dim=1)\n noisey[multant_pos] = multant_trg\n data.x[:,:20] = F.one_hot(noisey, num_classes=20)\n \n return data\n \n\n def find_idx(self, idx_protein, amino_idx):\n idx = (self.distances[idx_protein][:-1, amino_idx]< self.micro_radius).nonzero(as_tuple=True)[0]\n return idx\n \n def get_calpha_graph_single(self, graph, idx_protein, 
amino_idx):\n choosen_amino_idx = self.find_idx(idx_protein, amino_idx)\n keep_edge_index = []\n for edge_idx in range(graph.num_edges):\n edge = graph.edge_index.t()[edge_idx]\n if (edge[0] in choosen_amino_idx) and (edge[1] in choosen_amino_idx):\n keep_edge_index.append(edge_idx)\n graph1 = Data(x=graph.x[choosen_amino_idx, :],\n pos=graph.pos[choosen_amino_idx, :],\n edge_index=graph.edge_index[:, keep_edge_index],\n edge_attr=graph.edge_attr[keep_edge_index, :],\n mu_r_norm=graph.mu_r_norm[choosen_amino_idx, :])\n return graph1\n \n def __repr__(self) -> str:\n return f'{self.__class__.__name__}()'\n \n def distance_featurizer(self, dist_list, divisor) -> torch.Tensor:\n # you want to use a divisor that is close to 4/7 times the average distance that you want to encode\n length_scale_list = [1.5 ** x for x in range(15)]\n center_list = [0. for _ in range(15)]\n num_edge = len(dist_list)\n dist_list = np.array(dist_list)\n transformed_dist = [np.exp(- ((dist_list / divisor) ** 2) / float(length_scale))\n for length_scale, center in zip(length_scale_list, center_list)]\n transformed_dist = np.array(transformed_dist).T\n transformed_dist = transformed_dist.reshape((num_edge, -1))\n return torch.from_numpy(transformed_dist.astype(np.float32))" }, { "identifier": "MutantDataset", "path": "src/dataset/mutant_dataset.py", "snippet": "class MutantDataset(Dataset):\n r\"\"\"\n Args:\n root (string): Root directory where the dataset should be saved.\n name (string): The name of the dataset.\n raw_dir (string, optional): Root directory where the\n original dataset stored(default: :obj:`None`)\n\n num_residue_type (int, optional): The number of amino acid types.\n (default: obj:'20')\n micro_radius (int, optional): The radius of micro-environment\n centered on the mask node. (default: obj:'20')\n c_alpha_max_neighbors (int, optional): The number of maximum\n connected nodes. (default: obj:'10')\n cutoff (int, optional): The maximum connected nodes distance\n (default: obj:'30')\n seq_dist_cut (int, optional): one-hot encoding the sequence distance\n edge attribute\n (default: obj:)\n [0.25,0.5,0.75,0.9,0.95,0.98,0.99]\n [ 2. 3. 13. 63. 127. 247. 347.]\n\n # use_localdatastet (bool) (bool,optional): If :obj:'True', online dataset\n # will be downloaded. If not, local pdb files will be used\n # (default: obj:'True')\n\n transform (callable, optional): A function/transform that takes in an\n :obj:`torch_geometric.data.Data` object and returns a transformed\n version. The data object will be transformed before every access.\n (default: :obj:`None`)\n pre_transform (callable, optional): A function/transform that takes in\n an :obj:`torch_geometric.data.Data` object and returns a\n transformed version. The data object will be transformed before\n being saved to disk. (default: :obj:`None`)\n pre_filter (callable, optional): A function that takes in an\n :obj:`torch_geometric.data.Data` object and returns a boolean\n value, indicating whether the data object should be included in the\n final dataset. 
(default: :obj:`None`)\n \"\"\"\n allowable_features = {\n 'possible_atomic_num_list': list(range(1, 119)) + ['misc'],\n 'possible_chirality_list': [\n 'CHI_UNSPECIFIED',\n 'CHI_TETRAHEDRAL_CW',\n 'CHI_TETRAHEDRAL_CCW',\n 'CHI_OTHER'\n ],\n 'possible_degree_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 'misc'],\n 'possible_numring_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_implicit_valence_list': [0, 1, 2, 3, 4, 5, 6, 'misc'],\n 'possible_formal_charge_list': [-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5, 'misc'],\n 'possible_numH_list': [0, 1, 2, 3, 4, 5, 6, 7, 8, 'misc'],\n 'possible_number_radical_e_list': [0, 1, 2, 3, 4, 'misc'],\n 'possible_hybridization_list': [\n 'SP', 'SP2', 'SP3', 'SP3D', 'SP3D2', 'misc'\n ],\n 'possible_is_aromatic_list': [False, True],\n 'possible_is_in_ring3_list': [False, True],\n 'possible_is_in_ring4_list': [False, True],\n 'possible_is_in_ring5_list': [False, True],\n 'possible_is_in_ring6_list': [False, True],\n 'possible_is_in_ring7_list': [False, True],\n 'possible_is_in_ring8_list': [False, True],\n 'possible_amino_acids': ['ALA', 'ARG', 'ASN', 'ASP', 'CYS', 'GLN', 'GLU', 'GLY', 'HIS', 'ILE', 'LEU', 'LYS', 'MET',\n 'PHE', 'PRO', 'SER', 'THR', 'TRP', 'TYR', 'VAL', 'HIP', 'HIE', 'TPO', 'HID', 'LEV', 'MEU',\n 'PTR', 'GLV', 'CYT', 'SEP', 'HIZ', 'CYM', 'GLM', 'ASQ', 'TYS', 'CYX', 'GLZ', 'misc'],\n 'possible_atom_type_2': ['C*', 'CA', 'CB', 'CD', 'CE', 'CG', 'CH', 'CZ', 'N*', 'ND', 'NE', 'NH', 'NZ', 'O*', 'OD',\n 'OE', 'OG', 'OH', 'OX', 'S*', 'SD', 'SG', 'misc'],\n 'possible_atom_type_3': ['C', 'CA', 'CB', 'CD', 'CD1', 'CD2', 'CE', 'CE1', 'CE2', 'CE3', 'CG', 'CG1', 'CG2', 'CH2',\n 'CZ', 'CZ2', 'CZ3', 'N', 'ND1', 'ND2', 'NE', 'NE1', 'NE2', 'NH1', 'NH2', 'NZ', 'O', 'OD1',\n 'OD2', 'OE1', 'OE2', 'OG', 'OG1', 'OH', 'OXT', 'SD', 'SG', 'misc'],\n }\n amino_acids_type = ['A', 'R', 'N', 'D', 'C', 'Q', 'E', 'G', 'H', 'I',\n 'L', 'K', 'M', 'F', 'P', 'S', 'T', 'W', 'Y', 'V']\n\n def __init__(self, root: str, name: str, raw_dir: str,\n num_residue_type: int = 20,\n micro_radius: int = 20,\n c_alpha_max_neighbors: int = 10,\n cutoff: int = 30,\n seq_dist_cut: int = 64,\n use_micro: bool = False,\n use_angle: bool = False,\n use_omega: bool = False,\n transform: Optional[Callable] = None,\n pre_transform: Optional[Callable] = None,\n pre_filter: Optional[Callable] = None,\n divide_num: int = 1,\n divide_idx: int = 0,\n replace_graph: bool = False,\n replace_process: bool = False\n ):\n self.divide_num = divide_num\n self.divide_idx = divide_idx\n self.replace_graph = replace_graph\n self.replace_process = replace_process\n\n self.root = root\n self.name = name\n self.raw_root = raw_dir\n self.num_residue_type = num_residue_type\n self.micro_radius = micro_radius\n self.c_alpha_max_neighbors = c_alpha_max_neighbors\n self.cutoff = cutoff\n self.seq_dist_cut = seq_dist_cut\n self.use_micro = use_micro\n self.use_angle = use_angle\n self.use_omega = use_omega\n \n self.protein_names = []\n self.wrong_protein_names = []\n self.total_protein_names = []\n if os.path.exists(self.total_protein_name_file):\n self.protein_names = open(self.saved_protein_name_file, 'r').read().splitlines()\n if os.path.exists(self.wrong_protein_name_file):\n self.wrong_protein_names = open(self.wrong_protein_name_file, 'r').read().splitlines()\n if os.path.exists(self.total_protein_name_file):\n self.total_protein_names = open(self.total_protein_name_file, 'r').read().splitlines()\n \n # in A. Default is 1.40 roughly the radius of a water molecule.\n # resolution of the surface of each atom. 
Default is 100. A higher number of points results in more precise measurements, but slows down the calculation.\n self.sr = ShrakeRupley(probe_radius=1.4, n_points=100) \n self.biopython_parser = PDBParser()\n\n self.saved_graph_path = self.mk_saved_graph_path()\n super().__init__(root, transform, pre_transform, pre_filter)\n # After processing protein from pdb --> Data\n \n self.length_total = len(self.protein_names)\n\n @property\n def raw_file_names(self) -> str:\n return self.raw_root\n\n @property\n def raw_dir(self) -> str:\n return self.raw_root\n\n def mk_saved_graph_path(self) -> str:\n os.makedirs(os.path.join(self.root, self.name.capitalize()), exist_ok=True)\n graph_dir = os.path.join(self.root, self.name.capitalize(), 'graph')\n os.makedirs(graph_dir, exist_ok=True)\n return graph_dir\n\n @property\n def total_protein_name_file(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'total_proteins.txt')\n\n @property\n def saved_protein_name_file(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'saved_proteins.txt')\n\n @property\n def wrong_protein_name_file(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'wrong_proteins.txt')\n \n @property\n def processed_dir(self) -> str:\n return os.path.join(self.root, self.name.capitalize(), 'processed')\n\n @property\n def processed_file_names(self) -> str:\n return [p+\".pt\" for p in self.protein_names]\n\n def download(self):\n pass\n\n def process(self):\n # if self.replace_graph:\n self.generate_protein_graph_evaluation()\n\n exist_proteins = []\n proteins = open(self.saved_protein_name_file, 'r').read().splitlines()\n for p in proteins:\n file = p + '.pt'\n if os.path.exists(os.path.join(self.saved_graph_path, file)):\n exist_proteins.append(file)\n\n protein_num = len(exist_proteins)\n if (not self.replace_process) and (len(os.listdir(self.processed_dir)) >= protein_num):\n return 0\n\n process_bar = tqdm(exist_proteins)\n for protein in process_bar:\n process_bar.set_description(f\"Processing {protein}\")\n \n graph_data = torch.load(os.path.join(self.saved_graph_path, protein))\n tmpseq = [one_letter[amino] for amino in graph_data.seq]\n graph_data.seq = \"\".join(tmpseq)\n\n if self.pre_filter is not None:\n graph_data = self.pre_filter(graph_data)\n\n if self.pre_transform is not None:\n graph_data = self.pre_transform(graph_data)\n \n saved_prcessed_name = os.path.join(self.processed_dir, protein)\n torch.save(graph_data, saved_prcessed_name)\n\n def generate_protein_graph_evaluation(self):\n self.total_protein_names = sorted(os.listdir(self.raw_dir))\n process_bar = tqdm(self.total_protein_names)\n for name in process_bar:\n process_bar.set_description(f\"Processing {name}\")\n protein_dir = os.path.join(self.raw_dir, name)\n \n if os.path.exists(os.path.join(self.saved_graph_path, name + '.pt')) or not os.path.isdir(protein_dir):\n continue\n\n pdb_suffix = \".pdb\"\n pdb_file = os.path.join(protein_dir, name + pdb_suffix)\n assert os.path.exists(pdb_file), f\"{pdb_file} does not exist\"\n\n rec, rec_coords, c_alpha_coords, n_coords, c_coords,seq = self.get_receptor_inference(\n pdb_file)\n\n rec_graph = self.get_calpha_graph(rec, c_alpha_coords, n_coords, c_coords,seq)\n if not rec_graph:\n self.wrong_protein_names.append(name)\n continue\n torch.save(rec_graph, os.path.join(self.saved_graph_path, name + '.pt'))\n \n with open(self.total_protein_name_file, 'w') as fp:\n for item in self.total_protein_names:\n fp.writelines(\"%s\\n\" % item)\n print(f\"Total 
proteins: {self.total_protein_names}\")\n \n self.protein_names = sorted([name.split(\".\")[0] for name in os.listdir(self.saved_graph_path)])\n with open(self.saved_protein_name_file, 'w') as fp:\n for item in self.protein_names:\n fp.writelines(\"%s\\n\" % item)\n \n with open(self.wrong_protein_name_file, 'w') as fp:\n for item in self.wrong_protein_names:\n fp.writelines(\"%s\\n\" % item)\n print(f\"Wrong proteins: {self.wrong_protein_names}\")\n\n def rec_residue_featurizer(self, rec, one_hot=True, add_feature=None):\n num_res = len([_ for _ in rec.get_residues()])\n num_feature = 2\n if add_feature.any():\n num_feature += add_feature.shape[1]\n res_feature = torch.zeros(num_res, self.num_residue_type + num_feature)\n count = 0\n self.sr.compute(rec, level=\"R\")\n for residue in rec.get_residues():\n sasa = residue.sasa\n for atom in residue:\n if atom.name == 'CA':\n bfactor = atom.bfactor\n assert not np.isinf(bfactor)\n assert not np.isnan(bfactor)\n assert not np.isinf(sasa)\n assert not np.isnan(sasa)\n\n residx = safe_index(\n self.allowable_features['possible_amino_acids'], residue.get_resname())\n res_feat_1 = one_hot_res(\n residx, num_residue_type=self.num_residue_type) if one_hot else [residx]\n if not res_feat_1:\n return False\n res_feat_1.append(sasa)\n res_feat_1.append(bfactor)\n if num_feature > 2:\n res_feat_1.extend(list(add_feature[count, :]))\n res_feature[count, :] = torch.tensor(\n res_feat_1, dtype=torch.float32)\n count += 1\n\n for k in range(self.num_residue_type, self.num_residue_type + 2):\n mean = res_feature[:, k].mean()\n std = res_feature[:, k].std()\n res_feature[:, k] = (res_feature[:, k] - mean) / (std + 0.000000001)\n return res_feature\n\n def get_node_features(self, n_coords, c_coords, c_alpha_coords, coord_mask, with_coord_mask=True, use_angle=False, use_omega=False):\n num_res = n_coords.shape[0]\n if use_omega:\n num_angle_type = 3\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res-1):\n # These angles are called φ (phi) which involves the backbone atoms C-N-Cα-C\n angles[i, 0] = dihedral(c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i+1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i+1])\n angles[i, 2] = dihedral(c_alpha_coords[i], c_coords[i], n_coords[i+1], c_alpha_coords[i+1])\n else:\n num_angle_type = 2\n angles = np.zeros((num_res, num_angle_type))\n for i in range(num_res-1):\n # These angles are called φ (phi) which involves the backbone atoms C-N-Cα-C\n angles[i, 0] = dihedral(c_coords[i], n_coords[i], c_alpha_coords[i], n_coords[i+1])\n # psi involves the backbone atoms N-Cα-C-N.\n angles[i, 1] = dihedral(n_coords[i], c_alpha_coords[i], c_coords[i], n_coords[i+1])\n if use_angle:\n node_scalar_features = angles\n else:\n node_scalar_features = np.zeros((num_res, num_angle_type*2))\n for i in range(num_angle_type):\n node_scalar_features[:, 2*i] = np.sin(angles[:, i])\n node_scalar_features[:, 2*i + 1] = np.cos(angles[:, i])\n\n if with_coord_mask:\n node_scalar_features = torch.cat([\n node_scalar_features,\n coord_mask.float().unsqueeze(-1)\n ], dim=-1)\n node_vector_features = None\n return node_scalar_features, node_vector_features\n\n def get_calpha_graph(self, rec, c_alpha_coords, n_coords, c_coords,seq):\n scalar_feature, vec_feature = self.get_node_features(\n n_coords, c_coords, c_alpha_coords, coord_mask=None, \n with_coord_mask=False, use_angle=self.use_angle, use_omega=self.use_omega\n )\n # Extract 3D 
coordinates and n_i,u_i,v_i\n # vectors of representative residues ################\n residue_representatives_loc_list = []\n n_i_list = []\n u_i_list = []\n v_i_list = []\n for i, residue in enumerate(rec.get_residues()):\n n_coord = n_coords[i]\n c_alpha_coord = c_alpha_coords[i]\n c_coord = c_coords[i]\n u_i = (n_coord - c_alpha_coord) / np.linalg.norm(n_coord - c_alpha_coord)\n t_i = (c_coord - c_alpha_coord) / np.linalg.norm(c_coord - c_alpha_coord)\n n_i = np.cross(u_i, t_i) / np.linalg.norm(np.cross(u_i, t_i)) # main chain\n v_i = np.cross(n_i, u_i)\n assert (math.fabs(np.linalg.norm(v_i) - 1.) < 1e-5), \"protein utils protein_to_graph_dips, v_i norm larger than 1\"\n n_i_list.append(n_i)\n u_i_list.append(u_i)\n v_i_list.append(v_i)\n residue_representatives_loc_list.append(c_alpha_coord)\n \n # (N_res, 3)\n residue_representatives_loc_feat = np.stack(residue_representatives_loc_list, axis=0)\n \n n_i_feat = np.stack(n_i_list, axis=0)\n u_i_feat = np.stack(u_i_list, axis=0)\n v_i_feat = np.stack(v_i_list, axis=0)\n num_residues = len(c_alpha_coords)\n if num_residues <= 1:\n raise ValueError(f\"rec contains only 1 residue!\")\n\n ################### Build the k-NN graph ##############################\n assert num_residues == residue_representatives_loc_feat.shape[0]\n assert residue_representatives_loc_feat.shape[1] == 3\n distances = spa.distance.cdist(c_alpha_coords, c_alpha_coords)\n\n src_list = []\n dst_list = []\n dist_list = []\n mean_norm_list = []\n for i in range(num_residues):\n dst = list(np.where(distances[i, :] < self.cutoff)[0])\n dst.remove(i)\n if self.c_alpha_max_neighbors != None and len(dst) > self.c_alpha_max_neighbors:\n dst = list(np.argsort(distances[i, :]))[1: self.c_alpha_max_neighbors + 1]\n if len(dst) == 0:\n # choose second because first is i itself\n dst = list(np.argsort(distances[i, :]))[1:2]\n log(f'The c_alpha_cutoff {self.cutoff} was too small for one c_alpha such that it had no neighbors. 
So we connected it to the closest other c_alpha')\n assert i not in dst\n \n src = [i] * len(dst)\n src_list.extend(src)\n dst_list.extend(dst)\n valid_dist = list(distances[i, dst])\n dist_list.extend(valid_dist)\n valid_dist_np = distances[i, dst]\n \n sigma = np.array([1., 2., 5., 10., 30.]).reshape((-1, 1))\n # (sigma_num, neigh_num)\n weights = softmax(-valid_dist_np.reshape((1, -1)) ** 2 / sigma, axis=1)\n # print(weights)\n assert weights[0].sum() > 1 - 1e-2 and weights[0].sum() < 1.01\n # (neigh_num, 3)\n diff_vecs = residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :]\n # (sigma_num, 3)\n mean_vec = weights.dot(diff_vecs)\n # (sigma_num,)\n denominator = weights.dot(np.linalg.norm(diff_vecs, axis=1))\n # (sigma_num,)\n mean_vec_ratio_norm = np.linalg.norm(mean_vec, axis=1) / denominator\n mean_norm_list.append(mean_vec_ratio_norm)\n \n assert len(src_list) == len(dst_list)\n assert len(dist_list) == len(dst_list)\n \n residue_representatives_loc_feat = torch.from_numpy(residue_representatives_loc_feat.astype(np.float32))\n x = self.rec_residue_featurizer(rec, one_hot=True, add_feature=scalar_feature)\n \n if isinstance(x, bool) and (not x):\n return False\n\n graph = Data(\n x=x,\n pos=residue_representatives_loc_feat,\n edge_attr=self.get_edge_features(src_list, dst_list, dist_list, divisor=4),\n edge_index=torch.tensor([src_list, dst_list]),\n edge_dist=torch.tensor(dist_list),\n distances=torch.tensor(distances),\n mu_r_norm=torch.from_numpy(np.array(mean_norm_list).astype(np.float32)),\n seq=seq\n )\n\n # Loop over all edges of the graph and build the various p_ij, q_ij, k_ij, t_ij pairs\n edge_feat_ori_list = []\n for i in range(len(dist_list)):\n src = src_list[i]\n dst = dst_list[i]\n # place n_i, u_i, v_i as lines in a 3x3 basis matrix\n basis_matrix = np.stack((n_i_feat[dst, :], u_i_feat[dst, :], v_i_feat[dst, :]), axis=0)\n p_ij = np.matmul(\n basis_matrix,\n residue_representatives_loc_feat[src, :] - residue_representatives_loc_feat[dst, :]\n )\n q_ij = np.matmul(basis_matrix, n_i_feat[src, :]) # shape (3,)\n k_ij = np.matmul(basis_matrix, u_i_feat[src, :])\n t_ij = np.matmul(basis_matrix, v_i_feat[src, :])\n s_ij = np.concatenate((p_ij, q_ij, k_ij, t_ij), axis=0) # shape (12,)\n edge_feat_ori_list.append(s_ij)\n\n edge_feat_ori_feat = np.stack(edge_feat_ori_list, axis=0) # shape (num_edges, 4, 3)\n edge_feat_ori_feat = torch.from_numpy(edge_feat_ori_feat.astype(np.float32))\n\n graph.edge_attr = torch.cat([graph.edge_attr, edge_feat_ori_feat], axis=1) # (num_edges, 17)\n #graph = self.remove_node(graph, graph.x.shape[0]-1)\n # self.get_calpha_graph_single(graph, 6)\n return graph\n\n def remove_node(self, graph, node_idx):\n new_graph = Data.clone(graph)\n # delete node\n new_graph.x = torch.cat(\n [new_graph.x[:node_idx, :], new_graph.x[node_idx+1:, :]])\n new_graph.pos = torch.cat(\n [new_graph.pos[:node_idx, :], new_graph.pos[node_idx+1:, :]])\n new_graph.mu_r_norm = torch.cat(\n [new_graph.mu_r_norm[:node_idx, :], new_graph.mu_r_norm[node_idx+1:, :]])\n\n # delete edge\n keep_edge = (torch.sum(new_graph.edge_index == node_idx, dim=0) == 0)\n new_graph.edge_index = new_graph.edge_index[:, keep_edge]\n new_graph.edge_attr = new_graph.edge_attr[keep_edge, :]\n return new_graph\n\n def get_edge_features(self, src_list, dst_list, dist_list, divisor=4):\n seq_edge = torch.absolute(torch.tensor(src_list) - torch.tensor(dst_list)).reshape(-1, 1)\n seq_edge = torch.where(seq_edge > self.seq_dist_cut, self.seq_dist_cut, seq_edge)\n seq_edge = 
F.one_hot(seq_edge, num_classes=self.seq_dist_cut + 1).reshape((-1, self.seq_dist_cut + 1))\n \n contact_sig = torch.where(torch.tensor(dist_list) <= 8, 1, 0).reshape(-1, 1)\n # avg distance = 7. So divisor = (4/7)*7 = 4\n dist_fea = self.distance_featurizer(dist_list, divisor=divisor)\n \n return torch.concat([seq_edge, dist_fea, contact_sig], dim=-1)\n\n def get_receptor_inference(self, rec_path):\n with warnings.catch_warnings():\n warnings.filterwarnings(\"ignore\", category=PDBConstructionWarning)\n structure = self.biopython_parser.get_structure('random_id', rec_path)\n rec = structure[0]\n coords = []\n c_alpha_coords = []\n n_coords = []\n c_coords = []\n valid_chain_ids = []\n lengths = []\n seq = []\n for i, chain in enumerate(rec):\n chain_coords = [] # num_residues, num_atoms, 3\n chain_c_alpha_coords = []\n chain_n_coords = []\n chain_c_coords = []\n count = 0\n invalid_res_ids = []\n for res_idx, residue in enumerate(chain):\n if residue.get_resname() == 'HOH':\n invalid_res_ids.append(residue.get_id())\n continue\n residue_coords = []\n c_alpha, n, c = None, None, None\n for atom in residue:\n if atom.name == 'CA':\n c_alpha = list(atom.get_vector())\n seq.append(str(residue).split(\" \")[1])\n if atom.name == 'N':\n n = list(atom.get_vector())\n if atom.name == 'C':\n c = list(atom.get_vector())\n residue_coords.append(list(atom.get_vector()))\n # only append residue if it is an amino acid and not some weired molecule that is part of the complex\n if c_alpha != None and n != None and c != None:\n chain_c_alpha_coords.append(c_alpha)\n chain_n_coords.append(n)\n chain_c_coords.append(c)\n chain_coords.append(np.array(residue_coords))\n count += 1\n else:\n invalid_res_ids.append(residue.get_id())\n for res_id in invalid_res_ids:\n chain.detach_child(res_id)\n lengths.append(count)\n coords.append(chain_coords)\n c_alpha_coords.append(np.array(chain_c_alpha_coords))\n n_coords.append(np.array(chain_n_coords))\n c_coords.append(np.array(chain_c_coords))\n if len(chain_coords) > 0:\n valid_chain_ids.append(chain.get_id())\n valid_coords = []\n valid_c_alpha_coords = []\n valid_n_coords = []\n valid_c_coords = []\n valid_lengths = []\n invalid_chain_ids = []\n for i, chain in enumerate(rec):\n if chain.get_id() in valid_chain_ids:\n valid_coords.append(coords[i])\n valid_c_alpha_coords.append(c_alpha_coords[i])\n valid_n_coords.append(n_coords[i])\n valid_c_coords.append(c_coords[i])\n valid_lengths.append(lengths[i])\n else:\n invalid_chain_ids.append(chain.get_id())\n # list with n_residues arrays: [n_atoms, 3]\n coords = [item for sublist in valid_coords for item in sublist]\n\n c_alpha_coords = np.concatenate(valid_c_alpha_coords, axis=0) # [n_residues, 3]\n n_coords = np.concatenate(valid_n_coords, axis=0) # [n_residues, 3]\n c_coords = np.concatenate(valid_c_coords, axis=0) # [n_residues, 3]\n\n for invalid_id in invalid_chain_ids:\n rec.detach_child(invalid_id)\n\n assert len(c_alpha_coords) == len(n_coords)\n assert len(c_alpha_coords) == len(c_coords)\n assert sum(valid_lengths) == len(c_alpha_coords)\n return rec, coords, c_alpha_coords, n_coords, c_coords,seq\n\n def len(self):\n return len(os.listdir(self.saved_graph_path))\n\n def get_statistic_info(self):\n node_num = torch.zeros(self.length_total)\n edge_num = torch.zeros(self.length_total)\n for i in tqdm(range(self.length_total)):\n graph = self.get(i)\n node_num[i] = graph.x.shape[0]\n edge_num[i] = graph.edge_index.shape[1]\n # if i == 1000:\n # break\n num_node_min = torch.min(node_num)\n num_node_max = 
torch.max(node_num)\n num_node_avg = torch.mean(node_num)\n num_edge_min = torch.min(edge_num)\n num_edge_max = torch.max(edge_num)\n num_edge_avg = torch.mean(edge_num)\n print(f'Graph Num: {self.length_total}')\n print(\n f'Min Nodes: {num_node_min:.2f} Max Nodes: {num_node_max:.2f}. Avg Nodes: {num_node_avg:.2f}')\n print(\n f'Min Edges: {num_edge_min:.2f} Max Edges: {num_edge_max:.2f}. Avg Edges: {num_edge_avg:.2f}')\n\n def get(self, idx):\n protein_name = self.protein_names[idx]\n data = torch.load(os.path.join(self.processed_dir, f'{protein_name}.pt'))\n notes_number = list((data.x[:, :20].argmax(dim=1)).size())[0]\n data.y = torch.argmax(data.x[torch.tensor(range(notes_number)), :self.num_residue_type], dim=1)\n data.protein_name = protein_name\n return data\n\n def find_idx(self, idx_protein, amino_idx):\n idx = (self.distances[idx_protein][:-1, amino_idx]\n < self.micro_radius).nonzero(as_tuple=True)[0]\n return idx\n\n def get_calpha_graph_single(self, graph, idx_protein, amino_idx):\n choosen_amino_idx = self.find_idx(idx_protein, amino_idx)\n keep_edge_index = []\n \n for edge_idx in range(graph.num_edges):\n edge = graph.edge_index.t()[edge_idx]\n if (edge[0] in choosen_amino_idx) and (edge[1] in choosen_amino_idx):\n keep_edge_index.append(edge_idx)\n \n graph1 = Data(\n x=graph.x[choosen_amino_idx, :],\n pos=graph.pos[choosen_amino_idx, :],\n edge_index=graph.edge_index[:, keep_edge_index],\n edge_attr=graph.edge_attr[keep_edge_index, :],\n mu_r_norm=graph.mu_r_norm[choosen_amino_idx, :]\n )\n return graph1\n\n def __repr__(self) -> str:\n return f'{self.__class__.__name__}{self.name.capitalize()}()'\n\n def distance_featurizer(self, dist_list, divisor) -> torch.Tensor:\n # you want to use a divisor that is close to 4/7 times the average distance that you want to encode\n length_scale_list = [1.5 ** x for x in range(15)]\n center_list = [0. for _ in range(15)]\n\n num_edge = len(dist_list)\n dist_list = np.array(dist_list)\n\n transformed_dist = [np.exp(- ((dist_list / divisor) ** 2) / float(length_scale))\n for length_scale, center in zip(length_scale_list, center_list)]\n\n transformed_dist = np.array(transformed_dist).T\n transformed_dist = transformed_dist.reshape((num_edge, -1))\n return torch.from_numpy(transformed_dist.astype(np.float32))" }, { "identifier": "NormalizeProtein", "path": "src/utils/dataset_utils.py", "snippet": "class NormalizeProtein(BaseTransform):\n r\"\"\"Centers and normalizes node positions to the interval :math:`(-1, 1)`\n (functional name: :obj:`normalize_scale`).\n \"\"\"\n\n def __init__(self, filename, skip_x=20, skip_edge_attr=64, safe_domi=1e-10):\n\n dic = torch.load(filename)\n self.skip_x = skip_x\n self.skip_edge_attr = skip_edge_attr\n self.safe_domi = safe_domi\n self.x_mean = dic['x_mean']\n self.x_std = dic['x_std']\n self.pos_mean = dic['pos_mean']\n self.pos_std = torch.mean(dic['pos_std'])\n self.edge_attr_mean = dic['edge_attr_mean']\n self.edge_attr_std = dic['edge_attr_std']\n\n def __call__(self, data):\n data.x[:, self.skip_x:] = (data.x[:, self.skip_x:] - self.x_mean[self.skip_x:]\n ).div_(self.x_std[self.skip_x:] + self.safe_domi)\n data.pos = data.pos - data.pos.mean(dim=-2, keepdim=False)\n data.pos = data.pos.div_(self.pos_std + self.safe_domi)\n data.edge_attr[:, self.skip_edge_attr:] = (data.edge_attr[:, self.skip_edge_attr:]\n - self.edge_attr_mean[self.skip_edge_attr:]).div_(self.edge_attr_std[self.skip_edge_attr:] + self.safe_domi)\n\n return data" } ]
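Both protein-graph dataset classes in the context list above compute the same 81-dimensional edge feature: a 65-way one-hot of the capped sequence separation (seq_dist_cut=64), a 15-scale Gaussian RBF expansion of the Cα–Cα distance, and a binary 8 Å contact flag. A minimal self-contained sketch of that featurization follows; the constants are copied from the snippets, while the function name `edge_features` is ours:

```python
# Minimal standalone sketch of the edge featurization used by both dataset
# classes above. Constants mirror the snippets: seq_dist_cut=64, divisor=4
# (~ (4/7) * the ~7 A average edge distance), 15 RBF length scales 1.5**k.
import numpy as np
import torch
import torch.nn.functional as F

def edge_features(src, dst, dist, seq_dist_cut=64, divisor=4):
    src, dst = torch.tensor(src), torch.tensor(dst)
    # one-hot of the sequence separation |src - dst|, capped at seq_dist_cut
    seq_sep = torch.clamp(torch.abs(src - dst), max=seq_dist_cut)
    seq_edge = F.one_hot(seq_sep, num_classes=seq_dist_cut + 1).float()
    # Gaussian RBF expansion of the spatial distance over 15 length scales
    dist = np.asarray(dist, dtype=np.float32)
    scales = np.array([1.5 ** k for k in range(15)], dtype=np.float32)
    rbf = torch.from_numpy(np.exp(-((dist[:, None] / divisor) ** 2) / scales[None, :]))
    # residue pairs whose C-alpha atoms lie within 8 A count as contacts
    contact = torch.from_numpy((dist <= 8).astype(np.float32)).reshape(-1, 1)
    return torch.cat([seq_edge, rbf, contact], dim=-1)

feats = edge_features(src=[0, 0, 1], dst=[1, 5, 2], dist=[3.8, 7.1, 3.9])
print(feats.shape)  # torch.Size([3, 81])  -> 65 + 15 + 1 columns
```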
import os, sys
import argparse

from src.dataset.cath_dataset import CathDataset
from src.dataset.mutant_dataset import MutantDataset
from src.utils.dataset_utils import NormalizeProtein
17,167
# set path
current_dir = os.getcwd()
sys.path.append(current_dir)

def build_cath_dataset(args, split):
# set path
current_dir = os.getcwd()
sys.path.append(current_dir)

def build_cath_dataset(args, split):
dataset = CathDataset(
0
2023-11-10 07:21:37+00:00
24k
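Taken together, the fields above form one next-line completion example: the code is cut right after the `def build_cath_dataset(args, split):` line, the gold continuation is `dataset = CathDataset(`, and the index `0` points back to the `CathDataset` entry at the head of this record's context list. A minimal sketch of how such a record might be scored is given below; the dictionary keys are assumptions made for illustration, not a documented schema:

```python
# Hypothetical consumer of a record like the one above, for a next-line
# completion benchmark. The field names here are assumed, not a documented
# schema; only the values are taken from this record.
record = {
    "code_prefix": "def build_cath_dataset(args, split):",  # last line of the cropped code
    "next_line": "dataset = CathDataset(",                  # gold continuation
    "gold_snippet_index": 0,  # -> the CathDataset entry in this record's context list
}

def exact_match(prediction: str, record: dict) -> bool:
    # score a model's proposed continuation against the gold next line,
    # ignoring leading/trailing whitespace
    return prediction.strip() == record["next_line"].strip()

print(exact_match("    dataset = CathDataset(", record))  # True
```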
atlantic-quantum/Shipyard
tests/printers/visualizer/test_visualize_pulse_sequences.py
[ { "identifier": "CoreType", "path": "shipyard/awg_core/awg_core.py", "snippet": "class CoreType(Enum):\n \"\"\"Enumeration of AWG Core types\"\"\"\n\n HD = \"HD\"\n QA = \"QA\"\n SG = \"SG\"" }, { "identifier": "ActivationRecord", "path": "shipyard/call_stack.py", "snippet": "class ActivationRecord:\n \"\"\"Activation Records for shipyard\"\"\"\n\n def __init__(\n self,\n name: str,\n ar_type: ARType,\n nesting_level: int,\n ):\n self.name = name\n self.type = ar_type\n self.nesting_level = nesting_level\n self.members = {}\n\n def __setitem__(self, key, value):\n self.members[key] = value\n LOGGER.debug(\"%s: %s\", key, value)\n\n def __getitem__(self, key):\n return self.members[key]\n\n def get(self, key, default=None):\n \"\"\"Gets a member of the activation record by key\"\"\"\n return self.members.get(key, default)\n\n def __str__(self):\n lines = [f\"{self.nesting_level}: {self.type.value} {self.name}\"]\n for name, val in self.members.items():\n lines.append(f\" {name:<20}: {val}\")\n\n return \"\\n\".join(lines)\n\n def __repr__(self):\n return self.__str__()" }, { "identifier": "ARType", "path": "shipyard/call_stack.py", "snippet": "class ARType(Enum):\n \"\"\"\n Enumeration of Acivation Record Types\n \"\"\"\n\n PROGRAM = \"PROGRAM\"\n EXTERN = \"EXTERN\"\n SUBROUTINE = \"SUBROUTINE\"\n CALIBRATION = \"CALIBRATION\"\n DEFCAL = \"DEFCAL\"\n GATE = \"GATE\"\n LOOP = \"LOOP\"" }, { "identifier": "Compiler", "path": "shipyard/compiler.py", "snippet": "class Compiler:\n version = \"0.1.1\"\n \"\"\"\n Compiler to compile openQASM programs to target programs for different AWG Cores.\n Currently supports compilation to ZI SEQC cores.\n\n Args:\n program_path (Path):\n Path object pointing to a qasm program file.\n setup (Setup | Path):\n Path object pointing to a experiment setup json file.\n frames_from_setup (bool, optional):\n If True, frame definitions and port declarations are generated from setup.\n If False, frame definitions and port declarations should be written\n explicitly in the qasm program.\n Defaults to False to preserve original behavior.\n \"\"\"\n\n def __init__(\n self,\n program_path: Path,\n setup: Setup | Path,\n frames_from_setup: bool = False,\n ) -> None:\n self.program_path = program_path\n self.program = CopyTransformer().visit_Program(self.load_program(program_path))\n setup = setup if isinstance(setup, Setup) else Setup.from_file(setup)\n if frames_from_setup:\n self._frames_from_setup(setup)\n self.setup = setup.to_internal()\n self.split_programs: dict[tuple[str, int, str], ast.Program] = {}\n self.split_compiled: dict[tuple[str, int, str], str] = {}\n self.core_settings: dict[tuple[str, int, str], list[tuple[str], Any]] = {}\n self.wfm_mapping: dict[tuple[str, int, str], dict[int, str]] = {}\n\n @staticmethod\n @lru_cache()\n def load_program(path: Path) -> ast.Program:\n \"\"\"\n Loads a qasm program as an AST from a file\n\n Args:\n path (Path): path to the qasm program file\n\n Returns:\n ast.Program: qasm program as an AST\n \"\"\"\n with open(path, encoding=\"utf_8\") as qasm_file:\n qasm_code = qasm_file.read()\n return parse(qasm_code)\n\n def compile(\n self,\n inputs: dict = None,\n printer_kwargs: dict = None,\n waveforms: dict[str, ndarray] | None = None,\n command_tables: dict[tuple[str, int, str], CommandTable] | None = None,\n ):\n \"\"\"\n Compile a single openQASM program into multiple programs for each\n AWG core in the setup\n\n Args:\n inputs (dict, optional):\n Dictionary of input values for the program. 
Defaults to None.\n Used to resolve input declarations in the program.\n printer_kwargs (dict, optional):\n Dictionary of keyword arguments to pass to the printer.\n See the printer documentation for more details.\n \"\"\"\n ResolveIODeclaration(inputs).visit(self.program)\n IncludeAnalyzer(self.program_path).visit(self.program)\n IncludeWaveforms(waveforms).visit(self.program)\n SemanticAnalyzer().visit(self.program)\n DurationTransformer().visit(self.program)\n TimingConstraints(self.setup, external_zi_function_dict()).visit(self.program)\n max_delay_obj = DetermineMaxDelay(\n self.program, self.setup, external_zi_function_dict()\n )\n extractor_obj = ShotsExtractor()\n extractor_obj.visit(self.program)\n signature = extractor_obj.create_signature()\n printer_kwargs = printer_kwargs or {}\n for instr, core_index, core_type in self.setup.cores():\n if command_tables:\n command_table = command_tables.get((instr, core_index, core_type))\n else:\n command_table = None\n ports = ports_for_core(self.setup, instr, core_index)\n split_program = CoreSplitter(ports).visit_Program(self.program)\n LOGGER.debug(\n \"Split Program before removing unused, core: (%s, %i, %s):\",\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", LazyRepr(qasm_dumps, [split_program]))\n for repetition in [\"1st pass\", \"2nd pass\"]:\n RemoveUnused(split_program)\n LOGGER.debug(\n \"Split Program after removing unused (%s), core: (%s, %i, %s):\",\n repetition,\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", LazyRepr(qasm_dumps, [split_program]))\n self.split_programs[(instr, core_index, core_type)] = split_program\n # todo dynamically choose printer based on instrument type\n InsertCTWaveforms(command_table).visit(split_program)\n printer = SEQCPrinter(\n io.StringIO(),\n self.setup,\n signature,\n max_delay_obj.result(),\n **printer_kwargs\n )\n printer.visit(split_program)\n compiled = printer.stream.getvalue()\n LOGGER.debug(\n \"Compiled Program, core: core: (%s, %i, %s):\",\n instr,\n core_index,\n core_type,\n )\n LOGGER.debug(\"\\n%s\", compiled)\n self.split_compiled[(instr, core_index, core_type)] = compiled\n self.core_settings[(instr, core_index, core_type)] = printer.settings()\n self.wfm_mapping[(instr, core_index, core_type)] = printer.wfm_mapping()\n\n @lru_cache()\n @staticmethod\n def cached_compile(\n program_path: Path,\n setup: Setup | Path,\n inputs: dict | None = None,\n printer_kwargs: dict | None = None,\n frames_from_setup: bool = False,\n ) -> \"Compiler\":\n \"\"\"Method to compile a program and cache the result.\n\n Args:\n program_path (Path):\n path to the qasm program file\n setup (Setup | Path):\n path to the laboratory setup file\n inputs (dict | None, optional):\n dictionary of input values for the program,\n used to resolve input declarations. Defaults to None.\n printer_kwargs (dict | None, optional):\n Dictionary of kwarg arguments to pass to the printer,\n see printer documentation for details. 
Defaults to None.\n frames_from_setup (bool, optional):\n If True, frame definitions and port declarations are generated from\n setup.\n If False, frame definitions and port declarations should be written\n explicitly in the qasm program.\n Defaults to False to preserve original behavior.\n\n Returns:\n Compiler: cached compiler object\n \"\"\"\n compiler = Compiler(program_path, setup, frames_from_setup)\n compiler.compile(inputs, printer_kwargs)\n return compiler\n\n def _frames_from_setup(self, setup: Setup) -> None:\n \"\"\"\n inserts a calibrationStatement after the defcalgrammar statement, the\n calibrationStatement created from the setup file\n\n Args:\n setup_path (Path): path to the setup file\n\n Raises:\n ValueError: if no calibration grammar is defined in the program\n ValueError: if the calibration grammar is not openpulse\n \"\"\"\n # make sure defcalgrammar has been define before inserting setup\n for i, statement in enumerate(self.program.statements):\n if isinstance(statement, ast.CalibrationGrammarDeclaration):\n break\n else:\n raise ValueError(\n \"No calibration grammar defined in program, cannot insert setup.\"\n )\n # make sure defcalgrammar is openpulse\n if statement.name != \"openpulse\":\n raise ValueError(\"calibration grammar be 'openpulse', \")\n # insert cal from setup after defcalgrammar statement\n self.program.statements.insert(i + 1, setup.get_qasm())" }, { "identifier": "Duration", "path": "shipyard/duration.py", "snippet": "class Duration(BaseModel):\n \"\"\"\n pydantic model for managing times/durations in openQASM programs\n\n Durations have both time and unit (ns, us, ms, s) (and dt which represents sample\n time at 2GS/s)\n\n Durations can be added to other Durations or numbers (int, float), they can also\n be compared to one another or to numbers (int, float)\n\n the native max/min python operations work with lists of Durations.\n\n The unit of a Duration can be changed using the 'set_unit' method.\n \"\"\"\n\n # todo consider rounding to nearest ps/fs to avoid floating point errors.\n time: float\n unit: TimeUnits = TimeUnits.dt\n\n def set_unit(self, unit: TimeUnits):\n \"\"\"\n Changes the unit of the Duration and updates the time to be represented in the\n new unit.\n\n Example:\n dur = Duration(time=100, unit=TimeUnits.ns)\n dur.set_unit(TimeUnits.us)\n\n # dur -> Duration(time=0.1, unit=TimeUnits.us)\n \"\"\"\n self.time = self.time * self.unit.value / unit.value\n self.unit = unit\n\n def _real_time(self) -> float:\n \"\"\"Calculates the time in seconds\n\n Returns:\n float: time in seconds\n \"\"\"\n return self.time * self.unit.value\n\n def __add__(self, other): # (self, other: Self) -> Self\n \"\"\"\n Adds Durations together or a number to a Duration\n\n Example (two Durations):\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n dur3 = dur1 + dur2 # dur3 -> Duration(time=101, unit=TimeUnits.ns)\n dur4 = dur2 + dur1 # dur3 -> Duration(time=0.101, unit=TimeUnits.us)\n\n Example (Duration and int or float):\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = dur1 + 10e-9 # dur2 -> Duration(time=11, unit.TimeUnits.ns)\n\n Args:\n other (Duration | int | float): the Duration or number to add to this\n duration\n\n Raises:\n ValueError: if 'other' is not a Durration, int or float\n\n Returns:\n Duration: sum of this Duration and other\n \"\"\"\n if isinstance(other, Duration):\n return Duration(\n time=self.time + other.time * other.unit.value / self.unit.value,\n unit=self.unit,\n )\n if 
isinstance(other, (int, float)):\n return Duration(time=self.time + other / self.unit.value, unit=self.unit)\n raise ValueError(f\"'+' not supported between {type(self)} and {type(other)}\")\n\n def __radd__(self, other):\n \"\"\"\n right addition, allows Durations to be added to numbers\n addition of Durations is complimentary\n\n Args:\n other (int | float): number Duration is being added to\n\n Returns:\n Duration: sum of this Duration and other\n \"\"\"\n return self.__add__(other)\n\n def __str__(self) -> str:\n \"\"\"\n Formats how Durations are printed\n Example:\n dur = Duration(time=16, unit=TimeUnits.ns)\n print(dur) -> '16 ns'\n\n Returns:\n str: formated string representation of Duration\n \"\"\"\n return f\"{self.time} {self.unit.name}\"\n\n def __lt__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is lower than another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 < dur2 -> True\n dur < 2 -> False\n dur < 0.1 -> False\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is lower than other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() < other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() < other\n raise ValueError(f\"'<' not supported between {type(self)} and {type(other)}\")\n\n def __gt__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is greater than another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 > dur2 -> False\n dur > 2 -> False\n dur > 0.1e-9 -> True\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is greater than other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() > other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() > other\n raise ValueError(f\"'>' not supported between {type(self)} and {type(other)}\")\n\n def __eq__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is equal to another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 == dur2 -> False\n dur1 == dur1 -> True\n dur == 1e-9 -> True\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is equal to other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() == other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() == other\n raise ValueError(f\"'==' not supported between {type(self)} and {type(other)}\")\n\n def __ne__(self, other) -> bool: # (self, other: Self) -> bool:\n \"\"\"\n Compares if this Duration is not equal to another Duration, int or Float\n\n Example:\n dur1 = Duration(time=1, unit=TimeUnits.ns)\n dur2 = Duration(time=0.1, unit=TimeUnits.us)\n\n dur1 != dur2 -> True\n dur1 != dur1 -> False\n dur != 1e-9 -> False\n\n Args:\n other (Duration | int | float): to compare to\n\n Raises:\n 
ValueError: if other is not a Duration, int or float\n\n Returns:\n bool:\n True if _real_time() value of this duration is equal to other,\n else False.\n \"\"\"\n if isinstance(other, Duration):\n return self._real_time() != other._real_time()\n if isinstance(other, (int, float)):\n return self._real_time() != other\n raise ValueError(f\"'!=' not supported between {type(self)} and {type(other)}\")" }, { "identifier": "TimeUnits", "path": "shipyard/duration.py", "snippet": "class TimeUnits(Enum):\n \"\"\"\n Enumerations of common time units\n ns, µs, us, ms, s\n\n and\n\n dt = 0.5e-9 <- timestep @ 2GS/s\n \"\"\"\n\n dt = 0.5e-9\n ns = 1e-9\n µs = 1e-6\n us = 1e-6\n ms = 1e-3\n s = 1" }, { "identifier": "DurationTransformer", "path": "shipyard/passes/duration_transformer.py", "snippet": "class DurationTransformer(GenericTransformer):\n \"\"\"\n QASM Transformer that transforms DurationLiterals to have units of samples (dt).\n\n Args:\n sample_rate (int):\n the sample rate that DurationLiterals will be transformed to.\n Default value = 2e9\n \"\"\"\n\n def __init__(self, sample_rate: int = 2e9) -> None:\n self.sample_rate = sample_rate\n super().__init__()\n\n # pylint: disable=C0103\n # snake_case naming style\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> ast.DurationLiteral:\n \"\"\"\n DurationLiteral node Transformer. Transforms DurationLiteral nodes from any\n unit to a node with sample units (dt).\n\n Example:\n in: node = ast.DurationLiteral(value=20, unit=ast.TimeUnit.ns)\n\n usage: DurationTransformer().visit(node)\n\n out: ast.DurationLiteral(value=40, unit=ast.TimeUnit.dt)\n\n\n Args:\n node (ast.DurationLiteral):\n DurationLiteral node to transform.\n\n Returns:\n ast.DurationLiteral:\n Tranformed DurationLiteral node with unit set to samples (dt)\n \"\"\"\n if node.unit.name != \"dt\":\n new_node = ast.DurationLiteral(\n value=int(\n round(\n node.value\n * TimeUnitToValue[node.unit.name].value\n * self.sample_rate\n )\n ),\n unit=ast.TimeUnit.dt,\n )\n return new_node\n return node\n\n # pylint: enable=C0103" }, { "identifier": "ResolveIODeclaration", "path": "shipyard/passes/resolve_io_declaration.py", "snippet": "class ResolveIODeclaration(GenericTransformer):\n def __init__(self, inputs: dict = None):\n self.inputs = inputs or {} # e.g. inputs = {\"basis\": 1}\n\n def visit_IODeclaration(self, node: ast.IODeclaration) -> ast.ConstantDeclaration:\n \"\"\"\n IODeclaration node Transformer. Transforms IODeclaration nodes to\n ConstantDeclarations. 
Searches through ResolveIODeclaration.inputs\n for info to populate the ConstantDeclaration.\n\n Args:\n node (ast.IODeclaration):\n IODeclaration node to transform.\n\n Returns:\n ast.ConstantDeclaration:\n Tranformed ConstantDeclaration node with relevant data (identifier and\n init_expression)\n \"\"\"\n if node.io_identifier == ast.IOKeyword.input:\n if node.identifier.name not in self.inputs:\n raise SetupError(\n ErrorCode.ID_NOT_FOUND,\n message=f\"Input: {node.identifier.name} not found in input\"\n \" dictionary\",\n )\n match node.type:\n case ast.IntType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.DurationType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.DurationLiteral(\n value=(self.inputs[node.identifier.name] * 1e9),\n unit=ast.TimeUnit.ns,\n ),\n )\n # todo: AQC-311 add support for complex input type\n # case ast.ComplexType():\n # return ast.ConstantDeclaration(\n # type=node.type,\n # identifier=node.identifier,\n # init_expression=ast.BinaryExpression(\n # op= ast.BinaryOperator['+'],\n # lhs=ast.FloatLiteral(\n # value= self.inputs[node.identifier.name].real),\n # rhs=ast.ImaginaryLiteral(\n # value= self.inputs[node.identifier.name].imag))\n # )\n case ast.FloatType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.FloatLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.BoolType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.BooleanLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case ast.BitType():\n if isinstance(self.inputs[node.identifier.name], list):\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.ArrayLiteral(\n values=[\n ast.IntegerLiteral(value=s)\n for s in self.inputs[node.identifier.name]\n ]\n ),\n )\n elif isinstance(self.inputs[node.identifier.name], int):\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n else:\n raise SemanticError(\n ErrorCode.INPUT_TYPE_NOT_SUPPORTED,\n message=f\"Input type not supported: {node.type}\",\n )\n case ast.UintType():\n return ast.ConstantDeclaration(\n type=node.type,\n identifier=node.identifier,\n init_expression=ast.IntegerLiteral(\n value=self.inputs[node.identifier.name]\n ),\n )\n case _:\n raise SemanticError(\n ErrorCode.INPUT_TYPE_NOT_SUPPORTED,\n message=f\"Input type not supported: {node.type}\",\n )\n # case ast.ArrayType():\n # return ast.ConstantDeclaration(\n # type=node.type,\n # identifier=node.identifier,\n # init_expression=ast.ArrayLiteral(\n # values = [ast.IntegerLiteral(value=s)\n # for s in self.inputs[node.identifier.name]]),\n # )\n\n # todo: AQC-312 add support for angle input type\n # case ast.AngleType():\n # # return ast.ConstantDeclaration(\n # # type=node.type,\n # # identifier=node.identifier,\n # # init_expression=ast.FloatLiteral(\n # # value = self.inputs[node.identifier.name]),\n # # )\n # todo: AQC-310 add support for stretch input type\n # case ast.StretchType():\n else:\n raise SemanticError(\n ErrorCode.OUTPUT_NOT_SUPPORTED,\n message=f\"Output type not supported: {node}\",\n )" }, { "identifier": "SemanticAnalyzer", "path": 
"shipyard/passes/semantic_analysis/semantic_analyzer.py", "snippet": "class SemanticAnalyzer(TypeVisitor, LiteralVisitor, GenericVisitor):\n \"\"\"\n QASMVisitor class that peforms semantic analysis on a openQASM Abstract Syntax Tree\n\n usage:\n qasm_ast = openpulse.parse(qasm_program_string)\n sa = SemanticAnalyser()\n sa.visit(qasm_ast)\n \"\"\"\n\n def __init__(self) -> None:\n self.current_scope: ScopedSymbolTable = None\n self._calibration_scope: CalScopedSymbolTable = None\n self._scope_context: ScopeContext = None\n super().__init__()\n\n @property\n def calibration_scope(self) -> CalScopedSymbolTable:\n \"\"\"Getter for the 'calibration_scope' symbol table of a SemanticAnalyser\n instance. Creates and returns an initialised calibration scope on first call.\n Subsequent calls return the same scope.\n\n Returns:\n CalScopedSymbolTable: a scoped symbol table used for symbols declared within\n openpulse syntax (cal & defcal)\n \"\"\"\n if self._calibration_scope is None:\n self.ensure_in_global_scope(ast.Identifier(\"init cal scope\"))\n self._calibration_scope = CalScopedSymbolTable(\n \"cal_scope\", enclosing_scope=self.current_scope, init_cal=True\n )\n return self._calibration_scope\n\n @property\n def scope_context(self) -> ScopeContext:\n \"\"\"Getter for the 'scope_context' property of a SemanticAnalyser instance\"\"\"\n return self._scope_context\n\n @scope_context.setter\n def scope_context(self, value: ScopeContext):\n LOGGER.debug(\"SET SCOPE CONTEXT: %s\", value)\n self._scope_context = value\n\n # pylint: disable=C0103\n # disable snake_case naming style\n # these functions are of the form \"visit_{QASMNode class name}\"\n def visit_Program(self, node: ast.Program) -> None:\n \"\"\"\n Program node visitor,\n creates and enters a global symbol table (global scope),\n visits all other statements in the openQASM program.\n\n Args:\n node (ast.Program):\n openQASM program ast node to visit\n \"\"\"\n global_scope = ScopedSymbolTable(\n scope_name=\"global\",\n enclosing_scope=self.current_scope,\n )\n with self.scope_context_manager(global_scope, ScopeContext.GLOBAL):\n for statement in node.statements:\n self.visit(statement)\n\n def visit_ExternDeclaration(self, node: ast.ExternDeclaration) -> None:\n \"\"\"\n ExternDeclaration node visitor,\n inserts a symbol representing the external function declaration\n into current_scope (symbol table)\n\n Args:\n node (ast.ExternDeclaration):\n openQASM external function declaration ast node to visit\n \"\"\"\n extern_name = node.name.name\n params = [\n ClassicalSymbol(\n name=f\"{extern_name}_arg_{i}\", kind=self.visit(argument.type)\n )\n for i, argument in enumerate(node.arguments)\n ]\n return_type = self.visit(node.return_type) if node.return_type else None\n extern_symbol = ExternSymbol(\n name=extern_name, params=params, return_type=return_type\n )\n self.declare_symbol(extern_symbol)\n\n def visit_SubroutineDefinition(self, node: ast.SubroutineDefinition) -> None:\n \"\"\"\n SubroutineDefinition node visitor, subroutines may only be defined in global\n scope.\n inserts a symbol representing the subroutine definition into current_scope,\n creates and enters a symbol table (local scope) to encapsulate\n the subroutie,\n inserts all the parameters of the subroutine function signature into the\n new symbol table,\n visits all statements within the subroutine.\n\n Args:\n node (ast.SubroutineDefinition):\n openQASM subroutine definition ast node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n return_type = 
self.visit(node.return_type) if node.return_type else None\n subroutine_symbol = SubroutineSymbol(\n name=node.name.name, return_type=return_type\n )\n\n self.declare_symbol(subroutine_symbol)\n\n subroutine_scope = ScopedSymbolTable(\n scope_name=node.name.name,\n enclosing_scope=self.current_scope,\n )\n\n with self.scope_context_manager(subroutine_scope, ScopeContext.SUBROUTINE):\n for argument in node.arguments:\n arg_symbol = self.visit(argument)\n subroutine_symbol.params.append(arg_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_QuantumGateDefinition(self, node: ast.QuantumGateDefinition) -> None:\n \"\"\"\n QuantumGateDefinition node visitor, quantum gates may only be defined in global\n scope.\n inserts a symbol representing the gate definition into current_scope,\n creates and enters a symbol table (local scope) to encapsulate\n the gate,\n inserts all the parameters and qubits of the gate function signature\n into the new symbol table,\n visits all statements within the gate definition.\n\n Args:\n node (ast.QuantumGateDefinition):\n openQASM quantum gate definition ast node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n gate_symbol = GateSymbol(name=node.name.name)\n\n self.declare_symbol(gate_symbol)\n\n gate_scope = ScopedSymbolTable(\n scope_name=gate_symbol.name,\n enclosing_scope=self.current_scope,\n )\n\n with self.scope_context_manager(gate_scope, ScopeContext.SUBROUTINE):\n for argument in node.arguments:\n arg_symbol = Symbol(name=argument.name)\n self.declare_symbol(arg_symbol)\n gate_symbol.params.append(arg_symbol)\n\n for qubit in node.qubits:\n qubit_symbol = QuantumSymbol(name=qubit.name, kind=\"QUBIT\")\n self.declare_symbol(qubit_symbol)\n gate_symbol.qubits.append(qubit_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"\n ClassicalDeclaration node visitor\n inserts a symbol representing the classical variable into current_scope\n\n Note:\n Arrays cannot be declared inside the body of a function or gate.\n All arrays must be declared within the global scope of the program.\n https://openqasm.com/language/types.html#arrays\n\n Args:\n node (ast.ClassicalDeclaration):\n openQASM classical declaration ast node to visit\n \"\"\"\n if isinstance(node.type, ast.ArrayType):\n self.ensure_in_global_scope(node.identifier)\n type_symbol = self.visit(node.type)\n LOGGER.debug(\n \"Classical Declaration: name: %s, kind: %s\",\n node.identifier.name,\n type_symbol,\n )\n decl_symbol = ClassicalSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_ConstantDeclaration(self, node: ast.ConstantDeclaration) -> None:\n \"\"\"\n ConstantDeclaration node visitor\n inserts a symbol representing the constant into current_scope\n\n Args:\n node (ast.ConstantDeclaration):\n openQASM constant declaration ast node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n decl_symbol = ConstantSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_QubitDeclaration(self, node: ast.QubitDeclaration) -> None:\n \"\"\"\n QubitDeclaration node visitor\n inserts a symbol representing the qubit into current_scope\n\n Note:\n All qubits are global variables.\n Qubits cannot be declared within gates or subroutines.\n https://openqasm.com/language/types.html#quantum-types\n\n Args:\n node (ast.QubitDeclaration):\n openQASM qubit declaration ast node to visit\n 
\"\"\"\n # qubits can only be declared in global scope\n self.ensure_in_global_scope(node.qubit)\n decl_symbol = QuantumSymbol(name=node.qubit.name, kind=\"QUBIT\")\n self.declare_symbol(decl_symbol)\n\n def visit_IODeclaration(self, node: ast.IODeclaration) -> None:\n \"\"\"\n ToDo: may require more / different handling when we start using this\n\n IODeclaration node visitor\n inserts a symbol representing the io into current_scope\n\n input/output modifiers can be used to indicate that variables will be\n supplied to / generated by an openQASM program at runtime\n\n https://openqasm.com/language/directives.html#input-output\n\n Args:\n node (ast.IODeclaration):\n openQASM io declaration ast node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n decl_symbol = IOSymbol(name=node.identifier.name, kind=type_symbol)\n self.declare_symbol(decl_symbol)\n\n def visit_Identifier(self, node: ast.Identifier):\n \"\"\"\n Identifier node visitor:\n Looks up the name of the identifer within current and enclosing scope,\n raises an ID_NOT_FOUND error if the identifier hasn't been declared\n\n Args:\n node (ast.Identifier):\n openQASM identifier node to visit\n\n Raises:\n SemanticError with ErrorCode.ID_NOT_FOUND\n \"\"\"\n node_symbol = self.current_scope.lookup(node.name)\n if node.name[0] == \"$\":\n pass\n elif node_symbol is None:\n raise self.error(ErrorCode.ID_NOT_FOUND, node.name)\n\n def visit_AliasStatement(self, node: ast.AliasStatement) -> None:\n \"\"\"\n AliastStatement node visitor:\n Creates and declares a symbol for an Alias.\n Then visits the value the alias is assigned\n\n Args:\n node (ast.AliasStatement):\n openQASM alias statment to visit\n \"\"\"\n alias_symbol = AliasSymbol(name=node.target.name)\n self.declare_symbol(alias_symbol)\n self.visit(node.value)\n\n def visit_CalibrationStatement(self, node: ast.CalibrationStatement) -> None:\n \"\"\"\n CalibrationStatement node visitor, (cal {} statements):\n Enters calibration scope and visits all statements in the body of the\n calibration statement.\n\n Args:\n node (ast.CalibrationStatement):\n openQASM calibration statement node to visit\n \"\"\"\n self.ensure_in_global_scope(ast.Identifier(\"Calibration Statement\"))\n with self.scope_context_manager(self.calibration_scope, ScopeContext.DEFCAL):\n for statement in node.body:\n self.visit(statement)\n\n def visit_CalibrationDefinition(self, node: ast.CalibrationDefinition) -> None:\n \"\"\"\n CalibrationDefinition node visitor, (defcal {} statements):\n Gets a mangles name for the calibration definition and uses it\n to create a symbol representing the defcal statement.\n Inserts a symbol representing the defcal statement into calibration scope.\n Creates a new CalScopedSymbolTable and enters it.\n Inserts symbols for all parameters and qubits into the new scope.\n Visits all statements withing the body of the defcal statement\n\n Args:\n node (ast.CalibrationDefinition):\n openQASM calibration definition node to visit\n \"\"\"\n self.ensure_in_global_scope(node.name)\n defcal_name = Mangler(node).signature().mangle()\n return_type = self.visit(node.return_type) if node.return_type else None\n defcal_symbol = DefcalSymbol(name=defcal_name, return_type=return_type)\n with self.scope_context_manager(\n self.calibration_scope, context=ScopeContext.DEFCAL\n ):\n self.declare_symbol(defcal_symbol)\n\n defcal_scope = CalScopedSymbolTable(\n scope_name=defcal_symbol.name,\n enclosing_scope=self.calibration_scope,\n )\n\n with self.scope_context_manager(defcal_scope, 
ScopeContext.DEFCAL):\n for argument in node.arguments:\n arg_symbol = self.visit(argument)\n defcal_symbol.params.append(arg_symbol)\n\n for qubit in node.qubits:\n qubit_symbol = QuantumSymbol(\n name=qubit.name, kind=self.current_scope.lookup(\"QUBIT\").name\n )\n self.declare_symbol(qubit_symbol)\n defcal_symbol.qubits.append(qubit_symbol)\n\n for statement in node.body:\n self.visit(statement)\n\n def visit_QuantumGate(self, node: ast.QuantumGate) -> None:\n \"\"\"\n QuantumGate node visitor, (gate call):\n Gets the mangled name best matching the gate call.\n Looks up the mangled name of the gate within the calibration scope.\n Raises an ID_NOT_FOUND error if the gate hasn't been declared.\n\n Args:\n node (ast.QuantumGate):\n openQASM quantum gate node to visit\n\n Raises:\n SemanticError with ErrorCode.ID_NOT_FOUND\n \"\"\"\n f_signature = Mangler(node).signature()\n symbols = f_signature.match(self.current_scope.keys())\n if not symbols:\n symbols = f_signature.match(self.calibration_scope.keys())\n if symbols:\n # per https://github.com/openqasm/openqasm/issues/245\n return symbols[-1]\n raise self.error(ErrorCode.ID_NOT_FOUND, node.name)\n\n def visit_ClassicalArgument(self, node: ast.ClassicalArgument) -> ClassicalSymbol:\n \"\"\"\n ClassicalArgument node visitor:\n Creates and inserts a ClassicalSymbol for function arguments (def, defcal)\n into current scope\n\n Args:\n node (ast.ClassicalArgument):\n openQASM classical argument node to visit\n\n Returns:\n ClassicalSymbol: the symbol inserted into current scope\n \"\"\"\n arg_symbol = ClassicalSymbol(name=node.name.name, kind=self.visit(node.type))\n self.declare_symbol(arg_symbol)\n return arg_symbol\n\n def visit_QuantumArgument(self, node: ast.QuantumArgument) -> QuantumSymbol:\n \"\"\"\n QuantumArgument node visitor:\n Creates and inserts a QuantumSymbol for function arguments (def, defcal)\n into current scope\n\n Args:\n node (ast.QuantumArgument):\n openQASM quantum argument node to visit\n\n Returns:\n QuantumSymbol: the symbol inserted into current scope\n \"\"\"\n arg_symbol = QuantumSymbol(name=node.name.name, kind=\"QUBIT\")\n self.declare_symbol(arg_symbol)\n return arg_symbol\n\n def visit_ForInLoop(self, node: ast.ForInLoop) -> None:\n \"\"\"\n ForInLoop node visitor:\n Visits the set declaration (what will be looped over)\n Enters a new scope.\n Inserts a symbol representing the loop variable into the new scope\n Visits every statement in the block of the ForInLoop\n\n Args:\n node (ast.ForInLoop):\n openQASM for in loop node to visit\n \"\"\"\n type_symbol = self.visit(node.type)\n loop_symbol = ClassicalSymbol(name=node.identifier.name, kind=type_symbol)\n self.visit(node.set_declaration)\n with self.local_context_manager(\"for_loop_scope\", node.block):\n self.current_scope.insert(loop_symbol)\n\n def visit_BranchingStatement(self, node: ast.BranchingStatement) -> None:\n \"\"\"\n BranchingStatement node visitor (if/else):\n visits the condition node of the if/else statement\n Enters a new scope for the if block and visits every statement within it.\n Leaves the if block scope\n Enters a new scope for the else block and visits every statement within it.\n\n Args:\n node (ast.BranchingStatement):\n openQASM branching (if/else) node to visit\n \"\"\"\n self.visit(node.condition)\n with self.local_context_manager(\"if_scope\", node.if_block):\n pass\n with self.local_context_manager(\"else_scope\", node.else_block):\n pass\n\n def visit_WhileLoop(self, node: ast.WhileLoop) -> None:\n \"\"\"\n WhileLoop node 
visitor:\n visits the condition node of the while statement\n Enters a new scope for the while block and visits every statement within it.\n\n Args:\n node (ast.WhileLoop):\n openQASM while node to visit\n \"\"\"\n self.visit(node.while_condition)\n with self.local_context_manager(\"while_scope\", node.block):\n pass\n\n def visit_Box(self, node: ast.Box) -> None:\n \"\"\"\n Box node visitor:\n visits the duration node of the Box statement\n Enters a new scope for the Box block and visits every statement within it.\n\n Args:\n node (ast.Box):\n openQASM Box node to visit\n \"\"\"\n if node.duration:\n self.visit(node.duration)\n with self.local_context_manager(\"box_scope\", node.body):\n pass\n\n def visit_UnaryExpression(self, node: ast.UnaryExpression):\n \"\"\"\n UnaryExpression node visitor:\n validates the operator of the unary expression node\n visits the expression of the unary expression node\n\n Args:\n node (ast.UnaryExpression):\n openQASM unary expression node to visit\n \"\"\"\n # todo check if unary op is allowed for expression\n assert isinstance(node.op, type(ast.UnaryOperator[\"!\"]))\n self.visit(node.expression)\n\n def visit_BinaryExpression(self, node: ast.BinaryExpression):\n \"\"\"\n BinaryExpression node visitor:\n validates the operator of the binary expression node\n visits each side of the binary expression\n\n Args:\n node (ast.BinaryExpression):\n openQASM binary expression node to visit\n \"\"\"\n # todo check if binary op is allowed between lhs and rhs\n assert isinstance(node.op, type(ast.BinaryOperator[\"+\"]))\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_FunctionCall(self, node: ast.FunctionCall):\n \"\"\"\n FunctionCall node visitor:\n visits the name (Identifier) node of the function call\n visits all the argument nodes of the function call\n\n Args:\n node (ast.FunctionCall):\n openQASM function call node to visit\n \"\"\"\n self.visit(node.name)\n for argument in node.arguments:\n self.visit(argument)\n\n def visit_Cast(self, node: ast.Cast):\n \"\"\"\n Cast node visitor:\n validates that the type being cast to is a classical type\n # todo should be more narrow, e.g. 
duration can't be cast to\n visits the argument node of the cast node\n\n Args:\n node (ast.Cast):\n openQASM cast node to visit\n \"\"\"\n assert isinstance(node.type, ast.ClassicalType)\n self.visit(node.argument)\n\n def visit_IndexExpression(self, node: ast.IndexExpression):\n \"\"\"\n IndexExpression node visitor:\n visits collection node of an index expression node\n visits index node of an index expression node\n\n Args:\n node (ast.IndexExpression):\n openQASM index expression node to visit\n \"\"\"\n self.visit(node.collection)\n if isinstance(node.index, list):\n for i_node in node.index:\n self.visit(i_node)\n else:\n self.visit(node.index)\n\n def visit_DiscreteSet(self, node: ast.DiscreteSet):\n \"\"\"\n DiscreteSet node visitor:\n visits each node of a DiscreteSet\n\n Args:\n node (ast.DiscreteSet):\n openQASM discrete set node to visit\n \"\"\"\n for expression in node.values:\n self.visit(expression)\n\n def visit_RangeDefinition(self, node: ast.RangeDefinition):\n \"\"\"\n RangeDefinition node visitor:\n visits start, end and step nodes of a RangeDefinition\n\n Args:\n node (ast.RangeDefinition):\n openQASM range definition node to visit\n \"\"\"\n if node.start:\n self.visit(node.start)\n if node.end:\n self.visit(node.end)\n if node.step:\n self.visit(node.step)\n\n def visit_Concatenation(self, node: ast.Concatenation):\n \"\"\"\n Concatenation node visitor:\n visits each side of the concatenation expression\n\n Args:\n node (ast.Concatenation):\n openQASM concatenation node to visit\n \"\"\"\n self.visit(node.lhs)\n self.visit(node.rhs)\n\n def visit_BitstringLiteral(self, node: ast.BitstringLiteral) -> LiteralSymbol:\n \"\"\"\n BitstringLiteral node visitor:\n\n Args:\n node (ast.BitstringLiteral):\n openQASM bitstring literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_BitstringLiteral(node)\n return LiteralSymbol(name=value, kind=\"BITSTRING\")\n\n def visit_IntegerLiteral(self, node: ast.IntegerLiteral) -> LiteralSymbol:\n \"\"\"\n IntegerLiteral node visitor:\n\n Args:\n node (ast.IntegerLiteral):\n openQASM integer literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_IntegerLiteral(node)\n return LiteralSymbol(name=value, kind=\"INT\")\n\n def visit_FloatLiteral(self, node: ast.FloatLiteral) -> LiteralSymbol:\n \"\"\"\n FloatLiteral node visitor:\n\n Args:\n node (ast.FloatLiteral):\n openQASM float literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_FloatLiteral(node)\n return LiteralSymbol(name=value, kind=\"FLOAT\")\n\n def visit_ImaginaryLiteral(self, node: ast.ImaginaryLiteral) -> LiteralSymbol:\n \"\"\"\n ImaginaryLiteral node visitor:\n\n Args:\n node (ast.ImaginaryLiteral):\n openQASM imaginary literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_ImaginaryLiteral(node)\n return LiteralSymbol(name=value, kind=\"IMAGINARY\")\n\n def visit_BooleanLiteral(self, node: ast.BooleanLiteral) -> LiteralSymbol:\n \"\"\"\n BooleanLiteral node visitor:\n\n Args:\n node (ast.BooleanLiteral):\n openQASM boolean literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_BooleanLiteral(node)\n return LiteralSymbol(name=value, kind=\"BOOL\")\n\n def visit_DurationLiteral(self, node: ast.DurationLiteral) -> 
LiteralSymbol:\n \"\"\"\n DurationLiteral node visitor:\n\n Args:\n node (ast.DurationLiteral):\n openQASM duration literal node to visit\n\n Returns:\n LiteralSymbol: symbol representation of the node value\n \"\"\"\n value = super().visit_DurationLiteral(node)\n return LiteralSymbol(name=value, kind=\"DURATION\")\n\n # pylint: disable=C0103\n # (snake_case naming style)\n\n def _visit_type_node(self, node: ast.ClassicalType) -> str:\n \"\"\"\n type node visitor:\n Returns the name of a Type node\n Example:\n node:ast.FloatType -> 'FLOAT'\n\n Args:\n node (ast.ClassicalType): node that is a subclass of ClassicalType\n\n Returns:\n str: name of the node type\n \"\"\"\n name = super()._visit_type_node(node)\n name_in_table = self.current_scope.lookup(name).name\n return name_in_table\n\n def error(self, error_code: ErrorCode, name: str) -> SemanticError:\n \"\"\"\n Method for standardizing error handling of the SemanticAnalyser class.\n Logs current scope and returns a SemanticError object that should be raised\n immediately after this method retuns\n\n Usage:\n raise self.error(...)\n\n Args:\n error_code (ErrorCode):\n Code to identify what issue caused an error to be raised\n name (str):\n An identifer string to identify what caused the error\n\n Returns:\n SemanticError: should be raised immediatly on method return\n \"\"\"\n LOGGER.debug(\"CURRENT SCOPE: %s\", self.current_scope)\n LOGGER.debug(\"CALIBRATION SCOPE: %s\", self._calibration_scope)\n return SemanticError(error_code, message=f\"{error_code.value} -> {name}\")\n\n def declare_symbol(self, symbol: Symbol):\n \"\"\"Method for standardizing symbol declaration.\n Symbols are first looked up (in current scope only)\n before being inserted into current scope (if not already in scope)\n\n Args:\n symbol (Symbol): to insert into current scope\n\n Raises:\n SemanticError: ErrorCode.DUBLICATE_ID\n \"\"\"\n if self.current_scope.lookup(symbol.name, current_scope_only=True):\n raise self.error(ErrorCode.DUPLICATE_ID, symbol.name)\n self.current_scope.insert(symbol)\n\n def ensure_in_global_scope(self, node: ast.Identifier):\n \"\"\"\n Ensures that the current scope_context is global scope\n Used to make sure that declarations such as Subroutines and defcals\n Are only used in the allowed scope (GLOBAL)\n\n Args:\n node (ast.Identifier): Node that is currently being visited\n\n Raises:\n SemanticError: ErrorCode.NOT_IN_GLOBAL_SCOPE\n \"\"\"\n if not self.scope_context == ScopeContext.GLOBAL:\n raise self.error(ErrorCode.NOT_IN_GLOBAL_SCOPE, node.name)\n\n @contextmanager\n def scope_context_manager(\n self,\n symbol_table: ScopedSymbolTable,\n context: ScopeContext,\n ):\n \"\"\"\n Context manager for entering/leaving scopes in specific ScopeContext\n\n Args:\n symbol_table (ScopedSymbolTable): Symbol Table / Scope to enter\n context (ScopeContext): what context the scope is entered in\n \"\"\"\n enclosing_scope = self.current_scope\n enclosing_context = self.scope_context\n self.current_scope = symbol_table\n self.scope_context = context\n try:\n yield\n finally:\n if enclosing_context:\n self.scope_context = enclosing_context\n if enclosing_scope:\n self.current_scope = enclosing_scope\n LOGGER.debug(symbol_table)\n LOGGER.debug(\"LEAVE scope: %s\", symbol_table.scope_name)\n\n @contextmanager\n def local_context_manager(self, name: str, block: list[ast.Statement]):\n \"\"\"\n Context manager for entering/leaving local scopes (if/else, for, while, box)\n What ScopeContext is entered depends on the current ScopeContext.\n If in GLOBAL 
then enter LOCAL\n Else (LOCAL, SUBROUTINE, DEFCAL) then keep context unchanged.\n Once in the new scope, nodes in the block of the scope will be visited in order\n\n Args:\n name (str):\n Name of the ScopedSymbolTable to enter\n block (list[ast.Statement]):\n list of openQASM statement nodes, visited in order\n \"\"\"\n scope = ScopedSymbolTable(name, enclosing_scope=self.current_scope)\n context = (\n ScopeContext.LOCAL\n if self.scope_context == ScopeContext.GLOBAL\n else self.scope_context\n )\n\n with self.scope_context_manager(scope, context):\n yield\n for statement in block:\n self.visit(statement)" }, { "identifier": "PulseVisualizer", "path": "shipyard/printers/visualizer/visualize_pulse_sequence.py", "snippet": "class PulseVisualizer(Interpreter):\n def __init__(\n self,\n setup: SetupInternal = None,\n external_functions: dict = None,\n ):\n super().__init__(setup, external_functions)\n self.pulses = {} # dict of pulses for each frame/ port\n self.phases = {} # dict of phases for each frame/ port\n self.frequencies = {} # dict of frequencies for each frame/ port\n self.plot_flag: bool = False\n\n def visit_Program(self, node: ast.Program) -> None:\n activation_record = ActivationRecord(\n name=\"main\", ar_type=ARType.PROGRAM, nesting_level=1\n )\n with self.ar_context_manager(activation_record):\n for statement in node.statements:\n self.visit(statement)\n for frame in self.pulses.keys():\n self.plotter(\n np.concatenate(self.pulses[frame]),\n np.concatenate(self.phases[frame]),\n np.concatenate(self.frequencies[frame]),\n frame,\n )\n\n def plotter(self, wfm_array, phase_array, frequency_array, frame_name):\n fig, axs = plt.subplots(3)\n if all(isinstance(i, complex) for i in wfm_array):\n axs[0].plot([value.real for value in wfm_array], label=\"real\")\n axs[0].plot([value.imag for value in wfm_array], label=\"imag\")\n axs[0].legend()\n else:\n axs[0].plot(wfm_array)\n axs[0].set(ylabel=f\"{frame_name} amplitude\")\n axs[1].plot(phase_array)\n axs[1].set(ylabel=f\"{frame_name} phase\")\n axs[2].plot(frequency_array)\n axs[2].set(ylabel=f\"{frame_name} frequency\")\n if self.plot_flag: # pragma: no cover\n plt.show()\n\n @_maybe_annotated\n def visit_ClassicalDeclaration(self, node: ast.ClassicalDeclaration) -> None:\n \"\"\"\n ClassicalDeclaration node visitor:\n Visits and stores classical declarations of variables. 
If the variable\n declared is a frame, the frame is added to the current activation record,\n as well as the Interpreter's pulse, phase, and frequency dictionaries.\n\n Args:\n node (ast.ClassicalDeclaration): openQASM ClassicalDeclaration AST node\n\n \"\"\"\n activation_record = self.call_stack.peek()\n match node:\n case ast.ClassicalDeclaration(type=ast.PortType()):\n name = node.identifier.name\n activation_record[name] = self.setup.ports[name]\n case ast.ClassicalDeclaration(\n type=ast.FrameType(),\n init_expression=ast.FunctionCall(name=ast.Identifier(\"newframe\")),\n ):\n call = node.init_expression\n assert isinstance(call, ast.FunctionCall)\n assert len(call.arguments) == 3\n port = call.arguments[0].name\n frequency = self.visit(call.arguments[1])\n phase = self.visit(call.arguments[2])\n frame = Frame(\n name=node.identifier.name,\n port=activation_record[port],\n frequency=frequency,\n phase=phase,\n )\n self.pulses[frame.name] = []\n self.phases[frame.name] = []\n self.frequencies[frame.name] = []\n activation_record[frame.name] = frame\n case ast.ClassicalDeclaration(type=ast.ArrayType()):\n if node.init_expression is None:\n shapes = [dim.value for dim in node.type.dimensions]\n activation_record[node.identifier.name] = np.zeros(shape=shapes)\n else:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n case _:\n if node.init_expression is not None:\n activation_record[node.identifier.name] = self.visit(\n node.init_expression\n )\n else:\n activation_record[node.identifier.name] = None\n\n @_maybe_annotated\n def visit_DelayInstruction(self, node: ast.DelayInstruction) -> None:\n \"\"\"\n DelayInstruction node visitor:\n Appends delay of 0s to relevant frame\n\n Args:\n node (ast.DelayInstruction): openQASM DelayInstruction AST node\n \"\"\"\n for q in node.qubits:\n if q.name in self.pulses.keys():\n self.pulses[q.name].append(np.zeros(int(self.visit(node.duration))))\n self.phases[q.name].append(\n np.full(\n int(self.visit(node.duration)),\n self.call_stack.down_stack(q.name)[q.name].phase,\n )\n )\n self.frequencies[q.name].append(\n np.full(\n int(self.visit(node.duration)),\n self.call_stack.down_stack(q.name)[q.name].frequency,\n )\n )\n\n def visit_play(self, node: ast.FunctionCall) -> None:\n \"\"\"\n FunctionCall node visitor. Handles 'play' and 'capture' function calls.\n For 'play', 'capture_v1', and 'capture_v2' function calls, the function\n call is visited and the resulting waveform is appended to the relevant\n frame's pulse, phase, and frequency arrays. 
For 'capture_v3' and\n 'capture_v1_spectrum' function calls, the function call is visited and the resulting\n time value is returned and turned into an array of 1s of that length, and\n appended to the relevant frame's pulse, phase, and frequency arrays.\n\n Args:\n node (ast.FunctionCall): 'play' FunctionCall node to visit\n\n Raises:\n Error:\n ErrorCode.UNHANDLED\n If the node does not match the expected format/structure\n \"\"\"\n match node:\n case ast.FunctionCall(\n name=ast.Identifier(\"play\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v2\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ):\n wfm_array = self.visit(wfm_node)\n self.phases[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].phase,\n )\n )\n self.pulses[frame_name].append(wfm_array)\n self.frequencies[frame_name].append(\n np.full(\n len(wfm_array),\n self.call_stack.down_stack(frame_name)[frame_name].frequency,\n )\n )\n case ast.FunctionCall(\n name=ast.Identifier(\"capture_v3\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ) | ast.FunctionCall(\n name=ast.Identifier(\"capture_v1_spectrum\"),\n arguments=[ast.Identifier(frame_name), wfm_node],\n ):\n val = self.visit(wfm_node)\n # val is the capture duration; no waveform array exists in\n # this branch, so val sets the length of the filled arrays\n self.phases[frame_name].append(\n np.full(\n int(val),\n self.call_stack.down_stack(frame_name)[frame_name].phase,\n )\n )\n self.pulses[frame_name].append(np.ones(int(val)))\n self.frequencies[frame_name].append(\n np.full(\n int(val),\n self.call_stack.down_stack(frame_name)[frame_name].frequency,\n )\n )\n\n case _:\n raise Error(\n ErrorCode.UNHANDLED,\n f\"Unhandled waveform generation: {node}\",\n )" }, { "identifier": "waveform_functions", "path": "shipyard/printers/zi/waveform_functions.py", "snippet": "def zeros(samples: int) -> np.ndarray:\ndef placeholder(samples: int) -> np.ndarray:\ndef ones(samples: int) -> np.ndarray:\ndef sine(\n samples: int,\n amplitue: float,\n phase_offset: float,\n n_periods: int,\n) -> np.ndarray:\ndef cosine(\n samples: int,\n amplitue: float,\n phase_offset: float,\n n_periods: int,\n) -> np.ndarray:\ndef sinc(samples: int, amplitude: float, position: int, beta: float) -> np.ndarray:\ndef ramp(samples: int, start_level: float, end_level: float) -> np.ndarray:\ndef sawtooth(\n samples: int, amplitude: float, phase_offset: float, n_periods: int\n) -> np.ndarray:\ndef triangle(\n samples: int, amplitude: float, phase_offset: float, n_periods: int\n) -> np.ndarray:\ndef gauss(samples: int, amplitude: float, position: int, width: float) -> np.ndarray:\ndef drag(samples: int, amplitude: float, position: int, width: float) -> np.ndarray:\ndef blackman(samples: int, amplitude: float, alpha: float) -> np.ndarray:\ndef hamming(samples: int, amplitude: float) -> np.ndarray:\ndef hann(samples: int, amplitude: float) -> np.ndarray:\ndef rect(samples: int, amplitude: float) -> np.ndarray:\ndef chirp(\n samples: int,\n amplitude: float,\n start_freq: float,\n stop_freq: float,\n phase: float = 0.0,\n) -> np.ndarray:\ndef rrc(\n samples: int, amplitude: float, position: int, beta: float, width: float\n) -> np.ndarray:\n def _special_value():" }, { "identifier": "Frame", "path": "shipyard/setup/internal.py", "snippet": "class Frame(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse frame concept as a pydantic model.\n 
https://openqasm.com/language/openpulse.html#frames\n\n Args:\n name (str):\n name of the frame.\n port (Port):\n the Port object the frame is associated with.\n frequency (float):\n the frequency the frame evolves at. Defaults to 0.\n phase (float):\n the phase of the frame.\n time (Duration):\n the time of the frame.\n \"\"\"\n\n name: str\n port: Port\n frequency: float = 0.0\n phase: float = 0.0\n time: Duration = Duration(time=0)\n\n def set_phase(self, phase: float):\n \"\"\"Sets the phase of the frame\n\n Args:\n phase (float): the value the phase will be set to\n \"\"\"\n self.phase = phase\n\n def shift_phase(self, phase: float):\n \"\"\"Shifts the phase of the frame\n\n Args:\n phase (float): the value the phase will be shifted by.\n \"\"\"\n self.phase += phase\n\n def get_phase(self) -> float:\n \"\"\"Gets the phase of the frame\n\n Returns:\n float: current value of the phase of the frame.\n \"\"\"\n return self.phase\n\n def set_frequency(self, frequency: float):\n \"\"\"Sets the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be set to.\n \"\"\"\n self.frequency = frequency\n\n def shift_frequency(self, frequency: float):\n \"\"\"Shifts the frequency of the frame\n\n Args:\n frequency (float): the value the frequency will be shifted by.\n \"\"\"\n self.frequency += frequency\n\n def get_frequency(self) -> float:\n \"\"\"Gets the frequency of the frame\n\n Returns:\n float: current value of the frequency of the frame.\n \"\"\"\n return self.frequency\n\n def advance(self, duration: Duration):\n \"\"\"Advances the time of the frame by some duration\n\n Args:\n duration (Duration): the duration to advance the time of the frame by.\n \"\"\"\n self.time += duration\n\n def advance_to(self, duration: Duration):\n \"\"\"Advances the time of the frame to some other time\n\n Args:\n duration (Duration): the duration to advance the time of the frame to.\n\n Raises:\n ValueError:\n If the time the frame should be advanced to is less than the\n current time of the frame.\n \"\"\"\n duration.set_unit(self.time.unit)\n if self.time > duration:\n raise ValueError(f\"Can't advance current time {self.time} to {duration}\")\n self.time.time = int(duration.time * duration.unit.value / self.time.unit.value)" }, { "identifier": "Instrument", "path": "shipyard/setup/internal.py", "snippet": "class Instrument(BaseModel):\n \"\"\"\n Minimal information required to identify an Instrument\n\n Args:\n name (str):\n name of instrument instance, used to easily identify one instrument from\n another.\n type (InstrumentType):\n Literal representing the type/model of the instrument.\n serial (str):\n Serial number of the instrument in string format.\n \"\"\"\n\n name: str\n type: InstrumentType\n serial: str" }, { "identifier": "Port", "path": "shipyard/setup/internal.py", "snippet": "class Port(BaseModel):\n \"\"\"\n Representation of the openQASM openpulse port concept as a pydantic model.\n https://openqasm.com/language/openpulse.html#ports\n\n Args:\n name (str):\n name of the port.\n instrument (Instrument):\n What instrument the port is associated with.\n core (Core):\n Settings for the AWG Core the port is associated with.\n \"\"\"\n\n class Core(BaseModel):\n \"\"\"\n Settings for an AWG core\n\n Args:\n type (CoreType):\n the Type of AWG Core this 'Core' object is\n index (int):\n the index of the AWG Core on the Instrument this 'Core' object belongs to.\n channels (list[int]):\n the channels of the AWG Core this 'Core' object belongs to\n \"\"\"\n\n type: CoreType\n 
index: int\n channels: list[int]\n\n # pylint: disable=R0903\n # too-few-public-methods\n class Config:\n \"\"\"Pydantic model config for Core\"\"\"\n\n frozen = True\n\n # pylint: enable=R0903\n\n def obj(self) -> AWGCore:\n \"\"\"\n Returns an AWGCore subclass of type matching the type of the pydantic core\n model.\n\n Returns:\n AWGCore: AWGCore subclass of type matching the model instance.\n \"\"\"\n return CORE_TYPE_TO_CLASS[self.type]\n\n @validator(\"channels\")\n def not_more_channels_than_core_type_allows(cls, channels: list[int], values):\n \"\"\"\n Validates that the number of channels for the Core object does\n not exceed the number of channels allowed by the CoreType\n \"\"\"\n assert channels\n assert \"type\" in values\n assert len(channels) <= CORE_TYPE_TO_CLASS[values[\"type\"]].n_channels\n return channels\n\n name: str\n instrument: Instrument\n core: Core\n\n # pylint: disable=R0903\n # too-few-public-methods\n class Config:\n \"\"\"Pydantic model config for Port\"\"\"\n\n frozen = True\n\n # pylint: enable=R0903" }, { "identifier": "SetupInternal", "path": "shipyard/setup/internal.py", "snippet": "class SetupInternal(BaseModel):\n\n \"\"\"\n A Pydantic model containing the information required to compile an openQASM program\n to instrument level instructions.\n\n It is recommended to instantiate this object from a configuration file\n (json (future yml?))\n \"\"\"\n\n # todo validation\n\n # todo move to own module\n instruments: dict[str, Instrument]\n ports: dict[str, Port]\n frames: dict[str, Frame]\n\n @classmethod\n def from_dict(cls, setup: dict[str, dict[str, dict]]) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a dictionary\n\n Args:\n setup (dict[str, dict[str, dict]]): dictionary to create a Setup object from\n\n Returns:\n Setup: created from dictionary\n \"\"\"\n instruments = {\n k: Instrument(name=k, **v) for k, v in setup[\"Instruments\"].items()\n }\n ports = {}\n for k, val in setup[\"Ports\"].items():\n val[\"instrument\"] = instruments[val[\"instrument\"]]\n val[\"core\"] = Port.Core(**val[\"core\"])\n ports[k] = Port(name=k, **val)\n frames = {}\n for k, val in setup[\"Frames\"].items():\n val[\"port\"] = ports[val[\"port\"]]\n frames[k] = Frame(name=k, **val)\n return cls(instruments=instruments, ports=ports, frames=frames)\n\n def to_dict(self) -> dict[str, dict[str, dict]]:\n \"\"\"Creates a dictionary from a Setup object\n\n Args:\n filename (Path | str, optional):\n path to save dictionary to. 
Defaults to None.\n\n Returns:\n dict[str, dict[str, dict]]: dictionary created from Setup object\n \"\"\"\n setup = {\n \"Instruments\": {\n k: {\n \"type\": v.type,\n \"serial\": v.serial,\n }\n for k, v in self.instruments.items()\n },\n \"Ports\": {\n k: {\n \"instrument\": v.instrument.name,\n \"core\": {\n \"type\": v.core.type.value,\n \"index\": v.core.index,\n \"channels\": v.core.channels,\n },\n }\n for k, v in self.ports.items()\n },\n \"Frames\": {\n k: {\n \"port\": v.port.name,\n \"frequency\": v.frequency,\n \"phase\": v.phase,\n }\n for k, v in self.frames.items()\n },\n }\n return setup\n\n @classmethod\n def from_json(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a json file\n\n Args:\n filename (str | Path): path to json file\n\n Returns:\n Setup: created from json file\n \"\"\"\n with open(filename, encoding=\"utf-8\") as file:\n data = json.load(file)\n return cls.from_dict(data)\n\n def to_json(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a json file\n\n Args:\n filename (str | Path): path to json file to create\n\n Returns:\n Path: path to json file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n json.dump(data, file, indent=4)\n return Path(filename)\n\n @classmethod\n def from_yml(cls, filename: str | Path) -> \"SetupInternal\":\n \"\"\"Creates a Setup object from a yml file\n\n Args:\n filename (str | Path): path to yml file\n\n Returns:\n Setup: created from yml file\n \"\"\"\n with open(filename, \"r\", encoding=\"utf-8\") as file:\n data = yaml.safe_load(file)\n return cls.from_dict(data)\n\n def to_yml(self, filename: str | Path) -> Path:\n \"\"\"Writes a Setup object to a yml file\n\n Args:\n filename (str | Path): path to yml file to create\n\n Returns:\n Path: path to yml file\n \"\"\"\n data = self.to_dict()\n with open(filename, \"w\", encoding=\"utf-8\") as file:\n yaml.dump(data, file)\n return Path(filename)\n\n def cores(self) -> set[tuple[str, int, str]]:\n \"\"\"Gets all the AWG Cores used in the setup\n\n Returns:\n set[tuple[str, int, str]]:\n a Set of tuples, each tuple has a string representing the instrument\n name, an integer representing the index of the awg core of the\n instrument and a string representing the type of the awg core.\n \"\"\"\n return set(\n (port.instrument.name, port.core.index, port.core.type.value)\n for port in self.ports.values()\n )" } ]
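The SetupInternal.from_dict classmethod shown above resolves the "Instruments", "Ports", and "Frames" sections in that order, threading each Instrument object into its Ports and each Port object into its Frames. A minimal sketch of the dictionary shape it expects; every concrete value below (instrument type, serial, core layout, frame numbers, and all names) is an illustrative placeholder, not taken from this record:

from shipyard.setup.internal import SetupInternal

# Hypothetical setup dict; the key layout mirrors from_dict above, the values are invented.
example_setup = {
    "Instruments": {"awg0": {"type": "HD", "serial": "DEV0000"}},
    "Ports": {
        "ch1": {
            "instrument": "awg0",  # from_dict swaps this name for the Instrument object
            "core": {"type": "HD", "index": 1, "channels": [1]},
        }
    },
    "Frames": {"frame0": {"port": "ch1", "frequency": 3e9, "phase": 0.0}},
}

setup = SetupInternal.from_dict(example_setup)
# cores() collects (instrument name, core index, core type value) tuples
assert setup.cores() == {("awg0", 1, "HD")}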
import codecs
import json
import numpy as np
import pytest
from pathlib import Path

from shipyard.awg_core.awg_core import CoreType
from shipyard.call_stack import ActivationRecord, ARType
from shipyard.compiler import Compiler
from shipyard.duration import Duration, TimeUnits
from shipyard.passes.duration_transformer import DurationTransformer
from shipyard.passes.resolve_io_declaration import ResolveIODeclaration
from shipyard.passes.semantic_analysis.semantic_analyzer import SemanticAnalyzer
from shipyard.printers.visualizer.visualize_pulse_sequence import PulseVisualizer
from shipyard.printers.zi import waveform_functions
from shipyard.setup.internal import Frame, Instrument, Port, SetupInternal
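The zi.waveform_functions module imported above is a set of plain array-generating helpers; the context lists their signatures, e.g. ones(samples) and gauss(samples, amplitude, position, width). A small sketch with arbitrary parameter values:

import numpy as np
from shipyard.printers.zi import waveform_functions

flat = waveform_functions.ones(64)                 # 64 unit-amplitude samples
bell = waveform_functions.gauss(64, 1.0, 32, 8.0)  # amplitude 1, centred at sample 32
assert isinstance(flat, np.ndarray) and isinstance(bell, np.ndarray)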
17,463
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast)
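The files() helper above relativizes each glob hit by slicing away the base directory's path components, and common_files() then drops the 5-character ".qasm" suffix with q_file[:-5]. A standalone illustration of the slicing, using invented paths:

from pathlib import Path

base = Path("/repo/qasm/visualize_pulse")  # hypothetical base directory
hit = base / "sub" / "demo.qasm"
plen = len(base.parts)
relative = str(Path(*hit.parts[plen:]))    # keep only the parts below base
assert relative == str(Path("sub", "demo.qasm"))
assert relative[:-5] == str(Path("sub", "demo"))  # ".qasm" stripped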
final_call_stack = { "nested_subroutines": {"dummy": 16}, "complex_arrays": { "dummy": 4, "two_d": [[1, 2], [3, 4], [5, 6]], "my_arr": [complex(1, 0), complex(0, 1), complex(0.8, 0.6)], "second": [1, 2, 3, 4], }, } def files() -> list[str]: base_path = Path(__file__).parent.parent.parent / "qasm/visualize_pulse" plen = len(base_path.parts) FILES = list(base_path.glob("**/*.qasm")) return [str(Path(*path.parts[plen:])) for path in FILES] QASM_FILES = files() def common_files() -> list[str]: files = [] cut = -5 for q_file in QASM_FILES: files.append(q_file[:cut]) return files COMMON_FILES = common_files() @pytest.fixture(name="basic_setup") def fixture_basic_setup() -> SetupInternal: json_path = Path(__file__).parent.parent.parent / "setups/interpreter.json" return SetupInternal.from_json(json_path) def test_visit_ClassicalDeclaration(): setup_path = Path(__file__).parent.parent.parent / "setups/complex.json" qasm_path = Path(__file__).parent.parent.parent / "qasm/interpreter/phase_freq.qasm" compiler = Compiler(qasm_path, setup_path) qasm_ast = compiler.load_program(qasm_path) ResolveIODeclaration().visit(qasm_ast) SemanticAnalyzer().visit(qasm_ast) DurationTransformer().visit(qasm_ast)
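PulseVisualizer reads the mutable phase/frequency state of Frame objects while filling its per-frame arrays, so the Frame API from the context is worth a quick sketch. The port key "ch1" and all values are placeholders, and setup is assumed to be a loaded SetupInternal as in the earlier sketch:

from shipyard.duration import Duration
from shipyard.setup.internal import Frame

frame = Frame(name="demo", port=setup.ports["ch1"], frequency=3e9, phase=0.0)
frame.shift_phase(0.5)                # phase -> 0.5
frame.shift_frequency(-1e6)           # frequency -> 3e9 - 1e6
frame.advance_to(Duration(time=32))   # matches units, then moves the frame time forward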
pv = PulseVisualizer(
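Only pv = PulseVisualizer( is attested as the gold next line; judging from the PulseVisualizer.__init__(setup, external_functions) signature in the context, one plausible (purely hypothetical) completion of the call would be:

# Hypothetical continuation: the arguments are guesses from the constructor
# signature; only the opening `pv = PulseVisualizer(` comes from the record itself.
pv = PulseVisualizer(
    SetupInternal.from_json(setup_path),
    vars(waveform_functions),  # dict mapping names to the waveform helpers
)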
9
2023-11-16 17:37:29+00:00
24k
quantuminterface/qiclib
src/qiclib/code/qi_jobs.py
[ { "identifier": "TaskRunner", "path": "src/qiclib/hardware/taskrunner.py", "snippet": "class TaskRunner(PlatformComponent):\n \"\"\"Driver to control the Taskrunner on the Hardware Platform.\"\"\"\n\n def __init__(\n self,\n name: str,\n connection,\n controller,\n qkit_instrument=True,\n ):\n super().__init__(name, connection, controller, qkit_instrument)\n self._stub = grpc_stub.TaskRunnerServiceStub(self._conn.channel)\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not fetch the current firmware hash of the Taskrunner\"\n )\n def firmware_hash(self):\n \"\"\"The hash of the current firmware running on the realtime core.\"\"\"\n return self._stub.GetStatus(proto.Empty()).firmware_hash\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not determine the build date of the Taskrunner firmware\"\n )\n def firmware_build_date(self):\n \"\"\"Returns the build date of the Taskrunner firmware.\"\"\"\n return self._stub.GetStatus(proto.Empty()).build_date\n\n @property\n @platform_attribute\n @ServiceHubCall(\n errormsg=\"Could not determine the build commit of the Taskrunner firmware\"\n )\n def firmware_build_commit(self):\n \"\"\"Returns the build commit hash of the Taskrunner firmware.\"\"\"\n return self._stub.GetStatus(proto.Empty()).build_commit\n\n @property\n @platform_attribute\n @ServiceHubCall(errormsg=\"Could not determine the status of the taskrunner\")\n def loaded_task(self):\n \"\"\"The name of the currently loaded task.\"\"\"\n return self._stub.GetStatus(proto.Empty()).task_name\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine the progress of the task\")\n def task_progress(self):\n \"\"\"Returns the progress of the task\"\"\"\n return self._stub.GetStatus(proto.Empty()).task_progress\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine number of available databoxes\")\n def databoxes_available(self):\n \"\"\"Returns the number of available databoxes.\"\"\"\n return self._stub.GetStatus(proto.Empty()).databoxes_available\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine state of the taskrunner\")\n def busy(self):\n \"\"\"Returns if the taskrunner is currently busy.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).busy\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if task has finished\")\n def task_done(self):\n \"\"\"Returns if the task has finished.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).done\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if task has error messages\")\n def task_errormsg_available(self):\n \"\"\"Returns if task has error messages.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).error_msg_available\n\n @property\n @ServiceHubCall(errormsg=\"Could not determine if error message queue is full\")\n def task_errormsg_queue_full(self):\n \"\"\"Returns if if error message queue is full.\"\"\"\n return self._stub.GetTaskState(proto.Empty()).error_msg_queue_full\n\n @ServiceHubCall(errormsg=\"Failed to start task\")\n def start_task(self, loop=False, overwrite=False):\n \"\"\"Starts the execution of a previously loaded task.\n\n :param loop: bool, optional\n if the task should be executed in a loop, by default False\n :param overwrite: bool, optional\n if a current running task should be stopped, by default False\n \"\"\"\n self._stub.StartTask(\n proto.StartTaskRequest(looping=loop, stop_running=overwrite)\n )\n\n @ServiceHubCall(errormsg=\"Failed to stop task\")\n def stop_task(self):\n \"\"\"Stops the execution of 
running task.\"\"\"\n self._stub.StopTask(proto.StopTaskRequest())\n\n @ServiceHubCall(errormsg=\"Failed to reset task\")\n def reset_task(self):\n \"\"\"Resets (unloads) a loaded task.\"\"\"\n self._stub.StopTask(proto.StopTaskRequest(reset=True))\n\n @ServiceHubCall(errormsg=\"Failed to load task binary\")\n def load_task_binary(self, filename, taskname):\n \"\"\"Loads a task binary into the taskrunner.\n The *taskname* needs to match the name of the task to load\n in order to verify that it is indeed the desired task file.\n\n :param filename: str\n name of the file with the task\n :param taskname: str\n name of the task\n\n :raises ValueError:\n if the path of the file is not found\n \"\"\"\n if not os.path.exists(filename):\n raise ValueError(\"File not found!\")\n\n with open(filename, \"rb\") as f:\n binary = f.read()\n self._stub.ProgramTask(proto.ProgramTaskRequest(name=taskname, task=binary))\n\n @ServiceHubCall(errormsg=\"Failed to compile and load task binary\")\n def load_task_source(self, filename, taskname):\n \"\"\"Loads a task source file `filename` into the taskrunner.\n `taskname` can be freely chosen to later identify the task on the platform.\n\n :param filename:\n name of the file with the task\n :param taskname:\n name of the task\n \"\"\"\n if os.path.isfile(filename):\n # File name can be full path to a file\n filepath = filename\n else:\n # or just the file name -> pick from task repository\n filepath = get_task_source(filename)\n\n with open(filepath, \"rb\") as f:\n binary = f.read()\n\n self._stub.CompileTask(proto.ProgramTaskRequest(name=taskname, task=binary))\n\n @ServiceHubCall(errormsg=\"Failed to set parameters\")\n def set_param_list(self, param_list):\n \"\"\"Sets the parameters for the task. param_list has to be an array of 32bit values.\"\"\"\n self._stub.SetParameter(proto.ParameterRequest(parameters=param_list))\n\n class DataMode(Enum):\n INT8 = 1\n UINT8 = 2\n INT16 = 3\n UINT16 = 4\n INT32 = 5\n UINT32 = 6\n INT64 = 7\n UINT64 = 8\n\n @ServiceHubCall(errormsg=\"Failed to fetch databoxes from taskrunner\")\n def get_databoxes_with_mode(\n self, mode=DataMode.INT32, require_done=True\n ) -> List[List[Any]]:\n \"\"\"Retrieves data from a previously started task on the R5.\n Depending on the parameter mode, the data is interpreted differently.\n\n :param mode:\n DataMode of the databoxes, by default DataMode.INT32\n :param require_done:\n if the task has to be finished before fetching data, by default True\n\n :return:\n A list of databoxes, being list of values themselves, either int32 or uint32.\n\n :raises Exception:\n If require_done is True and the Task is not finished\n :raises ValueError:\n If the data mode is not known\n :raises Exception:\n If require_done and not data is available\n \"\"\"\n self.check_task_errors()\n\n if require_done and not self.task_done:\n raise RuntimeError(\"Task should be finished prior to fetching data.\")\n\n method_call = {\n TaskRunner.DataMode.INT8: self._stub.GetDataboxesINT8,\n TaskRunner.DataMode.UINT8: self._stub.GetDataboxesUINT8,\n TaskRunner.DataMode.INT16: self._stub.GetDataboxesINT16,\n TaskRunner.DataMode.UINT16: self._stub.GetDataboxesUINT16,\n TaskRunner.DataMode.INT32: self._stub.GetDataboxesINT32,\n TaskRunner.DataMode.UINT32: self._stub.GetDataboxesUINT32,\n TaskRunner.DataMode.INT64: self._stub.GetDataboxesINT64,\n TaskRunner.DataMode.UINT64: self._stub.GetDataboxesUINT64,\n }.get(mode, None)\n if method_call is None:\n raise ValueError(\"Data mode is unknown! 
Only use DataMode Enum values.\")\n\n databoxes: List[List[Any]] = []\n last_index = -1\n for databox_reply in method_call(proto.Empty()):\n # print databox_reply.index, databox_reply.data[:]\n if last_index != databox_reply.index:\n # Create new (empty) databox in list\n databoxes.append([])\n last_index = databox_reply.index\n # Fill the latest databox with content\n databoxes[-1].extend(databox_reply.data[:])\n\n if require_done and not databoxes:\n raise RuntimeError(\n \"No data available to fetch. Are you sure the task completed successfully?\"\n )\n\n return databoxes\n\n def get_databoxes(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT32, require_done)\n\n def get_databoxes_INT8(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 8bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT8, require_done)\n\n def get_databoxes_UINT8(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 8bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT8, require_done)\n\n def get_databoxes_INT16(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 16bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT16, require_done)\n\n def get_databoxes_UINT16(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 16bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT16, require_done)\n\n def get_databoxes_INT32(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT32, require_done)\n\n def get_databoxes_UINT32(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 32bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT32, require_done)\n\n def get_databoxes_INT64(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 64bit signed integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.INT64, require_done)\n\n def get_databoxes_UINT64(self, require_done=True):\n \"\"\"Retrieves data from a previously started task on the R5.\n\n Data is interpreted as 64bit unsigned integer values which are returned as array.\n \"\"\"\n return self.get_databoxes_with_mode(TaskRunner.DataMode.UINT64, require_done)\n\n @ServiceHubCall\n def get_error_messages(self):\n \"\"\"Retrieves all error messages from the task\"\"\"\n reply = self._stub.GetTaskErrorMessages(proto.Empty())\n return reply.message[:]\n\n def check_task_errors(self):\n errors = self.get_error_messages()\n if errors:\n raise RuntimeError(\n \"The following error messages were retrieved \"\n 
+ \"from the Taskrunner:\\n{}\".format(\"\\n\".join(errors))\n )\n\n # DEPRECATED STUFF\n @property\n def data_size(self):\n \"\"\"TODO Replace by progress in all experiments.\"\"\"\n raise DeprecationWarning(\n \"data_size is not supported anymore! Use task_progress instead!\"\n )" }, { "identifier": "DataProvider", "path": "src/qiclib/experiment/qicode/data_provider.py", "snippet": "class DataProvider(ABC):\n \"\"\"\n Provides uniform access to experiment result data.\n\n Result data is received either from the taskrunner plugin or the unit cell plugin and comes in different formats.\n This class encapsulates the format differences, to allow for further processing of the data to be handled\n independently.\n \"\"\"\n\n @classmethod\n def create(cls, result, use_taskrunner: bool):\n if use_taskrunner:\n return _TaskrunnerDataProvider(result)\n return _InternalPluginDataProvider(result)\n\n def __init__(self, result):\n self._result = result\n\n @abstractmethod\n def get_raw_i(self, cell_index: int):\n pass\n\n @abstractmethod\n def get_raw_q(self, cell_index: int):\n pass\n\n def get_default_i(self, cell_index: int, index: int):\n return self.get_raw_i(cell_index)[index]\n\n def get_default_q(self, cell_index: int, index: int):\n return self.get_raw_q(cell_index)[index]\n\n def get_amp_pha_i(self, cell_index: int, index: int):\n return self.get_default_i(cell_index, index)\n\n def get_amp_pha_q(self, cell_index: int, index: int):\n return self.get_default_q(cell_index, index)\n\n @abstractmethod\n def get_iq_cloud_i(self, cell_index: int, index: int, recording_count: int):\n pass\n\n @abstractmethod\n def get_iq_cloud_q(self, cell_index: int, index: int, recording_count: int):\n pass\n\n def get_states(self, cell_index: int):\n return self._result[cell_index]\n\n def get_counts(self):\n return self.get_states(0)" }, { "identifier": "DataHandler", "path": "src/qiclib/experiment/qicode/data_handler.py", "snippet": "class DataHandler(ABC):\n \"\"\"\n Each subclass of this one handles a different way to process result data, depending on the type of experiment run.\n This usually includes splitting it up for the different boxes.\n It takes a list of cells and the recording data provider and processes it however it sees fit.\n In order to find out the box in which to store a recording it can access the `_result_recording_order` of a cell\n which provides the correct QiResult for the n-th executed recording.\n For examples, see the subclasses.\n\n :param data_provider: to access the experiments results\n :param cell_list: to store processed results there\n \"\"\"\n\n @staticmethod\n def _data_handler_factories() -> (\n Dict[str, Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]]\n ):\n \"\"\"\n This is a method instead of a static variable, because forward references to the subclasses are not possible in\n static variable assignments.\n \"\"\"\n return {\n \"average\": lambda data_provider, cell_list, averages: _DefaultDataHandler(\n data_provider, cell_list\n ),\n \"amp_pha\": lambda data_provider, cell_list, averages: _AmplitudePhaseDataHandler(\n data_provider, cell_list\n ),\n \"iqcloud\": lambda data_provider, cell_list, averages: _IQCloudDataHandler(\n data_provider, cell_list\n ),\n \"raw\": lambda data_provider, cell_list, averages: _RawDataHandler(\n data_provider, cell_list\n ),\n \"states\": _StateDataHandler,\n \"counts\": lambda data_provider, cell_list, averages: _CountDataHandler(\n data_provider, cell_list\n ),\n \"quantum_jumps\": lambda data_provider, cell_list, 
averages: _QuantumJumpsDataHandler(\n data_provider, cell_list\n ),\n \"custom\": lambda data_provider, cell_list, averages: _NotImplementedDataHandler(\n data_provider, cell_list\n ),\n }\n\n @staticmethod\n def names():\n return DataHandler._data_handler_factories().keys()\n\n @classmethod\n def get_factory_by_name(\n cls, name: str\n ) -> Optional[Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]]:\n factories = DataHandler._data_handler_factories()\n if name not in factories:\n return None\n return factories[name]\n\n @classmethod\n def get_custom_wrapper_factory(\n cls, custom_data_handler: Callable[[List[\"QiCell\"], DataProvider], None]\n ) -> Callable[[DataProvider, List[\"QiCell\"], int], \"DataHandler\"]:\n return lambda data_provider, cell_list, averages: _CustomDataHandlerWrapper(\n data_provider, cell_list, custom_data_handler\n )\n\n def __init__(self, data_provider: DataProvider, cell_list: List[\"QiCell\"]):\n self.data_provider = data_provider\n self.cell_list = cell_list\n\n @abstractmethod\n def process_results(self):\n pass" }, { "identifier": "SequencerInstruction", "path": "src/qiclib/code/qi_seq_instructions.py", "snippet": "class SequencerInstruction:\n OPCODE_WIDTH = 7\n FUNCT3_WIDTH = 3\n FUNCT7_WIDTH = 7\n REGISTER_WIDTH = 5\n LOWER_IMMEDIATE_WIDTH = 12\n UPPER_IMMEDIATE_WIDTH = 20\n\n LOWER_IMM_MAX = (\n 2 ** (LOWER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Lower immediate 12 Bits - 1Bit Signed\n LOWER_IMM_MIN = -(2 ** (LOWER_IMMEDIATE_WIDTH - 1))\n\n UPPER_IMM_MAX = (\n 2 ** (UPPER_IMMEDIATE_WIDTH - 1)\n ) - 1 # Upper immediate 20 Bits - 1Bit Signed\n UPPER_IMM_MIN = -(2 ** (UPPER_IMMEDIATE_WIDTH - 1))\n UPPER_IMM_MAX_UNSIGNED = 2**UPPER_IMMEDIATE_WIDTH\n\n imm_type = Union[int] # might include float in the future\n\n def __init__(self, OpCode: SeqOpCode) -> None:\n self.op = OpCode\n\n @staticmethod\n def is_value_in_lower_immediate(val: imm_type) -> bool:\n return (\n SequencerInstruction.LOWER_IMM_MIN\n <= val\n <= SequencerInstruction.LOWER_IMM_MAX\n )\n\n @staticmethod\n def is_value_in_unsigned_upper_immediate(val: imm_type) -> bool:\n return SequencerInstruction.UPPER_IMM_MAX_UNSIGNED >= abs(val)\n\n @abstractmethod\n def get_riscv_instruction(self) -> int:\n pass" }, { "identifier": "_QiVariableBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiVariableBase(QiExpression):\n \"\"\"Base class for QiVariables.\n Variables can be relevant to only a subset of QiCells, this subset is saved in _relevant_cells.\n Variables are simple expressions and, therefore, are typed.\n Variables can be compared by self.id.\"\"\"\n\n id_iter = itertools.count()\n str_id_iter = itertools.count()\n\n def __init__(\n self,\n type: QiType,\n value: Optional[Union[int, float]] = None,\n name=None,\n ):\n from .qi_jobs import QiCell\n\n assert isinstance(type, QiType)\n assert value is None or isinstance(value, (int, float))\n\n super().__init__()\n\n if type != QiType.UNKNOWN:\n self._type_info.set_type(type, _TypeDefiningUse.VARIABLE_DEFINITION)\n\n self.value = value\n\n self._value = value\n self._relevant_cells: Set[QiCell] = set()\n self.id = next(_QiVariableBase.id_iter)\n self.str_id = next(_QiVariableBase.str_id_iter)\n\n self._contained_variables.add(self)\n\n self.name = name\n\n @property\n def contained_variables(self):\n return self._contained_variables\n\n @staticmethod\n def reset_str_id():\n _QiVariableBase.str_id_iter = itertools.count()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_variable(self)\n\n def 
_equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, _QiVariableBase) and self.id == other.id\n\n def __hash__(self) -> int:\n return self.id\n\n def __str__(self) -> str:\n return f\"QiVariable({self.name or ''})\"" }, { "identifier": "_QiCalcBase", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiCalcBase(QiExpression):\n \"\"\"Represents binary and unary operations.\"\"\"\n\n def __init__(self, val1, op, val2) -> None:\n super().__init__()\n\n self.val1 = val1\n self.op: QiOp = op\n self.val2 = val2\n\n from .qi_types import add_qi_calc_constraints\n\n add_qi_calc_constraints(op, val1, val2, self)\n\n @property\n def contained_variables(self):\n \"\"\"Function traverses the operation tree to determine which QiVariables are used for the calculations.\n Found QiVariables are added to _contained_variables\"\"\"\n if len(self._contained_variables) == 0:\n self._variables_to_container()\n\n return self._contained_variables\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_calc(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return (\n isinstance(other, _QiCalcBase)\n and self.op == other.op\n and self.val1._equal_syntax(other.val1)\n and self.val2._equal_syntax(other.val2)\n )\n\n def __str__(self):\n return (\n \"(\"\n + self.val1.__str__()\n + \" \"\n + self.op.value\n + \" \"\n + self.val2.__str__()\n + \")\"\n )" }, { "identifier": "_QiConstValue", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class _QiConstValue(QiExpression):\n \"\"\"Represents QiExpression which are a constant (compiletime known) values.\n Integers can be used as either NORMAL, TIME or FREQUENCY values. It is up to the type inference to figure it out.\n If the value can be represented as a float value it has an additional attribute float_value which represents the value before\n it has been converted to the integer representation used by the sequencer.\n \"\"\"\n\n def __init__(self, value: Union[int, float]):\n super().__init__()\n\n self._given_value = value # Value given to the constructor. 
Is interpreted differently depending on the type.\n\n # Constant STATE values can only be 0 or 1, therefore we forbid QiType.STATE if we have a different value.\n if isinstance(self._given_value, float) or self._given_value not in [1, 0]:\n self._type_info.add_illegal_type(\n QiType.STATE, _IllegalTypeReason.INVALID_STATE_CONSTANT\n )\n\n if isinstance(self._given_value, float):\n self._type_info.add_illegal_type(\n QiType.NORMAL, _IllegalTypeReason.INVALID_NORMAL_CONSTANT\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self._given_value\n\n @property\n def value(self):\n \"\"\"\n Integer representation of the constant value.\n Since the sequencer doesn't have a floating point unit, any calculation has to use integers.\n In practice, this means we only perform fixpoint arithmetic and need to convert any float like value\n to such a fixpoint value.\n The correct conversion depends on the type.\n \"\"\"\n if self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n return self._given_value\n elif self.type == QiType.TIME:\n return int(util.conv_time_to_cycles(self._given_value, \"ceil\"))\n else:\n assert self.type == QiType.FREQUENCY\n return util.conv_freq_to_nco_phase_inc(self._given_value)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_constant(self)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n assert QiType.UNKNOWN not in (self.type, other.type)\n return isinstance(other, _QiConstValue) and self.value == other.value\n\n def __str__(self):\n if self.type in (QiType.TIME, QiType.FREQUENCY):\n value = self.float_value\n elif self.type in (QiType.NORMAL, QiType.STATE, QiType.UNKNOWN):\n value = self.value\n else:\n raise RuntimeError(\n \"This program point should be unreachable. Please file a bug report.\"\n )\n return f\"{value:g}\"" }, { "identifier": "QiCellProperty", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCellProperty(QiExpression):\n \"\"\"When describing experiments, properties of cells might not yet be defined. 
Instead, a QiCellProperty object will be generated.\n This object can be used as a length definition in cQiWait commands and QiPulse.\"\"\"\n\n def __init__(self, cell, name):\n super().__init__()\n from .qi_jobs import QiCell\n\n self.name: str = name\n self.cell: QiCell = cell\n self.operations = lambda val: val\n self.opcode = \"x\"\n\n @property\n def opcode_p(self):\n \"\"\"Old opcode in parentheses for building the new opcode\"\"\"\n return self.opcode if self.opcode == \"x\" else f\"({self.opcode})\"\n\n def resolve_equal(self, o: object) -> bool:\n if isinstance(o, QiCellProperty):\n return self.name == o.name and self.opcode == o.opcode\n elif o is None:\n return False\n try:\n return o == self()\n except KeyError:\n return False # At time of comparison, unresolved property is not equal to o\n\n def __call__(self):\n value = self.cell._properties.get(self.name)\n\n if isinstance(value, QiCellProperty) or value is None:\n raise KeyError(\"Property could not be resolved\")\n return self.operations(value)\n\n @property\n def value(self):\n if self.type == QiType.TIME:\n return util.conv_time_to_cycles(self())\n elif self.type == QiType.FREQUENCY:\n return util.conv_freq_to_nco_phase_inc(self())\n elif self.type == QiType.NORMAL:\n return self()\n elif self.type == QiType.STATE:\n return self()\n else:\n raise RuntimeError(\n \"Missing type information to resolve the value to a machine value.\"\n )\n\n @property\n def float_value(self):\n assert self.type in (QiType.TIME, QiType.FREQUENCY)\n return self()\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n visitor.visit_cell_property(self)\n\n @property\n def contained_variables(self):\n return QiVariableSet()\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n return isinstance(other, QiCellProperty) and self.resolve_equal(other)\n\n def move_add_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations # Necessary because of recursion otherwise\n self.operations = lambda val: old_op(val) + x.value\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_radd_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value + old_op(val)\n self.opcode = f\"{self.opcode_p} + {x}\"\n return self\n\n def move_sub_op_to_property(self, x: _QiConstValue):\n if x._given_value == 0:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) - x.value\n self.opcode = f\"{self.opcode_p} - {x}\"\n return self\n\n def move_rsub_op_to_property(self, x: _QiConstValue):\n old_op = self.operations\n self.operations = lambda val: x.value - old_op(val)\n self.opcode = f\"{x} - {self.opcode_p}\"\n return self\n\n def move_mul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: old_op(val) * x.value\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n def move_rmul_op_to_property(self, x: _QiConstValue):\n if x._given_value == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: x.value * old_op(val)\n self.opcode = f\"{x} * {self.opcode_p}\"\n return self\n\n # These operations are not implemented for general QiExpressions\n # and are, therefore, left as they are.\n\n def __truediv__(self, x):\n if (isinstance(x, _QiConstValue) and x._given_value == 1) or x == 1:\n return self\n old_op = self.operations\n self.operations = lambda val: 
old_op(val) / x\n self.opcode = f\"{self.opcode_p} / {x}\"\n return self\n\n def __rtruediv__(self, x):\n old_op = self.operations\n self.operations = lambda val: x / old_op(val)\n self.opcode = f\"{x} / {self.opcode_p}\"\n return self" }, { "identifier": "QiExpression", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiExpression:\n \"\"\"Superclass of every possible qicode expression.\"\"\"\n\n def __init__(self):\n self._contained_variables = QiVariableSet()\n self._type_info = _TypeInformation(self)\n\n @property\n def type(self):\n return self._type_info.type\n\n @staticmethod\n def _from(x):\n \"\"\"Creates an instance of QiExpression from the provided argument if possible.\"\"\"\n if isinstance(x, (float, int)):\n return _QiConstValue(x)\n elif isinstance(x, QiExpression):\n return x\n else:\n raise RuntimeError(f\"Cannot create QiExpression from type {type(x)}.\")\n\n @abstractmethod\n def accept(self, visitor: QiExpressionVisitor):\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `accept`. This is a bug.\"\n )\n\n @property\n def contained_variables(self):\n \"\"\"Returns the variables used in this expression.\n QiExpression subclasses which contain variables (_QiCalcBase and _QiVariableBase) need to override this.\n \"\"\"\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `contained_variables`. This is a bug.\"\n )\n\n def _variables_to_container(self):\n if isinstance(self, _QiVariableBase):\n self._contained_variables.add(self)\n elif isinstance(self, _QiCalcBase):\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n def _equal_syntax(self, other: \"QiExpression\") -> bool:\n raise NotImplementedError(\n f\"{self.__class__} has not implemented `_equal_syntax`. 
This is a bug.\"\n )\n\n # QiCellProperties are supposed to support some form of constant folding.\n # However, originally, instead of implementing this in an extra pass over\n # QiJob, it was added to the QiCellProperty class.\n # In order to keep support for this limited form of constant folding,\n # this logic was placed here.\n\n # (I'm not sure why we don't fold when both operands are QiCellProperty.\n # And I think the reason we don't fold two _QiConstValue operands is that originally\n # they were just int/float and would \"fold\" implicitly when using any\n # math operator on them.)\n\n # If anyone ever feels the need to improve this situation, I would\n # encourage them to implement a constant folding pass using the existing\n # dataflow infrastructure.\n # This pdf seems to give a nice short introduction to the topic:\n # http://openclassroom.stanford.edu/MainFolder/courses/Compilers/docs/slides/15-02-constant-propagation-annotated.pdf\n\n def __add__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_add_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_radd_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.PLUS, x)\n\n def __radd__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_radd_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_add_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.PLUS, self)\n\n def __sub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_sub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rsub_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MINUS, x)\n\n def __rsub__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rsub_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_sub_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MINUS, self)\n\n def __mul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_mul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_rmul_op_to_property(self)\n else:\n return _QiCalcBase(self, QiOp.MULT, x)\n\n def __rmul__(self, x):\n x = QiExpression._from(x)\n if isinstance(self, QiCellProperty) and isinstance(x, _QiConstValue):\n return self.move_rmul_op_to_property(x)\n elif isinstance(self, _QiConstValue) and isinstance(x, QiCellProperty):\n return x.move_mul_op_to_property(self)\n else:\n return _QiCalcBase(x, QiOp.MULT, self)\n\n def __lshift__(self, x):\n return _QiCalcBase(self, QiOp.LSH, QiExpression._from(x))\n\n def __rshift__(self, x):\n return _QiCalcBase(self, QiOp.RSH, QiExpression._from(x))\n\n def __and__(self, x):\n return _QiCalcBase(self, QiOp.AND, QiExpression._from(x))\n\n def __rand__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.AND, self)\n\n def __or__(self, x):\n return _QiCalcBase(self, QiOp.OR, QiExpression._from(x))\n\n def __ror__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.OR, self)\n\n def __xor__(self, x):\n return _QiCalcBase(self, QiOp.XOR, 
QiExpression._from(x))\n\n def __rxor__(self, x):\n return _QiCalcBase(QiExpression._from(x), QiOp.XOR, self)\n\n def __invert__(self):\n return _QiCalcBase(self, QiOp.NOT, None)\n\n def __lt__(self, x):\n return QiCondition(self, QiOpCond.LT, QiExpression._from(x))\n\n def __le__(self, x):\n return QiCondition(self, QiOpCond.LE, QiExpression._from(x))\n\n def __gt__(self, x):\n return QiCondition(self, QiOpCond.GT, QiExpression._from(x))\n\n def __ge__(self, x):\n return QiCondition(self, QiOpCond.GE, QiExpression._from(x))\n\n def __eq__(self, x):\n return QiCondition(self, QiOpCond.EQ, QiExpression._from(x))\n\n def __ne__(self, x):\n return QiCondition(self, QiOpCond.NE, QiExpression._from(x))" }, { "identifier": "QiVariableSet", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiVariableSet:\n \"\"\"Provides set functionality for QiVariables.\n QiVariables overload comparison operations to build operation trees; to still allow comparisons, their ids are used.\n \"\"\"\n\n def __init__(self) -> None:\n self._var_list: List[\"_QiVariableBase\"] = []\n self._var_id_list: List[int] = []\n\n def __contains__(self, x):\n return x.id in self._var_id_list\n\n def add(self, x: \"_QiVariableBase\"):\n if x.id not in self._var_id_list:\n self._var_id_list.append(x.id)\n self._var_list.append(x)\n\n def update(self, var_set):\n for var in var_set:\n self.add(var)\n\n def __iter__(self):\n self.n = 0\n return self\n\n def __next__(self):\n if self.n < len(self._var_list):\n var = self._var_list[self.n]\n self.n += 1\n return var\n else:\n raise StopIteration\n\n def __len__(self):\n return len(self._var_list)" }, { "identifier": "QiCondition", "path": "src/qiclib/code/qi_var_definitions.py", "snippet": "class QiCondition:\n \"\"\"Saves conditional comparisons.\n Can only be a root node.\"\"\"\n\n def __init__(\n self,\n val1: QiExpression,\n op: QiOpCond = QiOpCond.GT,\n val2: QiExpression = _QiConstValue(0),\n ) -> None:\n self._contained_variables = QiVariableSet()\n\n self.val1 = val1\n self.op = op\n self.val2 = val2\n\n from .qi_types import add_qi_condition_constraints\n\n add_qi_condition_constraints(op, val1, val2)\n\n @property\n def contained_variables(self):\n if len(self._contained_variables) == 0:\n self._contained_variables.update(self.val1.contained_variables)\n self._contained_variables.update(self.val2.contained_variables)\n\n return self._contained_variables\n\n def accept(self, visitor):\n visitor.visit_condition(self)\n\n def __str__(self) -> str:\n return f\"{self.val1} {self.op.value} {self.val2}\"" }, { "identifier": "QiPulse", "path": "src/qiclib/code/qi_pulse.py", "snippet": "class QiPulse:\n \"\"\"\n Class to describe a single pulse.\n\n :param length: length of the pulse. This can also be a QiVariable for variable pulse lengths.\n :param shape: pulse shape (e.g. rect, gauss, ...)\n :param amplitude: relative amplitude of your pulse. This can also be a QiVariable for variable pulse amplitudes. NOT IMPLEMENTED\n :param phase: phase of the pulse in deg. (e.g. 
90 for a pulse around the y-axis of the Bloch sphere)\n :param frequency: Frequency of your pulse, which is loaded into the PulseGen\n \"\"\"\n\n Type = Union[float, _QiVariableBase]\n\n def __init__(\n self,\n length: Union[float, _QiVariableBase, str],\n shape: Shape = ShapeLib.rect,\n amplitude: Union[float, _QiVariableBase] = 1.0,\n phase: float = 0.0,\n frequency: Union[float, QiExpression, None] = None,\n hold=False,\n ):\n from .qi_jobs import QiCellProperty\n\n if isinstance(length, str):\n mode = length.lower()\n if mode not in [\"cw\", \"off\"]:\n raise ValueError(\"QiPulse with str length only accepts 'cw' or 'off'.\")\n length = util.conv_cycles_to_time(1)\n if mode == \"cw\":\n hold = True\n else:\n amplitude = 0\n else:\n mode = \"normal\"\n\n self.mode = mode\n self.shape = shape\n self.amplitude = amplitude\n self.phase = phase\n self._length = length\n self.frequency = (\n QiExpression._from(frequency) if frequency is not None else None\n )\n self.hold = hold\n self.shift_phase = False\n\n if self.frequency is not None:\n self.frequency._type_info.set_type(\n QiType.FREQUENCY, _TypeDefiningUse.PULSE_FREQUENCY\n )\n\n self.var_dict = {}\n\n if isinstance(length, QiExpression):\n length._type_info.set_type(QiType.TIME, _TypeDefiningUse.PULSE_LENGTH)\n\n if isinstance(length, _QiVariableBase):\n self.var_dict[\"length\"] = length\n if shape != ShapeLib.rect:\n raise NotImplementedError(\n \"Variable pulse lengths are only supported for rectangular pulses\"\n )\n elif isinstance(length, QiCellProperty):\n pass\n elif util.conv_time_to_cycles(length) >= 2**32:\n raise RuntimeError(\n f\"Pulse length exceeds possible wait time, cycles {util.conv_time_to_cycles(length)}\"\n )\n\n if isinstance(amplitude, _QiVariableBase):\n raise NotImplementedError(\"Variable Amplitude not implemented yet\")\n # self.var_dict[\"amplitude\"] = amplitude\n\n def _are_variable_length(self, other) -> bool:\n return self.is_variable_length and other.is_variable_length\n\n def _are_same_length(self, other) -> bool:\n return (\n not isinstance(self._length, _QiVariableBase)\n and not isinstance(other._length, _QiVariableBase)\n and (self._length is other._length)\n )\n\n def _are_same_amplitude(self, other) -> bool:\n return (\n not isinstance(self.amplitude, _QiVariableBase)\n and not isinstance(other.amplitude, _QiVariableBase)\n and (self.amplitude == other.amplitude)\n )\n\n def __eq__(self, o: object) -> bool:\n equal_length: bool = isinstance(o, QiPulse) and (\n self._are_variable_length(o) or self._are_same_length(o)\n )\n equal_amplitude: bool = isinstance(o, QiPulse) and self._are_same_amplitude(o)\n\n return (\n isinstance(o, QiPulse)\n and equal_length\n and equal_amplitude\n and (self.hold == o.hold)\n and (self.shape == o.shape)\n and (self.phase == o.phase)\n and (\n self.frequency._equal_syntax(o.frequency)\n if self.frequency is not None and o.frequency is not None\n else self.frequency is o.frequency\n )\n )\n\n def __call__(self, samplerate: float, **variables: Any) -> np.ndarray:\n \"\"\"\n Returns the pulse envelope for a given sample rate.\n :param samplerate: sample rate for calculating the envelope\n :param variables: the variables for the length/amplitude function, if any; legacy of qup_pulses\n\n :return: envelope of the pulse as numpy array.\n \"\"\"\n from .qi_jobs import QiCellProperty\n\n if self.is_variable_length:\n # variable pulses are held until ended by another pulse, so there is no need to use the correct length\n return np.array([self.amplitude] * 4)\n\n length = (\n self._length() if 
isinstance(self._length, QiCellProperty) else self._length\n )\n\n if (\n util.conv_time_to_cycles(length) >= 2**32\n ): # check value again, QiCellproperty might be used\n raise RuntimeError(\n f\"Pulse length exceeds possible wait time, cycles {util.conv_time_to_cycles(length)}\"\n )\n\n amplitude = self.amplitude\n timestep = 1.0 / samplerate\n\n if length < timestep / 2.0:\n if length != 0:\n logging.warning(\n \"A pulse is shorter than %f ns and thus is omitted.\", length * 1e09\n )\n\n return np.zeros(0)\n\n time_fractions = np.arange(0, length, timestep) / length\n envelope = amplitude * self.shape(time_fractions)\n\n return envelope\n\n @property\n def length(self):\n return self.var_dict.get(\"length\", self._length)\n\n @property\n def variables(self):\n return list(self.var_dict.values())\n\n @property\n def is_variable_length(self):\n return isinstance(self._length, _QiVariableBase)\n\n def _stringify_args(self) -> str:\n \"\"\"Determines non-default args to explicitly stringify\"\"\"\n arg_strings = []\n defaults = self.__init__.__defaults__\n\n if self.mode == \"normal\":\n arg_strings.append(str(self.length))\n else:\n arg_strings.append(f'\"{self.mode}\"')\n\n if self.shape != defaults[0]:\n arg_strings.append(f\"shape={self.shape}\")\n if not _equal(self.amplitude, defaults[1]) and self.mode != \"off\":\n arg_strings.append(f\"amplitude={self.amplitude}\")\n if not _equal(self.phase, defaults[2]):\n arg_strings.append(f\"phase={self.phase}\")\n if not _equal(self.frequency, defaults[3]):\n arg_strings.append(f\"frequency={self.frequency}\")\n\n return \", \".join(arg_strings)\n\n def _stringify(self) -> str:\n return f\"QiPulse({self._stringify_args()})\"" }, { "identifier": "QiCMContainedCellVisitor", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiCMContainedCellVisitor(QiCommandVisitor):\n \"\"\"Visitor to check which cells are used inside context managers.\"\"\"\n\n def __init__(self) -> None:\n self.contained_cells: Set[QiCell] = set()\n\n def visit_cell_command(self, cell_cmd):\n self.contained_cells.update(cell_cmd._relevant_cells)\n\n def visit_context_manager(self, context_manager):\n visitor = QiCMContainedCellVisitor()\n for item in context_manager.body:\n item.accept(visitor)\n\n context_manager._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_if(self, if_cm):\n visitor = QiCMContainedCellVisitor()\n for command in if_cm.body:\n command.accept(visitor)\n\n for command in if_cm._else_body:\n command.accept(visitor)\n\n if_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_parallel(self, parallel_cm):\n visitor = QiCMContainedCellVisitor()\n for cmd_list in parallel_cm.entries:\n for cmd in cmd_list:\n cmd.accept(visitor)\n\n parallel_cm._relevant_cells.update(visitor.contained_cells)\n\n self.contained_cells.update(visitor.contained_cells)\n\n def visit_variable_command(self, variable_cmd):\n self.contained_cells.update(variable_cmd._relevant_cells)\n\n def visit_sync_command(self, sync_cmd):\n self.contained_cells.update(sync_cmd._relevant_cells)\n\n def visit_asm_command(self, asm_cmd):\n self.contained_cells.update(asm_cmd._relevant_cells)\n\n def visit_mem_store_command(self, store_cmd):\n self.contained_cells.update(store_cmd._relevant_cells)" }, { "identifier": "QiResultCollector", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiResultCollector(QiCommandVisitor):\n def __init__(self):\n # If 
there are multiple QiResults used, we need to\n # simulate in which order they record.\n self.found_qi_results = set()\n # We also collect the recordings which contain the qi_results above\n self.corresponding_recordings = set()\n\n # True if a recording saves to a QiResult within an if.\n # In these cases we cannot necessarily simulate the recording order.\n self.recording_in_if = False\n\n self.if_else_depth = 0\n\n def visit_cell_command(self, cell_cmd):\n from .qi_jobs import cQiRecording, cQiPlayReadout\n\n if isinstance(cell_cmd, cQiPlayReadout) and cell_cmd.recording is not None:\n cell_cmd = cell_cmd.recording\n\n if isinstance(cell_cmd, cQiRecording):\n if self.if_else_depth > 0:\n self.recording_in_if = True\n\n self.found_qi_results.add(cell_cmd.save_to)\n self.corresponding_recordings.add(cell_cmd)\n\n def visit_if(self, if_cm):\n self.if_else_depth += 1\n\n for cmd in if_cm.body:\n cmd.accept(self)\n\n for cmd in if_cm._else_body: # visit the else branch as well\n cmd.accept(self)\n\n self.if_else_depth -= 1\n\n def visit_parallel(self, parallel_cm):\n for cmd in parallel_cm.body:\n cmd.accept(self)\n\n def visit_for_range(self, for_range_cm):\n for cmd in for_range_cm.body:\n cmd.accept(self)" }, { "identifier": "QiVarInForRange", "path": "src/qiclib/code/qi_visitor.py", "snippet": "class QiVarInForRange(QiCommandVisitor):\n \"\"\"Visitor used to visit QiCommands inside a ForRange context manager. Raises an error if the variable used in the ForRange head is the target of an Assign or Store\n command inside the ForRange body. Additionally generates a UserWarning when the loop variable is used inside a Parallel CM.\n \"\"\"\n\n def __init__(self, var) -> None:\n self.var = var\n\n def raise_exception(self):\n raise RuntimeError(\n \"Variable used in ForRange must not be used in internal Assign-Commands, var: \"\n + str(self.var)\n )\n\n def visit_cell_command(self, cell_cmd):\n from .qi_jobs import cQiStore\n\n if isinstance(cell_cmd, cQiStore):\n if id(cell_cmd.store_var) == id(self.var):\n self.raise_exception()\n\n def visit_context_manager(self, context_manager):\n for item in context_manager.body:\n item.accept(self)\n\n def visit_if(self, if_cm):\n for command in if_cm.body:\n command.accept(self)\n\n for command in if_cm._else_body:\n command.accept(self)\n\n def visit_parallel(self, parallel_cm):\n if self.var in parallel_cm._associated_variable_set:\n raise RuntimeError(\n \"Loop variable inside Parallel Context Manager might result in unexpected behaviour. 
\"\n \"Please unroll loop or change variable.\"\n )\n\n def visit_variable_command(self, variable_cmd):\n pass\n\n def visit_assign_command(self, assign_cmd):\n if id(assign_cmd.var) == id(self.var):\n self.raise_exception()\n\n def visit_sync_command(self, sync_cmd):\n pass" }, { "identifier": "QiProgramBuilder", "path": "src/qiclib/code/qi_prog_builder.py", "snippet": "class QiProgramBuilder:\n def __init__(\n self,\n cell_list: List[Any],\n cell_map: List[Any],\n command_list: List[Any],\n skip_nco_sync: bool = False,\n nco_sync_length: float = 0,\n ) -> None:\n from .qi_sequencer import Sequencer\n\n self.cell_seq_dict: Dict[Any, Sequencer] = {}\n self.result_boxes = []\n\n for cell, index in zip(cell_list, cell_map):\n self.cell_seq_dict[cell] = Sequencer(cell_index=index)\n\n for resultbox in cell._result_container.values():\n self.result_boxes.append(resultbox)\n\n self.cell_map = cell_map\n\n self.command_list = command_list\n\n self.skip_nco = skip_nco_sync\n self.nco_length = nco_sync_length\n\n @staticmethod\n def assign_cell_to_context_manager(commands: List[Any]):\n contained_cells_visitor = QiCMContainedCellVisitor()\n for command in commands:\n command.accept(contained_cells_visitor)\n\n @staticmethod\n def assign_variables_to_cell(commands: List[Any]):\n cell_to_variable_visitor = QiCmdVariableInspection()\n for command in reversed(commands):\n command.accept(cell_to_variable_visitor)\n\n QiProgramBuilder.assign_cell_to_context_manager(\n commands\n ) # run again, to ensure all Assignment statements are considered as well\n\n def build_program(self):\n for cell, sequencer in self.cell_seq_dict.items():\n cell.reset()\n\n if self.skip_nco is False:\n sequencer.add_nco_sync(self.nco_length)\n\n self.assign_cell_to_context_manager(self.command_list)\n\n self.assign_variables_to_cell(self.command_list)\n\n prog_builder = ProgramBuilderVisitor(self.cell_seq_dict, self.cell_map)\n\n for command in self.command_list:\n command.accept(prog_builder)\n\n for sequencer in self.cell_seq_dict.values():\n sequencer.end_of_program()\n\n return self.cell_seq_dict\n\n def get_all_variables(self) -> Dict[Any, Dict[Any, int]]:\n vars: Dict[Any, Dict[Any, int]] = {}\n for cell, seq in self.cell_seq_dict.items():\n for var in cell._relevant_vars:\n if var not in vars:\n vars[var] = {}\n vars[var][cell] = seq.get_var_register(var).adr\n return vars" }, { "identifier": "QiType", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiType(Enum):\n \"\"\"The type that a :class:`~qiclib.code.qi_var_definitions.QiExpression` has.\"\"\"\n\n UNKNOWN = 0\n TIME = 1\n \"\"\"Time values contain some amount of times (in cycles) that, for example, can be used in wait commands.\n They are specified using float (seconds) and are converted to cycles automatically.\n \"\"\"\n STATE = 2\n \"\"\"State values are the result of a recording.\"\"\"\n NORMAL = 3\n \"\"\"Freely usable integer values.\"\"\"\n FREQUENCY = 4\n \"\"\"\n Frequency values can be used in the Play/PlayReadout commands and, like TIME, are specified using floats.\n \"\"\"" }, { "identifier": "QiPostTypecheckVisitor", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiPostTypecheckVisitor(QiJobVisitor):\n \"\"\"Checks that every variable has an assigned type.\n The start and end values of ForRanges over time values are converted to cycles, because we only know with\n certainty whether they iterate over NORMAL or TIME values after the QiTypeFallbackVisitor has run.\n \"\"\"\n\n def __init__(self):\n pass\n\n def visit_for_range(self, 
for_range_cm):\n from qiclib.packages.constants import CONTROLLER_CYCLE_TIME\n from .qi_var_definitions import _QiConstValue, QiType\n from .qi_jobs import ForRange\n import numpy as np\n\n for_range_cm: ForRange = for_range_cm\n\n for_range_cm.var.accept(self)\n for_range_cm.start.accept(self)\n for_range_cm.end.accept(self)\n\n super().visit_for_range(for_range_cm)\n\n if for_range_cm.var.type == QiType.TIME:\n if isinstance(for_range_cm.start, _QiConstValue):\n if for_range_cm.start.value < 0:\n raise RuntimeError(\n f\"A ForRange with a negative time value ({for_range_cm.start._given_value}) is not allowed\"\n )\n\n if for_range_cm.end.value == 0:\n warnings.warn(\"End value of 0 will not be included in ForRange.\")\n\n # round to 11 decimals, if result is CONTROLLER_CYCLE_TIME then float modulo probably failed\n if (\n round(np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME), 11)\n != 0\n and round(\n np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME), 11\n )\n != CONTROLLER_CYCLE_TIME\n ):\n raise RuntimeError(\n f\"When using QiTimeVariables define step size as multiple of {CONTROLLER_CYCLE_TIME*1e9:.3g} ns.\"\n f\" (It is currently off by {np.mod(for_range_cm.step._given_value, CONTROLLER_CYCLE_TIME)*1e9:.3g} ns.)\"\n )\n elif (\n for_range_cm.var.type == QiType.FREQUENCY\n and isinstance(for_range_cm.end, _QiConstValue)\n and for_range_cm.end.value == 0\n ):\n warnings.warn(\"End value of 0 will not be included in ForRange.\")\n\n def visit_assign_command(self, assign_cmd):\n assign_cmd.var.accept(self)\n super().visit_assign_command(assign_cmd)\n\n def visit_constant(self, const):\n from .qi_var_definitions import QiType\n\n if const.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {const}.\")\n\n def visit_variable(self, var):\n from .qi_var_definitions import QiType\n\n if var.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {var}.\")\n\n def visit_calc(self, calc):\n from .qi_var_definitions import QiType\n\n super().visit_calc(calc)\n if calc.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {calc}.\")\n\n def visit_cell_property(self, cell_prop):\n if cell_prop.type == QiType.UNKNOWN:\n raise TypeError(f\"Could not infer type of {cell_prop}.\")" }, { "identifier": "QiTypeFallbackVisitor", "path": "src/qiclib/code/qi_types.py", "snippet": "class QiTypeFallbackVisitor(QiJobVisitor):\n \"\"\"Sets the fallback type to NORMAL for _QiConstValues that weren't given a type during QiJob construction.\n This is important for qicode like the following:\n\n .. code-block:: python\n\n with ForRange(x, 0, 10, 1):\n ...\n\n Here, x could theoretically be either of type TIME or NORMAL because int literals can have either type.\n However, we want this code to compile with integer semantics, which is why we need this visitor to run\n after job construction. 
(see QiJob __exit__ method).\n \"\"\"\n\n def visit_for_range(self, for_range_cm):\n from .qi_var_definitions import QiType\n\n if for_range_cm.var.type == QiType.UNKNOWN:\n for_range_cm.var._type_info.set_type(QiType.NORMAL, _TypeFallback.INT)\n\n super().visit_for_range(for_range_cm)\n\n def visit_constant(self, const):\n from .qi_var_definitions import QiType\n\n if const.type == QiType.UNKNOWN:\n if isinstance(const._given_value, float):\n const._type_info.set_type(QiType.TIME, _TypeFallback.FLOAT)\n else:\n assert isinstance(const._given_value, int)\n const._type_info.set_type(QiType.NORMAL, _TypeFallback.INT)" }, { "identifier": "_TypeDefiningUse", "path": "src/qiclib/code/qi_types.py", "snippet": "class _TypeDefiningUse(_TypeFact, Enum):\n VARIABLE_DEFINITION = 0\n VALUE_DEFINITION = 1\n SHIFT_EXPRESSION = 2\n PULSE_LENGTH = 3\n RECORDING_SAVE_TO = 4\n WAIT_COMMAND = 5\n RECORDING_OFFSET_EXPRESSION = 6\n PULSE_FREQUENCY = 7\n\n def to_error_message(self) -> str:\n return {\n _TypeDefiningUse.VARIABLE_DEFINITION: \"has been defined by the user as this type\",\n _TypeDefiningUse.VALUE_DEFINITION: \"has been defined by the user as this type\",\n _TypeDefiningUse.SHIFT_EXPRESSION: \"is used as the right-hand side of a shift expression\",\n _TypeDefiningUse.PULSE_LENGTH: \"is used as length of pulse\",\n _TypeDefiningUse.RECORDING_SAVE_TO: \"is used as save_to of recording command\",\n _TypeDefiningUse.WAIT_COMMAND: \"is used as length in wait command\",\n _TypeDefiningUse.RECORDING_OFFSET_EXPRESSION: \"is used as a recording offset\",\n _TypeDefiningUse.PULSE_FREQUENCY: \"is used as pulse frequency\",\n }[self]" } ]
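The QiPulse snippet above documents three construction modes: a float length in seconds, and the special string lengths "cw" and "off". A minimal usage sketch, not part of the record itself; the import path is inferred from the context entry's "path" field:

from qiclib.code.qi_pulse import QiPulse, ShapeLib  # path inferred from the context entry above

# Normal pulse: 48 ns rectangular envelope, 90 deg phase, 60 MHz frequency.
pi_half = QiPulse(48e-9, shape=ShapeLib.rect, phase=90.0, frequency=60e6)

# "cw" builds a minimal-length pulse with hold=True (held until replaced);
# "off" builds a minimal-length pulse with the amplitude forced to 0.
carrier_on = QiPulse("cw")
carrier_off = QiPulse("off")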
import os import json import functools import warnings import numpy as np import qiclib.packages.utility as util from abc import abstractmethod from typing import Dict, List, Callable, Optional, Union, Set, Any, Type from ..hardware.taskrunner import TaskRunner from ..experiment.qicode.data_provider import DataProvider from ..experiment.qicode.data_handler import DataHandler from .qi_seq_instructions import SequencerInstruction from .qi_var_definitions import ( _QiVariableBase, _QiCalcBase, _QiConstValue, QiCellProperty, QiExpression, QiVariableSet, QiCondition, ) from .qi_pulse import QiPulse from .qi_visitor import ( QiCMContainedCellVisitor, QiResultCollector, QiVarInForRange, ) from .qi_prog_builder import QiProgramBuilder from .qi_types import ( QiType, QiPostTypecheckVisitor, QiTypeFallbackVisitor, _TypeDefiningUse, ) from .qi_types import ( _TypeConstraintReasonQiCommand, _IllegalTypeReason, _add_equal_constraints, ) from .analysis.qi_insert_mem_parameters import ( insert_recording_offset_store_commands, insert_manipulation_pulse_frequency_store_commands, insert_readout_pulse_frequency_store_commands, ) from .qi_simulate import Simulator from ..experiment.qicode.base import QiCodeExperiment from qiclib.experiment.qicode.base import _TaskrunnerSettings from .qi_visitor import QiStringifyJob
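The imports above pull in the two type-inference passes whose docstrings appear in the context: QiTypeFallbackVisitor assigns NORMAL to untyped integer literals and TIME to float literals after job construction, and QiPostTypecheckVisitor then rejects anything still UNKNOWN. A hedged sketch of qicode exercising this; QiJob, QiCells and ForRange are confirmed by the snippets, while QiVariable and Wait are assumed user-facing names (the snippets only show a QiVariable repr and mention the cQiWait command):

from qiclib.code import QiJob, QiCells, ForRange, QiVariable, Wait  # import path assumed

with QiJob() as job:
    q = QiCells(1)
    x = QiVariable()                # assumed variable factory; type inferred later
    with ForRange(x, 0, 10, 1):     # int literals -> fallback type NORMAL
        Wait(q[0], 52e-9)           # float literal -> TIME, converted to cycles
# On QiJob.__exit__, QiTypeFallbackVisitor fills in the fallback types and
# QiPostTypecheckVisitor raises TypeError for any expression still UNKNOWN.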
16,846
raise RuntimeError("Cannot use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use :python:`QiCells(1)` instead. A :python:`QiCell` must be instantiated within a :class:`QiJob` context. The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." 
) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length)
# Copyright © 2017-2023 Quantum Interface ([email protected]) # Richard Gebauer, IPE, Karlsruhe Institute of Technology # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. """ This is the main module of QiCode. Here, all important commands to write QiPrograms are defined. """ class QiResult: """Result of an experiment. Can be accessed via :python:`job.cells[cell_index].data("result name")`, where :python:`cells` denotes a :class:`QiCells` object and :python:`cell_index` an integer. The actual data can be retrieved as a numpy array using the :meth:`get` method Example ------- .. code-block:: python qic: QiController = ... sample: QiSample = ... with QiJob() as job: q = QiCells(1) Readout(q[0], save_to="result") job.run(qic, sample, averages=1000) data = job.cells[0].data("result") :param name: The name of the variable, by default None """ def __init__(self, name: Optional[str] = None) -> None: self._cell = None self.data = None self.recording_count = 0 self.name: str = "" if name is None else name def get(self) -> np.ndarray: """Gets the data of the result as a numpy array :return: The data of the experiment """ return np.array(self.data) def __str__(self) -> str: return f'QiResult("{self.name}")' class QiCommand: """Base class of every Job command. Provides _relevant_cells, containing every cell used for the execution of the command. Provides _associated_variable_set, containing every variable needed for the execution of the command. """ def __init__(self) -> None: self._associated_variable_set = QiVariableSet() self._relevant_cells: Set[QiCell] = set() @abstractmethod def accept(self, visitor, *input): raise RuntimeError( f"{self.__class__} doesn't implement `accept`. This is a bug." ) def is_variable_relevant(self, variable: _QiVariableBase) -> bool: return variable in self._associated_variable_set def add_associated_variable(self, x): if isinstance(x, _QiVariableBase): self._associated_variable_set.add(x) def __str__(self) -> str: return "cQiCommand" def _stringify(self) -> str: raise NotImplementedError(f"_stringify not implemented for {repr(self)}") _QiJobReference = None def _add_cmd_to_job(cmd: QiCommand): if _QiJobReference is None: raise RuntimeError("Cannot use command outside QiJob context manager.") _QiJobReference._add_command(cmd) def _set_job_reference(job): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = job def _delete_job_reference(): """Used for testing purposes""" # pylint: disable=global-statement global _QiJobReference _QiJobReference = None class QiCell: """A QiCell is an abstract representation of the qubit/cell the program is run on. Usually, a single :python:`QiCell` is not instantiated, but instead a :class:`QiCells` object. For a single :python:`QiCell`, use :python:`QiCells(1)` instead. A :python:`QiCell` must be instantiated within a :class:`QiJob` context. 
The :python:`QiCell` object can be used to get properties that are defined on :class:`QiSamples <QiSample>`. For this, index the :python:`QiCell` object using the name of the property: .. code-block:: python q: QiCell = ... t1_time = q["t1"] The actual value for the accessed property (in the example above, the T1 time) is filled in when executing a :class:`QiJob` and providing the actual sample. **Tasks of the QiCell**: - Saves the pulses needed for program execution. - Provides a dictionary functionality to define commonly used durations/properties. - Implements a Sequencer object, which contains the assembler program after compilation. :param cellID: A unique ID :raises RuntimeError: When the :python:`QiCell` is instantiated outside a `QiJob` """ def __init__(self, cellID: int): if not isinstance(_QiJobReference, QiJob): raise RuntimeError("QiCell can't be used outside of QiJob.") self.cellID = cellID self.manipulation_pulses: List[QiPulse] = [] self.flux_pulses: List[QiPulse] = [] self.readout_pulses: List[QiPulse] = [] self._result_container: Dict[str, QiResult] = {} # The order in which recorded values are assigned to which result container self._result_recording_order: List[QiResult] = [] self._unresolved_property: Set[QiCellProperty] = set() self._job_ref = _QiJobReference self._relevant_vars: Set[_QiVariableBase] = set() # These attributes are determined by dataflow analyses self._initial_manip_freq: float = None self._initial_readout_freq: float = None self._initial_rec_offset: float = None self._rec_length: Union[int, float, QiCellProperty] = None self._properties: Dict[QiCellProperty, Any] = {} def __getitem__(self, key): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried getting values for cells registered to other QiJob" ) prop = self._properties.get(key, QiCellProperty(self, key)) if isinstance(prop, QiCellProperty): self._unresolved_property.add(key) return prop def __setitem__(self, key, value): if _QiJobReference != self._job_ref: raise RuntimeError( "Tried setting values for cells registered to other QiJob" ) self._properties[key] = value def __call__(self, qic): return qic.cell[self.qic_cell] def get_properties(self): return self._properties.copy() def add_pulse(self, pulse: QiPulse): if pulse not in self.manipulation_pulses: self.manipulation_pulses.append(pulse) if len(self.manipulation_pulses) > 13: raise RuntimeError("Too many pulses in use") return self.manipulation_pulses.index(pulse) + 1 # index 0 and 15 are reserved @property def initial_manipulation_frequency(self): if self._initial_manip_freq is None: if len(self.manipulation_pulses) > 0: warnings.warn( "Manipulation pulses without frequency given, using 90 MHz." ) return 90e6 # Default frequency freq = self._initial_manip_freq return freq() if isinstance(freq, QiCellProperty) else freq def add_recording_length(self, length): if self._rec_length is None: self._rec_length = length elif ( not self._rec_length._equal_syntax(length)
if isinstance(self._rec_length, QiExpression)
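For reference, splicing the next_line field into the truncated cropped_code/all_code yields the condition this sample asks a model to complete. A hedged reconstruction of the method; everything after the gold line is an assumption, not part of this record:

from qiclib.code.qi_var_definitions import QiExpression  # path from the context entries

# Sketch of the QiCell method; the else-branch and error message are assumed.
def add_recording_length(self, length):
    if self._rec_length is None:
        self._rec_length = length
    elif (
        not self._rec_length._equal_syntax(length)
        if isinstance(self._rec_length, QiExpression)  # <- gold next_line
        else self._rec_length != length                # assumed continuation
    ):
        raise RuntimeError("Conflicting recording lengths")  # assumed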
8
2023-11-10 10:26:10+00:00
24k
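Before the next record, one more sketch tied to the snippets above: the move_*_op_to_property methods implement the limited constant folding described in the QiExpression comment block, so adding a constant to a QiCellProperty mutates the property instead of building a _QiCalcBase node. The property name "t1" is hypothetical; the QiJob/QiCells usage mirrors the QiResult docstring:

from qiclib.code import QiJob, QiCells  # import path assumed

with QiJob() as job:
    q = QiCells(1)
    delay = q[0]["t1"] + 100e-9
    # __add__ sees (QiCellProperty, _QiConstValue) and calls
    # move_add_op_to_property: delay is the same property object, with
    # operations composed to add the constant and opcode updated to "x + ...".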
jpcadena/fastapi-boilerplate
app/api/api_v1/router/auth.py
[ { "identifier": "get_redis_dep", "path": "app/api/deps.py", "snippet": "async def get_redis_dep(\n redis_dependency: Annotated[RedisDependency, Depends()]\n) -> AsyncGenerator[Redis, None]: # type: ignore\n \"\"\"\n Lazy generation of Redis dependency\n :param redis_dependency: The dependency injection on Redis\n :type redis_dependency: RedisDependency\n :return: The Redis connection instance as a generator\n :rtype: AsyncGenerator[Redis, None]\n \"\"\"\n async with redis_dependency as redis:\n yield redis" }, { "identifier": "get_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_current_user(\n token: Annotated[str, Depends(oauth2_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n access token\n :param token: The Access token from OAuth2PasswordBearer\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n token_service: TokenService = TokenService(redis, auth_settings)\n is_blacklisted: bool = await token_service.is_token_blacklisted(token)\n if is_blacklisted:\n raise HTTPException(\n status_code=status.HTTP_401_UNAUTHORIZED,\n detail=\"Token is blacklisted\",\n )\n return await authenticate_user(token, auth_settings, user_service, redis)" }, { "identifier": "get_refresh_current_user", "path": "app/api/oauth2_validation.py", "snippet": "async def get_refresh_current_user(\n refresh_token: Annotated[str, Depends(refresh_token_scheme)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n user_service: Annotated[UserService, Depends(get_user_service)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserAuth:\n \"\"\"\n Fetches the current authenticated user based on the provided JWT\n refresh token\n :param refresh_token: The Refresh token from OAuth2PasswordBearer\n :type refresh_token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :param user_service: Dependency method for User service object\n :type user_service: UserService\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: Authenticated user information\n :rtype: UserAuth\n \"\"\"\n return await authenticate_user(\n refresh_token, auth_settings, user_service, redis\n )" }, { "identifier": "get_auth_settings", "path": "app/config/config.py", "snippet": "def get_init_settings() -> InitSettings:\ndef get_settings() -> Settings:\ndef get_sql_settings() -> SQLDatabaseSettings:\ndef get_auth_settings() -> AuthSettings:" }, { "identifier": "AuthSettings", "path": "app/config/db/auth_settings.py", "snippet": "class AuthSettings(BaseSettings):\n \"\"\"\n Settings class for authentication using JWT and Redis\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n MAX_REQUESTS: PositiveInt = 30\n RATE_LIMIT_DURATION: PositiveInt = 60\n BLACKLIST_EXPIRATION_SECONDS: PositiveInt = 3600\n API_V1_STR: str = 
\"/api/v1\"\n ALGORITHM: str = \"HS256\"\n AUTH_URL: str = \"api/v1/auth/\"\n TOKEN_URL: str = \"api/v1/auth/login\"\n OAUTH2_SCHEME: str = \"JWT\"\n OAUTH2_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate most of\" \" the API endpoints.\"\n )\n OAUTH2_REFRESH_TOKEN_DESCRIPTION: str = (\n \"JWT token used to authenticate\" \" most of the API endpoints.\"\n )\n TOKEN_USER_INFO_REGEX: str = (\n r\"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-\"\n r\"[0-9a-f]{4}-[0-9a-f]{12}:\\d{1,3}\\.\"\n r\"\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}$\"\n )\n SUB_REGEX: str = (\n r\"^username:[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-\"\n r\"[89ab][0-9a-f]{3}-[0-9a-f]{12}$\"\n )\n HEADERS: dict[str, str] = {\"WWW-Authenticate\": \"Bearer\"}\n DETAIL: str = \"Could not validate credentials\"\n NO_CLIENT_FOUND: str = \"No client found on the request\"\n SECRET_KEY: str\n SERVER_URL: AnyHttpUrl\n SERVER_DESCRIPTION: str\n CACHE_SECONDS: PositiveInt = 3600\n ACCESS_TOKEN_EXPIRE_MINUTES: float\n REFRESH_TOKEN_EXPIRE_MINUTES: PositiveInt\n EMAIL_RESET_TOKEN_EXPIRE_HOURS: PositiveInt\n AUDIENCE: Optional[AnyHttpUrl] = None\n STRICT_TRANSPORT_SECURITY_MAX_AGE: PositiveInt\n\n @field_validator(\"AUDIENCE\", mode=\"before\")\n def assemble_audience(\n cls, v: Optional[str], info: ValidationInfo\n ) -> AnyHttpUrl:\n \"\"\"\n Combine the server host and TOKEN_URL to create the audience\n string.\n :param v: The value of audience attribute\n :type v: Optional[str]\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The AUDIENCE attribute\n :rtype: AnyHttpUrl\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return AnyHttpUrl(\n f'{str(info.data.get(\"SERVER_URL\"))[:-1]}:8000/'\n f'{info.data.get(\"TOKEN_URL\")}'\n )\n\n REDIS_SCHEME: str\n REDIS_HOST: str\n REDIS_USERNAME: str\n REDIS_PASSWORD: str\n REDIS_PORT: PositiveInt\n REDIS_DATABASE_URI: Optional[RedisDsn] = None\n\n @field_validator(\"REDIS_DATABASE_URI\", mode=\"before\")\n def assemble_redis_connection(\n cls, v: Optional[str], info: ValidationInfo\n ) -> RedisDsn:\n \"\"\"\n Assemble the cache database connection as URI string\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: Redis URI\n :rtype: RedisDsn\n \"\"\"\n # pylint: disable=no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n return RedisDsn(\n str(\n Url.build(\n scheme=info.data.get(\"REDIS_SCHEME\", \"\"),\n username=info.data.get(\"REDIS_USERNAME\"),\n password=info.data.get(\"REDIS_PASSWORD\"),\n host=info.data.get(\"REDIS_HOST\", \"\"),\n port=info.data.get(\"REDIS_PORT\"),\n )\n )\n )" }, { "identifier": "InitSettings", "path": "app/config/init_settings.py", "snippet": "class InitSettings(BaseSettings):\n \"\"\"\n Init Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n case_sensitive=True,\n extra=\"allow\",\n )\n\n ITERATIONS: PositiveInt = 100000\n KEY_BYTES_LENGTH: PositiveInt = 32\n SALT_BYTES: PositiveInt = 16\n IV_BYTES: PositiveInt = 12\n PUBLIC_EXPONENT: PositiveInt = 65537\n RSA_KEY_BITS: PositiveInt = 2048\n SALUTE: str = \"Salute!\"\n ROOT_MSG: str = \"Hello, World!\"\n SERVER_NAME: str = \"FastAPI Boilerplate\"\n PROJECT_NAME: str = \"fastapi-boilerplate\"\n VERSION: str = \"1.0\"\n ENCODING: str = \"UTF-8\"\n DEFAULT_REGION: str = \"Guayas\"\n DEFAULT_COUNTRY: str = \"Ecuador\"\n 
OPENAPI_FILE_PATH: str = \"/openapi.json\"\n DATE_FORMAT: str = \"%Y-%m-%d\"\n DATETIME_FORMAT: str = \"%Y-%m-%d %H:%M:%S\"\n FILE_DATE_FORMAT: str = \"%d-%b-%Y-%H-%M-%S\"\n IMAGES_APP: str = \"images\"\n IMAGES_PATH: str = \"/assets/images\"\n IMAGES_DIRECTORY: str = \"assets/images\"\n EMAIL_TEMPLATES_DIR: str = \"templates\"\n PASSWORD_RECOVERY_SUBJECT: str = \"Password recovery for user\"\n NEW_ACCOUNT_SUBJECT: str = \"New account for user\"\n WELCOME_SUBJECT: str = \"Welcome to \"\n PASSWORD_CHANGED_CONFIRMATION_SUBJECT: str = (\n \"Successfully password \" \"changed for \"\n )\n DELETE_ACCOUNT_SUBJECT: str = \"Account deleted for \"\n LOG_FORMAT: str = (\n \"[%(name)s][%(asctime)s][%(levelname)s][%(module)s]\"\n \"[%(funcName)s][%(lineno)d]: %(message)s\"\n )\n PASSWORD_REGEX: str = (\n \"^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?\" \"[#?!@$%^&*-]).{8,14}$\"\n )\n\n SUMMARY: str = \"\"\"This backend project is FastAPI template.\n This project serves as the backend, which aims to provide a robust and\n reliable system to its users.\n This backend application plays a crucial role in providing the\n functionality for user authentication, real-time monitoring,\n data processing, and advanced alerting system. It is designed to ensure\n the scalability and maintainability of the mobile app,\n making it a vital part of the overall solution.\n \"\"\"\n DESCRIPTION: str = f\"\"\"**FastAPI**, **SQLAlchemy** and **Redis** helps you\n do awesome stuff. 🚀\n \\n\\n<img src=\"data:image/png;base64,{img_b64}\"/>\"\"\"\n LICENSE_INFO: dict[str, str] = {\n \"name\": \"MIT\",\n \"identifier\": \"MIT\",\n }\n TAGS_METADATA: list[dict[str, str]] = [\n {\n \"name\": \"user\",\n \"description\": f\"\"\"Operations with users, such as register, get,\n update and delete.\\n\\n<img src=\"data:image/png;base64,\n {users_b64}\" width=\"150\" height=\"100\"/>\"\"\",\n },\n {\n \"name\": \"auth\",\n \"description\": f\"\"\"The authentication logic is here as well as\n password recovery and reset.\n \\n\\n<img src=\"data:image/png;base64,{auth_b64}\" width=\"75\"\n height=\"75\"/>\"\"\",\n },\n ]\n USER_CREATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user create object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone number `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is 
rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"5939876a4321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n \"region\": \"Andes\",\n \"country\": \"New York\",\n \"postal_code\": \"999999\",\n },\n },\n },\n }\n USER_UPDATE_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** user update object that works \"\n \"correctly.\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(593987654321),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert phone numbers `strings` to \"\n \"actual `numbers` automatically\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"[email protected]\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 12, 31),\n \"phone_number\": PhoneNumber(\"593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n },\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"username\": \"username\",\n \"email\": \"username\",\n \"first_name\": \"Some\",\n \"middle_name\": \"One\",\n \"last_name\": \"Example\",\n \"password\": \"miclave123\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(95, 12, 31),\n \"phone_number\": PhoneNumber(\"59398x54321\"),\n \"address\": {\n \"street_address\": \"True\",\n \"locality\": \"123\",\n },\n },\n },\n }\n EMAIL_BODY_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** email object that works correctly.\",\n \"value\": \"[email protected]\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": 123,\n },\n }\n TOKEN_PAYLOAD_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** token payload object that works \"\n \"correctly.\",\n \"value\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n },\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": {\n \"token\": \"123\",\n \"password\": \"abc123\",\n },\n },\n }\n AUTHORIZATION_HEADER_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** authorization token object that works \"\n \"correctly.\",\n \"value\": jwt.encode(\n claims=jsonable_encoder(\n {\n \"sub\": f\"username:{str(uuid4())}\",\n \"nationalities\": [\"ECU\"],\n \"email\": \"[email protected]\",\n \"nickname\": \"example\",\n \"preferred_username\": \"example\",\n \"given_name\": \"Some\",\n \"family_name\": \"Example\",\n \"middle_name\": \"One\",\n \"gender\": Gender.MALE,\n \"birthdate\": date(2004, 
12, 31),\n \"updated_at\": datetime.now(),\n \"phone_number\": PhoneNumber(\"+593987654321\"),\n \"address\": {\n \"street_address\": \"Urdesa Norte mz A1 v 99\",\n \"locality\": \"Guayaquil\",\n \"region\": \"Guayas\",\n \"country\": \"Ecuador\",\n \"postal_code\": \"090505\",\n },\n \"exp\": int(time.time()) + 1800,\n \"nbf\": int(time.time()) - 1,\n \"iat\": int(time.time()),\n }\n ),\n key=\"f52e826e62cdd364c86f129cb18db2fe2be93859c5104cac9585f\"\n \"305378dce65\",\n algorithm=\"HS256\",\n ),\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": \"123\",\n },\n }\n LIMIT_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** limit query parameter that works \"\n \"correctly.\",\n \"value\": 1,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert limit `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"5\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }\n SKIP_EXAMPLES: dict[str, Example] = {\n \"normal\": {\n \"summary\": \"A normal example\",\n \"description\": \"A **normal** skip query parameter that works \"\n \"correctly.\",\n \"value\": 0,\n },\n \"converted\": {\n \"summary\": \"An example with converted data\",\n \"description\": \"FastAPI can convert skip `strings` to actual\"\n \" `numbers` automatically\",\n \"value\": \"20\",\n },\n \"invalid\": {\n \"summary\": \"Invalid data is rejected with an error\",\n \"value\": -1,\n },\n }" }, { "identifier": "Settings", "path": "app/config/settings.py", "snippet": "class Settings(BaseSettings):\n \"\"\"\n Settings class based on Pydantic Base Settings\n \"\"\"\n\n model_config = SettingsConfigDict(\n env_file=\".env\",\n env_file_encoding=\"utf-8\",\n case_sensitive=True,\n extra=\"allow\",\n )\n\n SMTP_PORT: PositiveInt\n SMTP_HOST: str\n SMTP_USER: str\n SMTP_PASSWORD: str\n MAIL_SUBJECT: str\n MAIL_TIMEOUT: float\n EMAILS_FROM_EMAIL: Optional[EmailStr] = None\n EMAILS_FROM_NAME: Optional[str] = None\n SUPERUSER_EMAIL: EmailStr\n SUPERUSER_FIRST_NAME: str\n SUPERUSER_PASSWORD: str\n SUPERUSER_LAST_NAME: str\n SUPERUSER_STREET_ADDRESS: str\n SUPERUSER_LOCALITY: str\n SUPERUSER_POSTAL_CODE: str\n BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []\n\n PUBLIC_KEY_PATH: FilePath\n PRIVATE_KEY_PATH: FilePath\n\n @field_validator(\n \"PUBLIC_KEY_PATH\", \"PRIVATE_KEY_PATH\", mode=\"before\", check_fields=True\n )\n def validate_key_paths(cls, key_path: FilePath) -> FilePath:\n \"\"\"\n Validate the provided key path.\n :param key_path: Provided key path\n :type key_path: FilePath\n :return: The validated key path\n :rtype: FilePath\n \"\"\"\n if not str(key_path).endswith(\".pem\"):\n raise ValueError(f\"{key_path} must have a .pem extension\")\n base_name: str = os.path.basename(key_path)\n if not base_name.endswith(\"key.pem\"):\n raise ValueError(\n f\"{key_path} must have a file name ending with 'key'\"\n )\n return key_path\n\n @field_validator(\"BACKEND_CORS_ORIGINS\", mode=\"before\")\n def assemble_cors_origins(\n cls, v: Union[str, list[str]]\n ) -> Union[list[str], str]:\n \"\"\"\n Assemble a list of allowed CORS origins.\n :param v: Provided CORS origins, either a string or a list of\n strings\n :type v: Union[str, list[str]]\n :return: List of Backend CORS origins to be accepted\n :rtype: Union[list[str], str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if 
isinstance(v, str) and not v.startswith(\"[\"):\n return [i.strip() for i in v.split(\",\")]\n if isinstance(v, list):\n return v\n raise ValueError(v)\n\n CONTACT_NAME: Optional[str] = None\n CONTACT_URL: Optional[AnyHttpUrl] = None\n CONTACT_EMAIL: Optional[EmailStr] = None\n CONTACT: Optional[dict[str, Any]] = None\n\n @field_validator(\"CONTACT\", mode=\"before\")\n def assemble_contact(\n cls, v: Optional[str], info: ValidationInfo\n ) -> dict[str, str]:\n \"\"\"\n Assemble contact information\n :param v: Variables to consider\n :type v: str\n :param info: The field validation info\n :type info: ValidationInfo\n :return: The contact attribute\n :rtype: dict[str, str]\n \"\"\"\n # pylint: disable=unused-argument,no-self-argument,invalid-name\n if info.config is None:\n raise ValueError(\"info.config cannot be None\")\n contact: dict[str, Any] = {}\n if info.data.get(\"CONTACT_NAME\"):\n contact[\"name\"] = info.data.get(\"CONTACT_NAME\")\n if info.data.get(\"CONTACT_URL\"):\n contact[\"url\"] = info.data.get(\"CONTACT_URL\")\n if info.data.get(\"CONTACT_EMAIL\"):\n contact[\"email\"] = info.data.get(\"CONTACT_EMAIL\")\n return contact" }, { "identifier": "verify_password", "path": "app/core/security/password.py", "snippet": "def verify_password(hashed_password: str, plain_password: str) -> bool:\n \"\"\"\n Verifies if a plain text password matches a hashed password\n :param plain_password: The plain text password to verify\n :type plain_password: str\n :param hashed_password: The hashed password to compare against\n :type hashed_password: str\n :return: True if the passwords match, False otherwise\n :rtype: bool\n \"\"\"\n if not plain_password:\n raise_custom_error(\"Plain password cannot be empty or None\")\n if not hashed_password:\n raise_custom_error(\"Hashed password cannot be empty or None\")\n return crypt_context.verify(plain_password, hashed_password)" }, { "identifier": "NotFoundException", "path": "app/exceptions/exceptions.py", "snippet": "class NotFoundException(Exception):\n \"\"\"\n Not Found Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "ServiceException", "path": "app/exceptions/exceptions.py", "snippet": "class ServiceException(Exception):\n \"\"\"\n Service Layer Exception class\n \"\"\"\n\n def __init__(self, message: str, note: Optional[str] = None):\n super().__init__(message)\n if note:\n self.add_note(note)" }, { "identifier": "User", "path": "app/models/sql/user.py", "snippet": "class User(Base): # type: ignore\n \"\"\"\n User model class representing the \"users\" table\n \"\"\"\n\n __tablename__ = \"users\"\n\n id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n index=True,\n nullable=False,\n primary_key=True,\n unique=True,\n server_default=text(\"(gen_random_uuid())\"),\n comment=\"ID of the User\",\n )\n username: Mapped[str] = mapped_column(\n String(15),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Username to identify the user\",\n )\n email: Mapped[EmailStr] = mapped_column(\n String(320),\n index=True,\n unique=True,\n nullable=False,\n comment=\"Preferred e-mail address of the User\",\n )\n first_name: Mapped[str] = mapped_column(\n String(50), nullable=False, comment=\"First name(s) of the User\"\n )\n middle_name: Mapped[str] = mapped_column(\n String(50), nullable=True, comment=\"Middle name(s) of the User\"\n )\n last_name: Mapped[str] = mapped_column(\n String(100), nullable=False, comment=\"Last 
name(s) of the User\"\n )\n password: Mapped[str] = mapped_column(\n String(60), nullable=False, comment=\"Hashed password of the User\"\n )\n gender: Mapped[Gender] = mapped_column(\n Enum(Gender), nullable=True, comment=\"Gender of the User\"\n )\n birthdate: Mapped[PastDate] = mapped_column(\n Date, nullable=True, comment=\"Birthday of the User\"\n )\n phone_number: Mapped[PhoneNumber] = mapped_column(\n String(20),\n nullable=True,\n comment=\"Preferred telephone number of the User\",\n )\n is_active: Mapped[bool] = mapped_column(\n Boolean(),\n default=True,\n nullable=False,\n server_default=text(\"true\"),\n comment=\"True if the user is active; otherwise false\",\n )\n is_superuser: Mapped[bool] = mapped_column(\n Boolean(),\n default=False,\n nullable=False,\n server_default=text(\"false\"),\n comment=\"True if the user is super user; otherwise false\",\n )\n created_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n default=datetime.now(),\n nullable=False,\n server_default=text(\"now()\"),\n comment=\"Time the User was created\",\n )\n updated_at: Mapped[datetime] = mapped_column(\n TIMESTAMP(\n timezone=True, precision=sql_database_setting.TIMESTAMP_PRECISION\n ),\n nullable=True,\n onupdate=text(\"now()\"),\n comment=\"Time the User was updated\",\n )\n address_id: Mapped[UUID4] = mapped_column(\n UUID(as_uuid=True),\n ForeignKey(\n \"users_address.id\",\n name=\"users_address_id_fkey\",\n ),\n nullable=False,\n comment=\"ID of the User's address\",\n )\n address: Mapped[\"Address\"] = relationship( # type: ignore\n \"Address\", back_populates=\"users\", lazy=\"joined\"\n )\n\n __table_args__ = (\n CheckConstraint(\n \"char_length(username) >= 4\", name=\"users_username_length\"\n ),\n CheckConstraint(\"char_length(email) >= 3\", name=\"users_email_length\"),\n CheckConstraint(\n sql_database_setting.DB_EMAIL_CONSTRAINT, name=\"users_email_format\"\n ),\n CheckConstraint(\n \"char_length(first_name) >= 1\", name=\"users_first_name_length\"\n ),\n CheckConstraint(\n \"char_length(last_name) >= 1\", name=\"users_last_name_length\"\n ),\n CheckConstraint(\"LENGTH(password) = 60\", name=\"users_password_length\"),\n CheckConstraint(\n sql_database_setting.DB_PHONE_NUMBER_CONSTRAINT,\n name=\"users_phone_number_format\",\n ),\n )" }, { "identifier": "Msg", "path": "app/schemas/external/msg.py", "snippet": "class Msg(BaseModel):\n \"\"\"\n Schema for representing a message.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\"example\": {\"msg\": \"Hello, World!!!\"}}\n )\n\n msg: str = Field(..., title=\"Message\", description=\"Message to display\")" }, { "identifier": "TokenResetPassword", "path": "app/schemas/external/token.py", "snippet": "class TokenResetPassword(BaseModel):\n \"\"\"\n Token Reset Password for Request based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra={\n \"example\": {\n \"token\": \"eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9\",\n \"password\": \"Hk7pH9*35Fu&3U\",\n }\n }\n )\n\n token: str = Field(\n ..., title=\"Token\", description=\"Access token\", min_length=30\n )\n password: str = Field(\n ...,\n title=\"New password\",\n description=\"New password to reset\",\n validate_default=True,\n min_length=8,\n max_length=14,\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password to be validated\n :type v: Optional[str]\n 
:return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "TokenResponse", "path": "app/schemas/external/token.py", "snippet": "class TokenResponse(Token):\n \"\"\"\n Token for Response based on Pydantic Base Model.\n \"\"\"\n\n model_config = ConfigDict(\n json_schema_extra=token_response_example,\n )\n\n token_type: str = Field(\n default=\"bearer\", title=\"Token type\", description=\"Type of the token\"\n )" }, { "identifier": "UserResponse", "path": "app/schemas/external/user.py", "snippet": "class UserResponse(UserID, UserBase, UserOptional, UserInDB):\n \"\"\"\n Schema for the response when retrieving a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_response_example,\n )" }, { "identifier": "UserUpdate", "path": "app/schemas/external/user.py", "snippet": "class UserUpdate(BaseModel):\n \"\"\"\n Schema for updating a User record.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_example,\n )\n\n username: Optional[str] = Field(\n default=None,\n title=\"Username\",\n description=\"Username to identify the user\",\n min_length=4,\n max_length=15,\n )\n email: Optional[EmailStr] = Field(\n default=None,\n title=\"Email\",\n description=\"Preferred e-mail address of the User\",\n )\n first_name: Optional[str] = Field(\n default=None,\n title=\"First name\",\n description=\"First name(s) of the User\",\n min_length=1,\n max_length=50,\n )\n middle_name: Optional[str] = Field(\n default=None,\n title=\"Middle Name\",\n description=\"Middle name(s) of the User\",\n max_length=50,\n )\n last_name: Optional[str] = Field(\n default=None,\n title=\"Last name\",\n description=\"Last name(s) of the User\",\n min_length=1,\n max_length=100,\n )\n password: Optional[str] = Field(\n default=None,\n title=\"New Password\",\n description=\"New Password of the User\",\n min_length=8,\n max_length=14,\n )\n gender: Optional[Gender] = Field(\n default=None, title=\"Gender\", description=\"Gender of the User\"\n )\n birthdate: Optional[date] = Field(\n default=None, title=\"Birthdate\", description=\"Birthday of the User\"\n )\n phone_number: Optional[PhoneNumber] = Field(\n default=None,\n title=\"Phone number\",\n description=\"Preferred telephone number of the User\",\n )\n address: Optional[Address] = Field(\n default=None, title=\"Address\", description=\"Address of the User\"\n )\n\n @field_validator(\"password\", mode=\"before\")\n def validate_password(cls, v: Optional[str]) -> str:\n \"\"\"\n Validates the password attribute\n :param v: The password value to validate\n :type v: Optional[str]\n :return: The validated password\n :rtype: str\n \"\"\"\n # pylint: disable=no-self-argument\n return validate_password(v)" }, { "identifier": "UserUpdateResponse", "path": "app/schemas/external/user.py", "snippet": "class UserUpdateResponse(\n UserAuth, UserName, UserPassword, UserOptional, UserInDB\n):\n \"\"\"\n Schema for the response when updating a User.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_update_response_example,\n )" }, { "identifier": "UserAuth", "path": "app/schemas/infrastructure/user.py", "snippet": "class UserAuth(UserID, UserBaseAuth):\n \"\"\"\n User Auth that inherits from UserID.\n \"\"\"\n\n model_config = ConfigDict(\n from_attributes=True,\n json_schema_extra=user_auth_example,\n )" }, { "identifier": "common_auth_procedure", "path": 
"app/services/infrastructure/auth.py", "snippet": "async def common_auth_procedure(\n user: User,\n client_ip: str,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n) -> TokenResponse:\n \"\"\"\n Common authentication procedure for login and refresh token based on\n token generation\n :param user: The user to authenticate\n :type user: User\n :param client_ip: The IP address of the client\n :type client_ip: str\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The token response object\n :rtype: TokenResponse\n \"\"\"\n auth_token = AuthService.auth_token(user, auth_settings)\n user_info = f\"{str(user.id)}:{client_ip}\"\n token = TokenDB(key=auth_token.refresh_token, user_info=user_info)\n token_service = TokenService(redis, auth_settings)\n token_set = await token_service.create_token(token)\n if not token_set:\n detail = \"Could not insert data in Authentication database\"\n logger.warning(detail)\n raise HTTPException(\n status_code=status.HTTP_400_BAD_REQUEST, detail=detail\n )\n return TokenResponse(**auth_token.model_dump())" }, { "identifier": "TokenService", "path": "app/services/infrastructure/token.py", "snippet": "class TokenService:\n \"\"\"\n Service class for token operations in the authentication database\n \"\"\"\n\n def __init__(\n self,\n redis: Redis, # type: ignore\n auth_settings: AuthSettings,\n ):\n self._redis: Redis = redis # type: ignore\n self._refresh_token_expire_minutes: (\n PositiveInt\n ) = auth_settings.REFRESH_TOKEN_EXPIRE_MINUTES\n self._blacklist_expiration_seconds: PositiveInt = (\n PositiveInt(\n PositiveInt(auth_settings.ACCESS_TOKEN_EXPIRE_MINUTES) + 1\n )\n * 60\n ) # converting minutes to seconds\n\n @handle_redis_exceptions\n @benchmark\n async def create_token(self, token: Token) -> bool:\n \"\"\"\n Create a token in authentication database\n :param token: Token object with key and value\n :type token: Token\n :return: True if the token was inserted; otherwise false\n :rtype: bool\n \"\"\"\n try:\n inserted: bool = await self._redis.setex(\n token.key,\n self._refresh_token_expire_minutes,\n token.user_info,\n )\n except RedisError as r_exc:\n logger.error(\"Error at creating token. %s\", r_exc)\n raise r_exc\n return inserted\n\n @handle_redis_exceptions\n @benchmark\n async def get_token(self, key: str) -> Optional[str]:\n \"\"\"\n Read token from the authentication database\n :param key: The key to search for\n :type key: str\n :return: The refresh token\n :rtype: str\n \"\"\"\n try:\n value: str = str(await self._redis.get(key))\n except RedisError as r_exc:\n logger.error(\"Error at getting token. %s\", r_exc)\n raise r_exc\n return value\n\n @handle_redis_exceptions\n @benchmark\n async def blacklist_token(self, token_key: str) -> bool:\n \"\"\"\n Blacklist a given token.\n :param token_key: The token key to blacklist.\n :type token_key: str\n :return: True if the token was successfully blacklisted,\n otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: bool = await self._redis.setex(\n f\"blacklist:{token_key}\",\n self._blacklist_expiration_seconds,\n \"true\",\n )\n except RedisError as r_exc:\n logger.error(\"Error at blacklisting token. 
%s\", r_exc)\n raise r_exc\n return blacklisted\n\n @handle_redis_exceptions\n @benchmark\n async def is_token_blacklisted(self, token_key: str) -> bool:\n \"\"\"\n Check if a given token is blacklisted.\n :param token_key: The token key to verify.\n :type token_key: str\n :return: True if the token is blacklisted, otherwise False.\n :rtype: bool\n \"\"\"\n try:\n blacklisted: Optional[str] = await self._redis.get(\n f\"blacklist\" f\":{token_key}\"\n )\n except RedisError as r_exc:\n logger.error(\"Error at checking if token is blacklisted. %s\", r_exc)\n raise r_exc\n return bool(blacklisted)" }, { "identifier": "UserService", "path": "app/services/infrastructure/user.py", "snippet": "class UserService:\n \"\"\"\n Service class for user-related business logic.\n \"\"\"\n\n def __init__(\n self,\n user_repo: UserRepository,\n redis: Redis, # type: ignore\n ):\n self._user_repo: UserRepository = user_repo\n self._redis: Redis = redis # type: ignore\n self._cache_seconds: PositiveInt = auth_setting.CACHE_SECONDS\n\n async def get_user_by_id(self, user_id: UUID4) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique identifier\n :param user_id: The unique identifier of the user\n :type user_id: UUID4\n :return: User information\n :rtype: Optional[UserResponse]\n \"\"\"\n user: Optional[User]\n try:\n user = await self._user_repo.read_by_id(IdSpecification(user_id))\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n detail: str = f\"User with id {user_id} not found in the system.\"\n logger.error(detail)\n raise NotFoundException(detail)\n user_response: Optional[\n UserResponse\n ] = await model_to_response( # type: ignore\n user, UserResponse\n )\n return user_response\n\n async def get_login_user(self, username: str) -> User:\n \"\"\"\n Retrieve user information for login purposes by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: User\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_username(\n UsernameSpecification(username)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with username: {username}\")\n return user\n\n async def get_user_by_username(\n self, username: str\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its username\n :param username: The username to retrieve User from\n :type username: str\n :return: User information\n :rtype: UserResponse\n \"\"\"\n try:\n user: User = await self.get_login_user(username)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(user, UserResponse) # type: ignore\n\n async def get_user_by_email(\n self, email: EmailStr\n ) -> Optional[UserResponse]:\n \"\"\"\n Retrieve user information by its unique email.\n :param email: The email to retrieve User from\n :type email: EmailStr\n :return: User found in database\n :rtype: Optional[UserResponse]\n \"\"\"\n try:\n user: Optional[User] = await self._user_repo.read_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user:\n raise ServiceException(f\"User not found with email: {email}\")\n return await model_to_response(user, UserResponse) 
# type: ignore\n\n async def get_user_id_by_email(self, email: EmailStr) -> UUID4:\n \"\"\"\n Read the user ID from the database with unique email.\n :param email: Email to retrieve User from\n :type email: EmailStr\n :return: User ID found in database\n :rtype: UUID4\n \"\"\"\n try:\n user_id: Optional[UUID4] = await self._user_repo.read_id_by_email(\n EmailSpecification(email)\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not user_id:\n raise ServiceException(f\"User ID not found with email: {email}\")\n return user_id\n\n async def register_user(\n self, user: Union[UserCreate, UserSuperCreate]\n ) -> Optional[UserCreateResponse]:\n \"\"\"\n Register a new user in the database\n :param user: Request object representing the user\n :type user: Union[UserCreate, UserSuperCreate]\n :return: Response object representing the created user in the\n database\n :rtype: UserCreateResponse\n \"\"\"\n try:\n created_user = await self._user_repo.create_user(user)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n return await model_to_response(\n created_user, UserCreateResponse # type: ignore\n )\n\n async def get_users(\n self, offset: Optional[NonNegativeInt], limit: Optional[PositiveInt]\n ) -> list[UserResponse]:\n \"\"\"\n Retrieve users' information from the table\n :param offset: Offset from where to start returning users\n :type offset: NonNegativeInt\n :param limit: Limit the number of results from query\n :type limit: PositiveInt\n :return: User information\n :rtype: list[UserResponse]\n \"\"\"\n try:\n users: list[User] = await self._user_repo.read_users(offset, limit)\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n found_users: list[UserResponse] = [\n await model_to_response(user, UserResponse) # type: ignore\n for user in users\n ]\n return found_users\n\n async def update_user(\n self, user_id: UUID4, user: UserUpdate\n ) -> Optional[UserUpdateResponse]:\n \"\"\"\n Update user information from table\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :param user: Requested user information to update\n :type user: UserUpdate\n :return: User information\n :rtype: Optional[UserUpdateResponse]\n \"\"\"\n try:\n updated_user: Optional[User] = await self._user_repo.update_user(\n IdSpecification(user_id), user\n )\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n raise ServiceException(str(db_exc)) from db_exc\n if not updated_user:\n raise ServiceException(\n f\"User with user_id: {user_id} could not be updated\"\n )\n return await model_to_response(\n updated_user, UserUpdateResponse # type: ignore\n )\n\n async def delete_user(self, user_id: UUID4) -> dict[str, Any]:\n \"\"\"\n Deletes a user by its id\n :param user_id: Unique identifier of the user\n :type user_id: UUID4\n :return: Data to confirmation info about the delete process\n :rtype: dict[str, Any]\n \"\"\"\n deleted: bool\n deleted_at: Optional[datetime]\n try:\n deleted = await self._user_repo.delete_user(\n IdSpecification(user_id)\n )\n deleted_at = datetime.now()\n except DatabaseException as db_exc:\n logger.error(str(db_exc))\n deleted = False\n deleted_at = None\n finally:\n return {\"ok\": deleted, \"deleted_at\": deleted_at}" }, { "identifier": "get_user_service", "path": "app/services/infrastructure/user.py", "snippet": "async def get_user_service(\n user_repo: 
Annotated[UserRepository, Depends(get_user_repository)],\n redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore\n) -> UserService:\n \"\"\"\n Get an instance of the user service with the given repository.\n :param user_repo: User repository object for database connection\n :type user_repo: UserRepository\n :param redis: Dependency method for async Redis connection\n :type redis: Redis\n :return: UserService instance with repository associated\n :rtype: UserService\n \"\"\"\n return UserService(user_repo, redis)" }, { "identifier": "send_password_changed_confirmation_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_password_changed_confirmation_email(\n email_to: EmailStr,\n username: str,\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n settings: Annotated[Settings, Depends(get_settings)],\n) -> bool:\n \"\"\"\n Send a password changed confirmation email\n :param email_to: The email address of the recipient with password\n changed\n :type email_to: EmailStr\n :param username: Username of the recipient\n :type username: str\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :return: True if the email was sent; otherwise false\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PASSWORD_CHANGED_CONFIRMATION_SUBJECT}\" f\" {username}\"\n )\n template_str: str = await build_email_template(\n \"password_changed_confirmation.html\", init_settings\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"link\": f\"mailto:{settings.CONTACT_EMAIL}?subject=\"\n f\"{init_settings.PROJECT_NAME} password changed\",\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "send_reset_password_email", "path": "app/tasks/email_tasks/email_tasks.py", "snippet": "@with_logging\nasync def send_reset_password_email(\n email_to: EmailStr,\n username: str,\n token: str,\n settings: Annotated[Settings, Depends(get_settings)],\n init_settings: Annotated[InitSettings, Depends(get_init_settings)],\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> bool:\n \"\"\"\n Sends a password reset email to a user with the given email address\n :param email_to: The email address of the user\n :type email_to: EmailStr\n :param username: The username of the user\n :type username: str\n :param token: The reset password token generated for the user\n :type token: str\n :param settings: Dependency method for cached setting object\n :type settings: Settings\n :param init_settings: Dependency method for cached init setting object\n :type init_settings: InitSettings\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: True if the email was sent successfully; False otherwise\n :rtype: bool\n \"\"\"\n subject: str = (\n f\"{init_settings.PROJECT_NAME} -\"\n f\" {init_settings.PASSWORD_RECOVERY_SUBJECT} {username}\"\n )\n template_str: str = await build_email_template(\n \"reset_password.html\", init_settings\n )\n link: str = (\n f\"{auth_settings.SERVER_URL}\"\n f\"{auth_settings.AUTH_URL}reset-password?token={token}\"\n )\n is_sent: bool = await send_email(\n email_to=email_to,\n subject_template=subject,\n 
html_template=template_str,\n environment={\n \"project_name\": init_settings.PROJECT_NAME,\n \"username\": username,\n \"email\": email_to,\n \"valid_hours\": auth_settings.EMAIL_RESET_TOKEN_EXPIRE_HOURS,\n \"link\": link,\n },\n settings=settings,\n )\n return is_sent" }, { "identifier": "generate_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def generate_password_reset_token(\n email: EmailStr,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> str:\n \"\"\"\n Generate a password reset token for the given email address.\n :param email: The email to generate the reset token for\n :type email: EmailStr\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The password reset token\n :rtype: str\n \"\"\"\n payload: dict[str, Any] = generate_password_reset_payload(\n email, auth_settings\n )\n return encode_jwt(payload, auth_settings)" }, { "identifier": "verify_password_reset_token", "path": "app/utils/security/password.py", "snippet": "def verify_password_reset_token(\n token: str,\n auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)],\n) -> Optional[EmailStr]:\n \"\"\"\n Verify a password reset token and return the email address if valid.\n :param token: The JSON Web Token\n :type token: str\n :param auth_settings: Dependency method for cached setting object\n :type auth_settings: AuthSettings\n :return: The email address\n :rtype: EmailStr\n \"\"\"\n decoded_token: Optional[dict[str, Any]] = decode_jwt(token, auth_settings)\n return decoded_token.get(\"sub\") if decoded_token else None" } ]
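The last two context snippets, generate_password_reset_token and verify_password_reset_token, delegate to encode_jwt, decode_jwt, and generate_password_reset_payload, none of which are included above. Below is a minimal self-contained sketch of that round trip, assuming python-jose with HS256 as in the AUTHORIZATION_HEADER_EXAMPLES snippet; the key, expiry, and function names are illustrative stand-ins, not the project's real helpers.

import time
from typing import Any, Optional

from jose import JWTError, jwt

SECRET_KEY = "change-me"  # stand-in; the app loads its key from settings
EXPIRE_HOURS = 1  # assumed value of EMAIL_RESET_TOKEN_EXPIRE_HOURS


def make_reset_token(email: str) -> str:
    """Encode the email as the JWT subject with issue and expiry claims."""
    now = int(time.time())
    payload: dict[str, Any] = {
        "sub": email,
        "iat": now,
        "exp": now + EXPIRE_HOURS * 3600,
    }
    return jwt.encode(payload, SECRET_KEY, algorithm="HS256")


def check_reset_token(token: str) -> Optional[str]:
    """Return the email if the token is valid and unexpired, else None."""
    try:
        decoded: dict[str, Any] = jwt.decode(
            token, SECRET_KEY, algorithms=["HS256"]
        )
    except JWTError:  # covers bad signatures and expired tokens
        return None
    return decoded.get("sub")


token = make_reset_token("[email protected]")
assert check_reset_token(token) == "[email protected]"
assert check_reset_token("garbage") is None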
import logging from typing import Annotated, Any, Optional from fastapi import ( APIRouter, Body, Depends, Header, HTTPException, Path, Request, status, ) from fastapi.security import OAuth2PasswordRequestForm from pydantic import EmailStr from redis.asyncio import Redis from starlette.datastructures import Address from app.api.deps import get_redis_dep from app.api.oauth2_validation import get_current_user, get_refresh_current_user from app.config.config import ( get_auth_settings, get_init_settings, get_settings, init_setting, ) from app.config.db.auth_settings import AuthSettings from app.config.init_settings import InitSettings from app.config.settings import Settings from app.core.security.password import verify_password from app.exceptions.exceptions import NotFoundException, ServiceException from app.models.sql.user import User as UserDB from app.schemas.external.msg import Msg from app.schemas.external.token import TokenResetPassword, TokenResponse from app.schemas.external.user import ( UserResponse, UserUpdate, UserUpdateResponse, ) from app.schemas.infrastructure.user import UserAuth from app.services.infrastructure.auth import common_auth_procedure from app.services.infrastructure.token import TokenService from app.services.infrastructure.user import UserService, get_user_service from app.tasks.email_tasks.email_tasks import ( send_password_changed_confirmation_email, send_reset_password_email, ) from app.utils.security.password import ( generate_password_reset_token, verify_password_reset_token, )
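Every endpoint in this router resolves its collaborators through the Annotated[..., Depends(...)] pattern these imports set up. A stripped-down sketch of the mechanism, with hypothetical stand-in names rather than the app's real factories:

from typing import Annotated

from fastapi import Depends, FastAPI

app = FastAPI()


class FakeSettings:
    """Stand-in for the cached Settings object."""

    project_name: str = "demo"


def get_fake_settings() -> FakeSettings:
    # The real app returns cached settings objects from functions like
    # get_settings / get_auth_settings.
    return FakeSettings()


@app.get("/info")
async def info(
    settings: Annotated[FakeSettings, Depends(get_fake_settings)]
) -> dict[str, str]:
    # FastAPI calls the dependency and injects its return value per request.
    return {"project_name": settings.project_name}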
14722
return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "[email protected]"}, openapi_examples=init_setting.EMAIL_BODY_EXAMPLES, ), ], user_service: Annotated[UserService, Depends(get_user_service)], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password recovery. ## Parameter: - `email:` **Path parameter that references the email used to recover the password** - `type:` **EmailStr** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param user_service: Dependency method for User service object :type user_service: UserService :param settings: Dependency method for cached setting object :type settings: config.Settings :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ try: user: Optional[UserResponse] = await user_service.get_user_by_email( email ) except ServiceException as exc: logger.error(exc) user = None if user: password_reset_token: str = generate_password_reset_token( email, auth_settings ) await send_reset_password_email( user.email, user.username, password_reset_token, settings, init_settings, auth_settings, ) return Msg(msg="If the email is registered, a reset link will be sent.") @router.post("/reset-password", response_model=Msg) async def reset_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user_service: Annotated[UserService, Depends(get_user_service)], token_reset_password: Annotated[ TokenResetPassword, Body( ..., title="Body object", description="Object with access token and new password", openapi_examples=init_setting.TOKEN_PAYLOAD_EXAMPLES, ), ], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password reset. ## Parameter: - `token_reset_password:` **Body Object with token and new password** - `type:` **TokenResetPassword** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param settings: Dependency method for cached setting object :type settings: config.Settings :param user_service: Dependency method for User service object :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ email: Optional[EmailStr] = verify_password_reset_token( token_reset_password.token, auth_settings ) if not email: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid or expired token", ) try: found_user: Optional[ UserResponse ] = await user_service.get_user_by_email(email) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="There was an issue with the request", ) from exc if not found_user: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="User not found" ) user_data: dict[str, Any] = found_user.model_dump() user_data["password"] = token_reset_password.password user_update: UserUpdate = UserUpdate(**user_data)
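Under the hood of common_auth_procedure, the TokenService snippet persists refresh tokens and blacklist entries with Redis setex, whose TTL argument is in seconds. A hedged standalone sketch of that storage pattern, assuming a local Redis server (keys and values here are made up):

import asyncio

from redis.asyncio import Redis


async def demo() -> None:
    redis = Redis(host="localhost", port=6379, decode_responses=True)
    # Refresh token -> "user_id:client_ip", expiring after 30 minutes.
    await redis.setex("refresh-token-key", 30 * 60, "user-uuid:127.0.0.1")
    print(await redis.get("refresh-token-key"))
    # Blacklist an access token slightly longer than its lifetime,
    # mirroring TokenService._blacklist_expiration_seconds.
    await redis.setex("blacklist:access-token-key", 31 * 60, "true")
    print(bool(await redis.get("blacklist:access-token-key")))
    await redis.aclose()  # redis-py >= 5; older releases use close()


asyncio.run(demo())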
""" Authentication API Router. This module provides login and password recovery functionality. """ logger: logging.Logger = logging.getLogger(__name__) router: APIRouter = APIRouter(prefix="/auth", tags=["auth"]) @router.post("/login", response_model=TokenResponse) async def login( request: Request, auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user: Annotated[OAuth2PasswordRequestForm, Depends()], user_service: Annotated[UserService, Depends(get_user_service)], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Endpoint to handle user login with OAuth2 authentication using request form. ## Parameter: - `user:` **Request body with username and password** - `type:` **OAuth2PasswordRequestForm** ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: Request object for client host information :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] = request.client if not client: raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: found_user: UserDB = await user_service.get_login_user(user.username) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="Invalid credentials" ) from exc if not verify_password(found_user.password, user.password): detail: str = "Incorrect password" logger.warning(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) if not found_user.is_active: user_detail: str = "Inactive user" logger.warning(user_detail) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail=user_detail ) return await common_auth_procedure( found_user, client_ip, redis, auth_settings ) @router.post( "/refresh", response_model=TokenResponse, status_code=status.HTTP_201_CREATED, ) async def refresh_token( request: Request, user_service: Annotated[UserService, Depends(get_user_service)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], refresh_current_user: Annotated[ UserAuth, Depends(get_refresh_current_user) ], redis: Annotated[Redis, Depends(get_redis_dep)], # type: ignore ) -> TokenResponse: """ Generates a refresh token for the current user and saves it to the database ## Response: - `return:` **Token information with access token, its type and refresh token** - `rtype:` **TokenResponse** \f :param request: The HTTP request on the server :type request: Request :param user_service: Dependency method for User Service :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param refresh_current_user: The current user dependency for refresh token :type refresh_current_user: UserAuth :param redis: Dependency method for async Redis connection :type redis: Redis """ client: Optional[Address] if not (client := request.client): raise NotFoundException(auth_settings.NO_CLIENT_FOUND) client_ip: str = client.host try: user: UserDB = await user_service.get_login_user( refresh_current_user.username ) except ServiceException as exc: detail: str = "Can not found user information." 
logger.error(detail) raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail=detail ) from exc return await common_auth_procedure(user, client_ip, redis, auth_settings) @router.post("/validate-token", response_model=UserAuth) async def validate_token( current_user: Annotated[UserAuth, Depends(get_current_user)] ) -> UserAuth: """ Endpoint to validate an access token. ## Response: - `return:` **The authenticated user instance** - `rtype:` **UserAuth** \f :param current_user: The current user :type current_user: UserAuth """ return current_user @router.post("/recover-password/{email}", response_model=Msg) async def recover_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], email: Annotated[ EmailStr, Path( ..., title="Email", description="The email used to recover the password", example={"email": "[email protected]"}, openapi_examples=init_setting.EMAIL_BODY_EXAMPLES, ), ], user_service: Annotated[UserService, Depends(get_user_service)], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password recovery. ## Parameter: - `email:` **Path parameter that references the email used to recover the password** - `type:` **EmailStr** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param user_service: Dependency method for User service object :type user_service: UserService :param settings: Dependency method for cached setting object :type settings: config.Settings :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ try: user: Optional[UserResponse] = await user_service.get_user_by_email( email ) except ServiceException as exc: logger.error(exc) user = None if user: password_reset_token: str = generate_password_reset_token( email, auth_settings ) await send_reset_password_email( user.email, user.username, password_reset_token, settings, init_settings, auth_settings, ) return Msg(msg="If the email is registered, a reset link will be sent.") @router.post("/reset-password", response_model=Msg) async def reset_password( settings: Annotated[Settings, Depends(get_settings)], auth_settings: Annotated[AuthSettings, Depends(get_auth_settings)], user_service: Annotated[UserService, Depends(get_user_service)], token_reset_password: Annotated[ TokenResetPassword, Body( ..., title="Body object", description="Object with access token and new password", openapi_examples=init_setting.TOKEN_PAYLOAD_EXAMPLES, ), ], init_settings: Annotated[InitSettings, Depends(get_init_settings)], ) -> Msg: """ Endpoint to handle password reset. 
## Parameter: - `token_reset_password:` **Body Object with token and new password** - `type:` **TokenResetPassword** ## Response: - `return:` **Message object** - `rtype:` **Msg** \f :param settings: Dependency method for cached setting object :type settings: config.Settings :param user_service: Dependency method for User service object :type user_service: UserService :param auth_settings: Dependency method for cached setting object :type auth_settings: AuthSettings :param init_settings: Dependency method for cached init setting object :type init_settings: InitSettings """ email: Optional[EmailStr] = verify_password_reset_token( token_reset_password.token, auth_settings ) if not email: raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid or expired token", ) try: found_user: Optional[ UserResponse ] = await user_service.get_user_by_email(email) except ServiceException as exc: logger.error(exc) raise HTTPException( status_code=status.HTTP_400_BAD_REQUEST, detail="There was an issue with the request", ) from exc if not found_user: raise HTTPException( status_code=status.HTTP_404_NOT_FOUND, detail="User not found" ) user_data: dict[str, Any] = found_user.model_dump() user_data["password"] = token_reset_password.password user_update: UserUpdate = UserUpdate(**user_data)
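The login path checks credentials with verify_password, which wraps a passlib crypt_context that is not part of this record. A minimal sketch assuming a bcrypt-backed CryptContext; bcrypt's 60-character output is what the users_password_length check constraint on the User model enforces:

from passlib.context import CryptContext

# Assumed configuration; the snippet only shows crypt_context.verify(...).
crypt_context = CryptContext(schemes=["bcrypt"], deprecated="auto")

hashed = crypt_context.hash("Hk7pH9*35Fu&3U")
print(len(hashed))  # 60, matching the LENGTH(password) = 60 constraint
print(crypt_context.verify("Hk7pH9*35Fu&3U", hashed))  # True
print(crypt_context.verify("wrong-password", hashed))  # False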
user: UserUpdateResponse = await user_service.update_user( # type: ignore
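The gold continuation of this next_line is not part of the record and is left as-is. Purely to illustrate the dump-override-revalidate step visible at the end of the cropped code, here is a sketch with hypothetical stand-in models; the real UserUpdate validates far more fields through a shared validate_password helper:

from typing import Optional

from pydantic import BaseModel, Field, field_validator


class DemoUserUpdate(BaseModel):
    """Tiny stand-in for the app's UserUpdate schema."""

    username: Optional[str] = Field(default=None, min_length=4, max_length=15)
    password: Optional[str] = Field(default=None, min_length=8, max_length=14)

    @field_validator("password", mode="before")
    @classmethod
    def validate_password(cls, v: Optional[str]) -> Optional[str]:
        # Stand-in rule; the real helper enforces a full password policy.
        if v is not None and v.isalpha():
            raise ValueError("password needs non-alphabetic characters")
        return v


found_user_data = {"username": "username", "password": "old-hash"}
found_user_data["password"] = "Hk7pH9*35Fu&3U"  # override with new password
user_update = DemoUserUpdate(**found_user_data)  # re-validate before update
print(user_update.model_dump(exclude_none=True))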
16
2023-11-17 00:32:32+00:00
24k
fg320/DEASC
examples/12C_5x1_farm_dyn_tuning_wso_grouping_looping.py
[ { "identifier": "WfModel", "path": "deasc/wf_model.py", "snippet": "class WfModel:\n \"\"\"\n Class for wind farm modelling (Interface setup but not limited to FLORIS\n framework).\n \"\"\"\n\n def __init__(self, input_file, path):\n \"\"\"\n Initialise wind farm object by pointing towards an input file.\n (FLORIS interface object).\n\n Args\n ----\n input file:(FLORIS .json input file).\n \"\"\"\n # Read and initialize input file\n self.input_file = input_file\n self.interface = floris_input_handler(self.input_file, path)\n\n # Assign wind farm model proporties\n self.D, self.H_hub, self.n_turbs = floris_properties(self)\n\n def set_aligned_layout(self, n_row, n_col, spac_x, spac_y, coordinates=False):\n \"\"\"\n Modify farm layout in aligned wind turbines with constant spacing,\n differing only from rows to columns. Flow field is also reinitialized.\n\n Args\n ----\n n_row: (float) number of turbine rows\n n_col: (float) number of turbine columns\n spac_x: (float) WT diam normalized turbines distance in x direction\n spac_y: (float) WT diam normalized turbines distance in y direction\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Input type check\n if not all(isinstance(i, int) for i in [n_row, n_col]) or \\\n not all(isinstance(j, (int, float)) for j in [spac_x, spac_y]):\n err_msg = \"Incorrect input value types\"\n raise ValueError(err_msg)\n\n # Calculate new coordinate farm layout\n layout_x = []\n layout_y = []\n for i in range(int(n_row)):\n for j in range(int(n_col)):\n layout_x.append(i * spac_x * self.D)\n layout_y.append(j * spac_y * self.D)\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def set_HR_layout(self, coordinates=False):\n \"\"\"\n Set Horns Rev wind farm layout to wind farm object and\n returns turbines' x and y coordinates if coordinates=True.\n\n Args\n ----\n coordinates: (bool, opt) False if no coordinates wanted.\n Default set to False.\n\n Returns\n -------\n if coordinates is False:\n None\n if coordinates is True:\n x-coordinates: (numpy array) turbines x-coordinates\n y-coordinates: (numpy array) turbines y-coordinates\n \"\"\"\n # Vestas V80 2 MW diameter check\n if self.D != 80:\n warning = \"Rotor diameter not from the Vestas V80 2 MW turbine\"\n warnings.warn(warning, UserWarning)\n\n n_rows = 10\n n_cols = 8\n spac_x = 7\n spac_y = 7\n angle = 6\n layout_x = []\n layout_y = []\n for i in range(int(n_rows)):\n for j in range(int(n_cols)):\n layout_x.append((i * spac_x * self.D) -\n (np.sin(np.radians(angle)) * j * spac_y * self.D))\n layout_y.append(j * spac_y * self.D * np.cos(np.radians(angle)))\n\n # Reinitialize wind farm object\n floris_reinitialise_layout(self, layout_x, layout_y)\n\n if coordinates:\n return (np.array(layout_x), np.array(layout_y))\n else:\n return None\n\n def farm_eval(self, yaw=None, ws=None, wd=None, ti=None, shear=None):\n \"\"\"\n Calculate farm flow field for given wind farm layout and input conditions.\n Return main outputs, such as yaw angles, turbines power, farm power, etc.\n\n Args\n ----\n yaw: (list, optional) turbines yaw angles (deg). Default to None.\n ws: (float, optional) input wind speeds (m/s). 
Default to None.\n wd: (float, optional) input wind directions (deg). Default to None.\n ti: (float, optional) input turbulence intensity. Default to None.\n shear: (float, optional) shear exponent. Default to None.\n\n Returns\n -------\n wf_pow: (float) WF power (MWatts).\n wt_pow: (np.array) WTs power (MWatts).\n wt_ti: (list) WTs turbulence intensity.\n wt_yaw: (np.array) WTs yaw angles (deg).\n \"\"\"\n # Main wind farm calculation\n wf_pow, wt_pow, wt_ti, wt_yaw, _ = floris_farm_eval(self,\n yaw,\n ws,\n wd,\n ti,\n shear)\n\n return (wf_pow, wt_pow, wt_ti, wt_yaw)\n\n def pow_yaw_sweep_1var(self, layout, var_info):\n \"\"\"\n Return wind farm power for a single yaw variable, either a\n single turbine or a single row of turbines. Sweep by row not possible\n for not aligned \"custom\" layouts.\n\n Args\n ----\n layout: (tuple)\n row: (integer) number of farm rows\n cols: (integer) number of farm columns\n or string \"custom\"\n var_info: (tuple)\n var_type: (string) \"T\" for turbine,\n \"R\" for row (not for custom layouts)\n var: (integer) turbine or row number\n var_value: (list of floats) variable values\n\n Returns\n -------\n obj_out: tuple\n obj: (list) objective values\n obj_func: (string) objective function\n var_info: (tuple) see input\n model: (string) model name\n \"\"\"\n # Extract inputs and check inputs\n var_type, var, var_value = var_info\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'R' and layout == \"custom\":\n err_msg = \"Row not allowed for custom layouts\"\n raise ValueError(err_msg)\n if var_type == 'R' and var > rows:\n err_msg = \"Row specified not in farm\"\n raise ValueError(err_msg)\n if var_type == 'T' and var > self.n_turbs:\n err_msg = \"Turbine specified not in farm\"\n raise ValueError(err_msg)\n\n # Calculations\n yaw_angles = np.array(floris_current_yaw(self))\n wf_pow = []\n\n for yaw_change in var_value:\n if layout != \"custom\":\n rows, cols = layout\n if var_type == 'T':\n yaw_angles[(var-1)] = yaw_change\n elif var_type == 'R':\n idx_1 = var*cols\n idx_0 = idx_1-cols\n yaw_angles[idx_0:idx_1] = yaw_change\n else:\n err_msg = \"var_type either 'T' or 'R'\"\n raise ValueError(err_msg)\n\n wf_pow_single, _, _, _ = self.farm_eval(yaw=yaw_angles)\n wf_pow.append(wf_pow_single)\n\n obj_out = (wf_pow, 'Farm Power')\n var_info = (var_type, var, var_value)\n print(\"Function exploration complete\")\n\n return obj_out, var_info" }, { "identifier": "WSOpt", "path": "deasc/wake_steering.py", "snippet": "class WSOpt:\n \"\"\"\n Class to perform wake steering optimization with a WfModel object, given an a-priori\n specified wind farm layout and specified atmopheric conditions. Optimization can have\n all/some turbines as variables, or rows for wind farms with equal columns. 
Optimizers\n available are the local SLSQP, where linear constraints can be added, and the global\n optimizer TuRBO.\n \"\"\"\n\n def __init__(self,\n wf_model,\n inflow,\n variables,\n var_bounds,\n var_initial,\n opt_method=\"SLSQP\",\n opt_options=None,\n obj_function=\"Farm Power\",\n constraints=(None, None, None),\n by_row=(False, None, None),\n tuning_dynamic=False\n ):\n \"\"\"\n Args\n ----\n wf_model: (WfModel)\n WfModel to perform wake steering optimization.\n inflow: (list) Inflow conditions for wake steering optimization.\n yaw_initial: (list) wind farm yaw angles (deg).\n (string) 'random' for random intial wind farm yaw angles.\n wd: (float) input wind directions (deg).\n ws: (float) input wind speeds (m/s).\n ti: (float) input turbulence intensity.\n shear: (float) shear exponent.\n variables: (list)\n List of turbines (or rows) to optimize. Naming convention starts from 1.\n var_bounds: (tuple)\n low_bound: (float) variable (yaw angle) lower bound.\n upp_bound: (float) variable (yaw angle) upper bound.\n var_initial:\n SLSQP: (list) list of initial variable values for each variable.\n (string) 'random' for random initial variable values.\n TURBO_1: (list of lists) list of n_init variable values lists\n (see TURBO_1 options).\n (string) 'LHS' latin hypercube sampling.\n TURBO_M: (string) 'LHS' latin hypercube sampling.\n opt_method: (string, optional) optimization method.\n 'SLSQP', 'TURBO_1 and 'TURBO_M' available.\n Default set to 'SLSQP'.\n opt_options: (dict , optional) optimization method options dictionary.\n Default set to None.\n opt_function: (string , optional) objective function. 'Farm Power' available\n Default set to 'Farm Power'.\n constraints: (tuple) Linear constraints definition. Limited to SLSQP.\n A: (matrix) linear constraint matrix.\n Default set to None.\n low_bound_constr: (float) lower non-normalized contraint bound.\n Default set to None.\n upp_bnd_constr: (float) upper non-normalized contraint bound.\n Default set to None.\n by_row : (tuple, optional) Optimization by row, requires all farm columns to have\n the same amount of rows.\n by_row_bool: (bool) True if optimization variables are wind farm rows,\n False if wind farm turbines. Default set to False.\n rows:: (int) wind farm rows. Default set to None.\n cols:: (int) wind farm columns. Default set to None.\n tuning_dynamic : (bool, optional)\n If True, include dynamic parameter tuning. See tuning_dynamic_initialize\n method. 
Default to False.\n \"\"\"\n # Opt Methods - Opt Options - Optimizers - Opt Functions\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\", \"TURBO_M\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-6,\n 'eps': 0.01},\n \"TURBO_1\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"},\n \"TURBO_M\": {\"n_init\": len(variables)*2,\n \"max_evals\": 500,\n \"n_trust_regions\": 2,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.optimizer_dict = {'SLSQP': self._optimizer_scipy,\n 'TURBO_1': self._optimizer_turbo_1,\n 'TURBO_M': self._optimizer_turbo_m}\n self.obj_function_dict = {'Farm Power': self._obj_function_power}\n\n # Optimization methods and optimizer\n self.opt_method = opt_method\n self._opt_method_settler()\n self.optimizer = self.optimizer_dict[self.opt_method]\n\n # Optimizer options\n self.opt_options = opt_options\n self._opt_options_settler()\n\n # Optimization function\n self.obj_function_name = obj_function\n self._obj_function_settler()\n\n # Wind farm conditions\n self.wf_model = wf_model\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.yaw_initial, self.wd, self.ws, self.ti, self.shear = inflow\n if not isinstance(self.yaw_initial, (list, np.ndarray)):\n if self.yaw_initial == 'random':\n self.yaw_initial = self._random_yaw_generator(self.wf_model.n_turbs,\n var_bounds)\n self._yaw_initial_input_handler()\n self.yaw_initial = np.array([float(item) for item in self.yaw_initial])\n\n # Optimization per wind turbine or per wind farm row\n self.by_row_bool = by_row[0]\n if self.by_row_bool:\n self.rows = by_row[1]\n self.cols = by_row[2]\n self._by_row_input_handler()\n\n # Variable bounds\n self.var_bounds = var_bounds\n self.low_bound, self.upp_bound = self.var_bounds\n self.low_bound_norm = norm(self.low_bound, self.low_bound, self.upp_bound)\n self.upp_bound_norm = norm(self.upp_bound, self.low_bound, self.upp_bound)\n self.var_bounds_norm = (self.low_bound_norm, self.upp_bound_norm)\n tmp = [self.var_bounds_norm for i in range(len(variables))]\n self.var_bounds_norm_list = tmp\n tmp = np.array([self.low_bound_norm for i in range(len(variables))])\n self.low_bound_norm_list = tmp\n tmp = np.array([self.upp_bound_norm for i in range(len(variables))])\n self.upp_bound_norm_list = tmp\n\n # Constraints\n self.A = constraints[0]\n self.low_bound_constr = constraints[1]\n self.upp_bound_constr = constraints[2]\n if self.A is not None:\n self._constraints_input_handler()\n self.low_bound_constr_norm = norm(self.low_bound_constr,\n self.low_bound,\n self.upp_bound)\n self.upp_bound_constr_norm = norm(self.upp_bound_constr,\n self.low_bound,\n self.upp_bound)\n\n # Yaw variables\n self.variables = variables\n self.var_initial = var_initial\n self._variables_input_handler()\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.opt_method == 'SLSQP' and self.var_initial == 'random':\n self.var_initial = self._random_yaw_generator(len(self.variables),\n self.var_bounds)\n self._var_initial_input_handler()\n self.var_initial_norm = self._var_initial_norm()\n\n # Dynamic tuning\n self.tuning_dyn_bool = tuning_dynamic\n 
self._tuning_dyn_bool_check()\n self.tuning_dyn_initialization = False\n\n self.opt_run = False\n\n def tuning_dyn_initialize(self, tuning_dyn_obj_list):\n \"\"\"\n Assign list of tuning dynamic objects TuningDyn to the WSOpt object.\n\n Args\n ----\n tuning_dyn_object: (list of TuningDyn objects)\n \"\"\"\n self.tuning_dyn_obj_list = tuning_dyn_obj_list\n self._tuning_dyn_init_input_handler()\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n tuning_dyn_obj.wso_compatibility_check(self)\n self.tuning_dyn_initialization = True\n\n def optimize_yaw(self):\n \"\"\"\n Optimize the yaw angle for the given WSOpt object.\n\n Returns\n -------\n opt_yaw_angles_vars: (ndarray) optimal yaw angles for the optimization variables.\n opt_yaw_angles_all: (ndarray) optimal yaw angles for all.wind farm turbines.\n \"\"\"\n # Tuning dynamic initialization check\n self._tuning_dyn_initialization_check()\n\n # Print optimization info\n self._print_info()\n\n # Wind farm power - no yaw\n self.wf_pow_noyaw = self._get_farm_power_noyaw()\n\n # Optimize\n self._iter_details_setup()\n self.opt_yaw_angles_vars, self.opt_yaw_angles_all = self.optimizer()\n self.opt_run = True\n\n return (self.opt_yaw_angles_vars, self.opt_yaw_angles_all)\n\n def get_optimization_details(self):\n \"\"\"\n Return optimization details: optimizer iterations details and objective function\n evaluations details. The two are identical for TURBO optimizers as an objective\n function evaluation corresponds to an optimizer iteration, different for SLSQP as\n additional objective function evaluations are required to approximate gradients.\n\n Returns\n -------\n iter_details: (tuple) optimizer iterations details.\n iter_yaw_angles: (list) list of yaw angles per optimizer iteration.\n iter_obj_func: (list) list of objective function per optimizer iteration.\n iter_farm_power: (list) list of farm power values per optimizer iteration.\n eval_details: (tuple) objective fucntion evaluations details.\n eval_yaw_angles: (list) list of yaw angles per evaluation.\n eval_obj_func: (list) list of objective function per evaluation.\n eval_farm_power: (list) list of farm power values per evaluation.\n \"\"\"\n iter_details = (self.iter_yaw_angles,\n self.iter_obj_func,\n self.iter_farm_power)\n eval_details = (self.eval_yaw_angles,\n self.eval_obj_func,\n self.eval_farm_power)\n return (iter_details, eval_details)\n\n # %% Private methods\n\n def _opt_method_settler(self):\n if self.opt_method not in self.opt_method_list:\n err_msg = \"Optimization method not recognized\"\n raise Exception(err_msg)\n\n def _opt_options_settler(self):\n if self.opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n\n def _obj_function_settler(self):\n if self.obj_function_name in list(self.obj_function_dict.keys()):\n self.obj_function = self.obj_function_dict[self.obj_function_name]\n else:\n err_msg = \"Optimization function not recognized\"\n raise Exception(err_msg)\n\n def _random_yaw_generator(self, yaw_number, yaw_bounds):\n yaw_angles = []\n for i in range(yaw_number):\n x = random.choice(range(yaw_bounds[0], yaw_bounds[1]+1))\n yaw_angles.append(x)\n return yaw_angles\n\n def _yaw_initial_input_handler(self):\n if len(self.yaw_initial) != self.wf_model.n_turbs:\n err_msg = \"Initial yaw angles do not match turbine number\"\n raise Exception(err_msg)\n\n def _by_row_input_handler(self):\n if self.rows*self.cols != self.wf_model.n_turbs:\n err_msg = \"Farm rows and columns provided do not match turbine number\"\n raise 
Exception(err_msg)\n\n def _constraints_input_handler(self):\n if self.opt_method != 'SLSQP':\n err_msg = \"Linear constraints (on top of bounds) limited to SLSQP optimizer\"\n raise Exception(err_msg)\n\n def _variables_input_handler(self):\n if self.by_row_bool:\n for row in self.variables:\n if row > self.rows:\n err_msg = \"Row/s specified not in farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.rows:\n err_msg = \"Too many rows specified\"\n raise Exception(err_msg)\n else:\n for turb in self.variables:\n if turb > self.wf_model.n_turbs:\n err_msg = \"Turbine/s specified not in the farm\"\n raise Exception(err_msg)\n if len(self.variables) > self.wf_model.n_turbs:\n err_msg = \"Too many turbines specified\"\n raise Exception(err_msg)\n if 0 in self.variables:\n err_msg = \"Turbine/row counting convention starts from 1\"\n raise Exception(err_msg)\n\n def _var_initial_input_handler(self):\n if self.opt_method == 'TURBO_1':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n pass\n elif self.var_initial == 'random':\n err_msg = \"Random initial variables limited to SLSQP optimizer\"\n raise Exception(err_msg)\n else:\n if len(self.var_initial) != self.opt_options[\"n_init\"]:\n err_msg = \"n_init initial variable lists are needed (see TURBO options)\"\n raise Exception(err_msg)\n elif len(self.var_initial[0]) != len(self.variables):\n err_msg = \"var_initial sublists length not equal number of variables\"\n raise Exception(err_msg)\n elif self.opt_method == 'TURBO_M':\n if self.var_initial != 'LHS':\n err_msg = \"TURBO_M optimizer requires LHS as initial sampling\"\n elif self.opt_method == 'SLSQP':\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n err_msg = \"Latin Hypercube Sampling limited to TURBO optimizers\"\n raise Exception(err_msg)\n elif len(self.variables) != len(self.var_initial):\n err_msg = \"var_initial length needs to equal number of variables\"\n raise Exception(err_msg)\n\n def _var_initial_norm(self):\n if self.opt_method == \"SLSQP\":\n self.var_initial = np.array([float(item) for item in self.var_initial])\n var_initial_norm = norm(self.var_initial, self.low_bound, self.upp_bound)\n elif self.var_initial == 'LHS':\n var_initial_norm = None\n else:\n self.var_initial = np.array([np.array(x) for x in self.var_initial])\n var_initial_norm = []\n for x_list in self.var_initial:\n x_list_norm = []\n for x in x_list:\n x_norm = norm(x, self.low_bound, self.upp_bound)\n x_list_norm.append(x_norm)\n var_initial_norm.append(np.array(x_list_norm))\n return np.array(var_initial_norm)\n\n def _get_farm_power_noyaw(self):\n if (self.tuning_dyn_initialization and\n hasattr(self.tuning_dyn_obj_list[0], 'wf_pow_noyaw')):\n wf_pow_noyaw = self.tuning_dyn_obj_list[0].wf_pow_noyaw\n else:\n self.yaw_zero = np.full(shape=self.wf_model.n_turbs, fill_value=0.0)\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n # Tune parameters\n if self.tuning_dyn_initialization:\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, self.yaw_zero)\n\n wf_pow_noyaw = floris_calculate_farm_power(self.wf_model, self.yaw_zero)\n return wf_pow_noyaw\n\n def _print_info(self):\n print(\"=====================================================\")\n print(\"Optimizing wake redirection control...\")\n print(\"Optimization method: %s\" % (self.opt_method))\n print(\"Optimization function: %s \\n\" % 
(self.obj_function_name))\n if self.by_row_bool:\n print(\"Rows being optimized: \")\n print(self.variables)\n else:\n print(\"Turbines being optimized: \")\n print(self.variables)\n print(\"Number of variables to optimize = \", len(self.variables))\n print(\"=====================================================\")\n\n def _iter_details_setup(self):\n # Details for each obj function evaluation\n self.eval_yaw_angles = [] # deg\n self.eval_obj_func = []\n self.eval_farm_power = [] # MW\n\n # Details for each optimizer iteration\n self.iter_yaw_angles = [] # deg\n self.iter_obj_func = []\n self.iter_farm_power = [] # MW\n\n def _variables_to_farm_yaw(self, yaw_initial, var_values):\n yaw_angles = copy.deepcopy(yaw_initial)\n if self.by_row_bool:\n for i, row_idx in enumerate(self.variables):\n idx_1 = row_idx*self.cols\n idx_0 = idx_1-self.cols\n yaw_angles[idx_0:idx_1] = var_values[i]\n else:\n for i, turb_idx in enumerate(self.variables):\n yaw_angles[turb_idx-1] = var_values[i]\n return yaw_angles.tolist()\n\n # %% Optimizers\n\n def _optimizer_scipy(self):\n # Call back function for iter details\n def callback_func(xk):\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n # Linearly constrained case\n if self.A is not None:\n self.C = LinearConstraint(self.A,\n self.low_bound_constr_norm,\n self.upp_bound_constr_norm)\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n constraints=(self.C,),\n options=self.opt_options)\n # Unconstrained case\n else:\n self.residual_plant = minimize(self.obj_function,\n self.var_initial_norm,\n callback=callback_func,\n method=self.opt_method,\n bounds=self.var_bounds_norm_list,\n options=self.opt_options)\n # Extract optimal yaw angles for variables\n opt_yaw_angles_vars = unnorm(self.residual_plant.x,\n self.low_bound,\n self.upp_bound)\n # Extract optimal yaw angles for the entire farm\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Use best index because if total iterations reached, optimum not last evaluation\n eval_yaw_angles_lists = [x.tolist() for x in self.eval_yaw_angles]\n index_best = eval_yaw_angles_lists.index(opt_yaw_angles_all)\n opt_yaw_angles_all = np.array(opt_yaw_angles_all)\n self.obj_func_opt = self.eval_obj_func[index_best]\n self.farm_power_opt = self.eval_farm_power[index_best]\n\n # Add initial and last points to iteration details\n self.iter_yaw_angles.insert(0, self.eval_yaw_angles[0])\n self.iter_obj_func.insert(0, self.eval_obj_func[0])\n self.iter_farm_power.insert(0, self.eval_farm_power[0])\n self.iter_yaw_angles.append(self.eval_yaw_angles[-1])\n self.iter_obj_func.append(self.eval_obj_func[-1])\n self.iter_farm_power.append(self.eval_farm_power[-1])\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_1(self):\n\n # TURBO initial sampling\n if not isinstance(self.var_initial, (list, np.ndarray)):\n if self.var_initial == 'LHS':\n X_init_provided = False\n X_init_same_norm = None\n else:\n X_init_provided = True\n X_init_same_norm = self.var_initial_norm\n\n # TURBO 
optimization\n turbo_1 = Turbo1(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n X_init_provided=X_init_provided,\n X_init_same=X_init_same_norm,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n def _optimizer_turbo_m(self):\n\n # TURBO optimization\n turbo_m = TurboM(f=self.obj_function,\n lb=self.low_bound_norm_list,\n ub=self.upp_bound_norm_list,\n **self.opt_options,\n )\n turbo_m.optimize()\n X = turbo_m.X # Evaluated points\n fX = turbo_m.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n\n # Extract optimal yaw angles for variables and the entire farm\n opt_yaw_angles_vars = unnorm(x_best,\n self.low_bound,\n self.upp_bound)\n opt_yaw_angles_all = self._variables_to_farm_yaw(self.yaw_initial,\n opt_yaw_angles_vars)\n\n # Equal yaw groups if dynamic tuning with grouping is in place\n if self.tuning_dyn_initialization:\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n opt_yaw_angles_all = self.tuning_dyn_obj_list[0].set_yaw_groups(\n opt_yaw_angles_all)\n\n # Update iteration details (same as evaluation details)\n self.iter_yaw_angles = self.eval_yaw_angles\n self.iter_obj_func = self.eval_obj_func\n self.iter_farm_power = self.eval_farm_power\n\n # Use best index because last iteration might not be the optimal one\n self.obj_func_opt = f_best[0]\n self.farm_power_opt = self.iter_farm_power[index_best]\n\n return (opt_yaw_angles_vars, opt_yaw_angles_all)\n\n # %% Objective functions\n\n def _obj_function_power(self, var_norm):\n\n # Extract farm yaw angles\n var_unnorm = unnorm(var_norm, self.low_bound, self.upp_bound)\n yaw_angles = self._variables_to_farm_yaw(self.yaw_initial, var_unnorm)\n yaw_angles = np.array([float(item) for item in yaw_angles])\n\n # Tune parameters dynamically\n if self.tuning_dyn_initialization:\n # Set equal yaw angles in groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n yaw_angles = self.tuning_dyn_obj_list[0].set_yaw_groups(yaw_angles)\n # Tune parameters\n for tuning_dyn_obj in self.tuning_dyn_obj_list:\n self.wf_model = tuning_dyn_obj.tune_parameter(self, yaw_angles)\n\n # Calculate negative of the farm power normalized by power for zero yaw\n self.wf_model = floris_reinitialise_atmosphere(self.wf_model,\n self.ws,\n self.wd,\n self.ti,\n self.shear)\n wf_pow = floris_calculate_farm_power(self.wf_model, yaw_angles)\n obj_function = (-1 * wf_pow / self.wf_pow_noyaw)\n\n # Update evaluation details\n 
self.eval_yaw_angles.append(yaw_angles)\n self.eval_obj_func.append(obj_function)\n self.eval_farm_power.append(wf_pow)\n\n return obj_function\n\n # %% Tuning Dynamic methods\n\n def _tuning_dyn_bool_check(self):\n if self.tuning_dyn_bool and self.by_row_bool:\n err_msg = \"Dynamic tuning not available for optimization by row.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_init_input_handler(self):\n if not isinstance(self.tuning_dyn_obj_list, (list, np.ndarray)):\n err_msg = \"TuningDyn objects need to be in a list, even if only one.\"\n raise Exception(err_msg)\n # Check dynamic grouping tuning objects have the same tuning groups\n if hasattr(self.tuning_dyn_obj_list[0], 'grouping_bool'):\n tuning_groups_first = self.tuning_dyn_obj_list[0].tuning_groups\n same_groups = all(obj.tuning_groups == tuning_groups_first\n for obj in self.tuning_dyn_obj_list)\n if not same_groups:\n err_msg = \"TuningDyn objects have different groupings.\"\n raise Exception(err_msg)\n\n def _tuning_dyn_initialization_check(self):\n if self.tuning_dyn_bool and not self.tuning_dyn_initialization:\n err_msg = \"Dynamic tuning not initialized. See tuning_dyn_initialize method.\"\n raise Exception(err_msg)" }, { "identifier": "Tuning", "path": "deasc/tuning.py", "snippet": "class Tuning:\n \"\"\"\n Parameter tuning class for a low-fidelity model, where one or more\n parameters are tuned to higher fidelity power measurements. In particular,\n the RMSE is minimised for single turbine power measurements for a single or\n the sum of multiple atmospheric conditions. The wind farm layout is assumed fixed.\n \"\"\"\n\n def __init__(self,\n wf_model,\n variables_class_list,\n variables_names_list,\n variables_bounds_list,\n obj_func_name='RMSE',\n opt_method='SLSQP',\n opt_options=None\n ):\n \"\"\"\n Args\n ----\n wf_model : WfModel object (low-fidelity model)\n single WfModel object to tune\n variables_class_list: list of strings\n list of classes of parameters to tune, one per parameter\n variables_names_list : list of strings\n list of parameter names to tune\n variables_bounds_list : list of tuples\n list of parameter bounds, upper and lower limits for each parameter\n obj_func_name: string\n objective function. Default set to \"RMSE\"\n opt_method: string\n optimization method. Default set to \"SLSQP\" (\"TURBO_1\" also available)\n opt_options: dict\n optimizer options. 
Default set to None\n \"\"\"\n self.obj_func_dict = {'RMSE': self._tuning_rmse_function}\n self.opt_method_list = [\"SLSQP\", \"TURBO_1\"]\n self.opt_options_dict = {\"SLSQP\": {'maxiter': 100,\n 'disp': True,\n 'iprint': 2,\n 'ftol': 1e-12,\n 'eps': 0.1},\n \"TURBO_1\": {\"n_init\": 2*len(variables_names_list),\n \"max_evals\": 100,\n \"batch_size\": 1, # 1 = Serial\n \"verbose\": True,\n \"use_ard\": True,\n \"max_cholesky_size\": 2000,\n \"n_training_steps\": 50,\n \"min_cuda\": 1024,\n \"device\": \"cpu\",\n \"dtype\": \"float64\"}}\n self.tuning_optimizer_dict = {'SLSQP': self._tuning_optimizer_scipy,\n 'TURBO_1': self._tuning_optimizer_turbo_1}\n\n self.wf_model = wf_model\n self.variables_class_list = variables_class_list\n self.variables_names_list = variables_names_list\n self.variables_bounds_list = variables_bounds_list\n\n self.obj_func_name = obj_func_name\n self.obj_func = self.obj_func_dict[self.obj_func_name]\n self.opt_method = opt_method\n if opt_options is None:\n self.opt_options = self.opt_options_dict[self.opt_method]\n else:\n self.opt_options = opt_options\n self._tuning_optimizer = self.tuning_optimizer_dict[self.opt_method]\n\n self.tuning_data_received = False\n self.tuning_conditions_received = False\n\n print(\"\\nInitialised parameter tuning\")\n print(\"%i parameters to tune\" % (len(self.variables_names_list)))\n print(\"%s optimization method\" % (self.opt_method))\n\n def tuning_data(self, data_power_list):\n \"\"\"\n Provide training higher-fidelity data for parameter tuning.\n Limited to power of each turbine for each condition ('RMSE')\n\n Args\n ----\n data_power_list : list of lists\n For each condition:\n list of turbines power output ('RMSE')\n \"\"\"\n self.tuning_data_power_list = data_power_list\n self.tuning_data_received = True\n\n def tuning_conditions(self,\n yaw_angles_list,\n wind_directions_list,\n wind_speeds_list,\n turbulence_intensities_list,\n wind_shear_list):\n \"\"\"\n Define the wind farm conditions (yaw and atmospheric)\n of the higher-fidelity data.\n\n Args\n ----\n yaw_angles_list : list of lists\n For each condition, list of turbines yaw_angles\n wind_directions_list: list\n For each condition, wind direction\n wind_speeds_list: list\n For each condition, wind speed\n turbulence_intensities_list: list\n For each condition, turbulence intensity\n wind_shear_list: list\n For each condition, wind shear\n \"\"\"\n self.yaw_angles_list = yaw_angles_list\n self.wind_directions_list = wind_directions_list\n self.wind_speeds_list = wind_speeds_list\n self.turbulence_intensities_list = turbulence_intensities_list\n self.wind_shear_list = wind_shear_list\n self.tuning_conditions_received = True\n\n def tune_parameters(self):\n \"\"\"\n Tune specified parameters of a WfModel object.\n Requires higher-fidelity tuning data and the related conditions to be\n previously specified (refer to Tuning methods: tuning_data and tuning_conditions).\n\n Returns\n -------\n wf_model_tuned: WfModel object\n WfModel object with parameters tuned\n wf_model_dict_opt: dictionary\n tuned WfModel object dictionary\n \"\"\"\n # Double check tuning data and conditions have been specified\n if not self.tuning_data_received:\n err_msg = \"Tuning data not specified. Use tuning_data method.\"\n raise Exception(err_msg)\n if not self.tuning_conditions_received:\n err_msg = \"Tuning conditions not specified. 
Use tuning_conditions method.\"\n raise Exception(err_msg)\n\n # Extract original wf_model object dictionary and print its parameters\n self.wf_model_dict_original = floris_extract_object_dict(self.wf_model)\n self.models_dict = floris_extract_models_dict(self.wf_model_dict_original)\n floris_print_params(self.wf_model_dict_original,\n self.models_dict,\n \"Original model parameters\")\n\n # Extract initial variable values and normalise them\n self.variables_init = self._wf_model_dict_to_variables(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list)\n self.variables_init_norm = self._norm_variables(self.variables_init,\n self.variables_bounds_list)\n\n # Normalize variable bounds\n tmp = self.variables_bounds_list\n (self.variables_bounds_list_norm,\n self.variables_low_bound_list_norm,\n self.variables_upp_bound_list_norm) = self._norm_variables_bounds_lists(tmp)\n\n # Minimisation of error | Extract optimal variables\n self._tuning_optimizer()\n self.opt_variables = self._unnorm_variables(self.opt_variables_norm,\n self.variables_bounds_list)\n\n # Apply tuned parameters (opt_variables) to wf_model and print them\n self.wf_model_dict_opt = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n self.opt_variables)\n self.wf_model = floris_param_change_object(self.wf_model, self.wf_model_dict_opt)\n floris_print_params(self.wf_model_dict_opt,\n self.models_dict,\n \"Optimal model parameters\")\n\n return self.wf_model, self.wf_model_dict_opt\n\n # %% Private methods\n\n def _wf_model_dict_to_variables(self, wf_model_dict, class_list, names_list):\n variables = []\n for i in range(len(names_list)):\n variable = floris_extract_parameter(wf_model_dict,\n class_list[i],\n names_list[i])\n variables.append(variable)\n return variables\n\n def _norm_variables(self, variables, variables_bounds_list):\n variables_norm = ([norm(variables[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables))])\n return variables_norm\n\n def _norm_variables_bounds_lists(self, variables_bounds_list):\n variables_bounds_list_norm = []\n variables_low_bound_list_norm = []\n variables_upp_bound_list_norm = []\n for i, variable_bounds in enumerate(variables_bounds_list):\n lower_bound_norm = norm(variable_bounds[0],\n variable_bounds[0],\n variable_bounds[1])\n upper_bound_norm = norm(variable_bounds[1],\n variable_bounds[0],\n variable_bounds[1])\n bound_norm_tuple = (lower_bound_norm, upper_bound_norm)\n variables_bounds_list_norm.append(bound_norm_tuple)\n variables_low_bound_list_norm.append(lower_bound_norm)\n variables_upp_bound_list_norm.append(upper_bound_norm)\n return (variables_bounds_list_norm,\n np.array(variables_low_bound_list_norm),\n np.array(variables_upp_bound_list_norm))\n\n def _unnorm_variables(self, variables_norm, variables_bounds_list):\n variables = ([unnorm(variables_norm[i],\n variables_bounds_list[i][0],\n variables_bounds_list[i][1])\n for i in range(len(variables_norm))])\n return variables\n\n def _vars_to_wf_model_dict(self,\n wf_model_dict_original,\n variables_class_list,\n variables_names_list,\n variables):\n wf_model_dict_new = copy.deepcopy(wf_model_dict_original)\n for i in range(len(variables)):\n wf_model_dict_new = floris_param_change_object_dict(wf_model_dict_new,\n variables_class_list[i],\n variables_names_list[i],\n variables[i])\n return wf_model_dict_new\n\n def _tuning_optimizer_scipy(self):\n self.opt_results = 
minimize(self.obj_func,\n self.variables_init_norm,\n method=self.opt_method,\n bounds=self.variables_bounds_list_norm,\n options=self.opt_options)\n self.opt_variables_norm = self.opt_results.x\n\n def _tuning_optimizer_turbo_1(self):\n turbo_1 = Turbo1(f=self.obj_func,\n lb=self.variables_low_bound_list_norm,\n ub=self.variables_upp_bound_list_norm,\n **self.opt_options,\n )\n turbo_1.optimize()\n X = turbo_1.X # Evaluated points\n fX = turbo_1.fX # Observed values\n index_best = np.argmin(fX)\n f_best, x_best = fX[index_best], X[index_best, :]\n self.opt_variables_norm = x_best\n\n def _tuning_rmse_function(self, variables_norm):\n\n # Unnorm variables, create new wf_model dictionary\n variables = self._unnorm_variables(variables_norm, self.variables_bounds_list)\n wf_model_dict_new = self._vars_to_wf_model_dict(self.wf_model_dict_original,\n self.variables_class_list,\n self.variables_names_list,\n variables)\n\n # Create new wf_model object and reinitialize (atmospheric conditions set later)\n self.wf_model = floris_param_change_object(self.wf_model, wf_model_dict_new)\n\n rmse = 0\n for i in range(len(self.tuning_data_power_list)):\n\n # Calculate wind turbine power outputs with model to tune\n floris_reinitialise_atmosphere(self.wf_model,\n ws=self.wind_speeds_list[i],\n wd=self.wind_directions_list[i],\n ti=self.turbulence_intensities_list[i],\n shear=self.wind_shear_list[i])\n yaw_angles = np.array([float(item) for item in self.yaw_angles_list[i]])\n power_turbines = floris_calculate_turbine_power(self.wf_model, yaw_angles)\n\n # Calculate root mean squared error single condition\n error = 0\n for j in range(len(power_turbines)):\n error += (self.tuning_data_power_list[i][j]-power_turbines[j])**2\n rmse_single = error/len(power_turbines)\n\n # Calculate sum of root mean squared errors\n rmse += rmse_single\n\n return rmse" }, { "identifier": "GPWrap", "path": "deasc/gp.py", "snippet": "class GPWrap:\n \"\"\"\n Wrapper class to create, modify and visualise Gaussian Processes for dynamic parameter\n tuning. Currently limited to a single output GP.\n \"\"\"\n\n def __init__(self, parameter_class, parameter_name, dimensions):\n self.parameter_class = parameter_class\n self.parameter_name = parameter_name\n self.dimensions = dimensions\n \"\"\"\n Args\n ----\n parameter_class: string\n Parameter class of the optimal parameter to fit.\n parameter_name: string\n Name of the optimal parameter to fit.\n dimensions: integer\n Dimensions/inputs/variables of the GP.\n \"\"\"\n\n def GP_so(self, yaw_data, param_data, num_restarts=50, noise=0.05):\n \"\"\"\n Construct and returns a single-output (SO) GP for the given input dataset\n (optimal parameter for a given yaw configuration).\n\n Args\n ----\n yaw_data: list of lists\n list of input yaw configurations for which parameter has been tuned\n param_data: list of lists\n for each yaw configuration in yaw_data, list containing the optimal parameter\n num_restarts: int\n number of random starts of the GP hyperparameter tuning optimization\n noise: float\n noise in output prediction. 
Default is 0.05\n\n Returns\n -------\n m: GPy single-output Gaussian Process model\n \"\"\"\n # Sample check on argument dimension\n if len(yaw_data[0]) != self.dimensions:\n err_msg = (\"Yaw input and GP dimensions do not match\")\n raise Exception(err_msg)\n if len(param_data[0]) != 1:\n err_msg = (\"Single-output GPs only\")\n raise Exception(err_msg)\n\n # Data structure arguments\n yaw_data_GP = np.array(yaw_data)\n param_data_GP = np.array(param_data)\n\n # GP model\n kernel = GPy.kern.RBF(input_dim=self.dimensions, variance=1., lengthscale=1.)\n self.m = GPy.models.GPRegression(yaw_data_GP,\n param_data_GP,\n kernel,\n noise_var=noise)\n\n # Hyperparameter tuning\n self.m.optimize(optimizer=None, # Default lbfgsb\n start=None,\n messages=False,\n max_iters=1000)\n self.m.optimize_restarts(num_restarts=num_restarts)\n return self.m\n\n def GP_so_plot(self, parameter_range_plot, yaw_range_plot):\n \"\"\"\n Plot a single-output (SO) GP model. 1D and 2D plots are generated for each\n variable combination.\n\n Args\n ----\n parameter_range_plot: tuple\n range of the optimal parameter to plot\n yaw_range_plot: tuple\n range of the yaw variables to plot\n \"\"\"\n # Plotting library choice and defaults values\n GPy.plotting.change_plotting_library('matplotlib')\n GPy.plotting.matplot_dep.defaults.data_2d = {'s': 0,\n 'edgecolors': 'none',\n 'linewidth': 0.0,\n 'cmap': cm.get_cmap('hot'),\n 'alpha': 0.5}\n\n # 1D Plots\n if self.dimensions == 1:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(5, 2.5))\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n fig = self.m.plot(figure=figure,\n col=1,\n row=1,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=True)\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n figsize = (5*n_cuts, 2.5*self.dimensions)\n figure = GPy.plotting.plotting_library().figure(self.dimensions,\n n_cuts,\n figsize=figsize)\n\n for dim_idx in range(self.dimensions):\n for i, slice_single in zip(range(n_cuts), slices):\n title = \"GP %s - $\\gamma_{others}$\" \\\n \"%.1f $^{\\circ}$\" % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (dim_idx+1)\n ylabel = '$%s_{opt}$' % (self.parameter_name)\n inputs = []\n for j in range(self.dimensions):\n if j == dim_idx:\n pass\n else:\n inputs.append((j, slice_single))\n fig = self.m.plot(figure=figure,\n col=(i+1),\n row=(dim_idx+1),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n ylim=list(parameter_range_plot),\n legend=False,\n plot_data=False)\n\n # 2D Plots\n # Contours are fine ##\n # Data points (training) plotted are off ##\n # double checked with GP and training database ##\n if self.dimensions == 1:\n pass\n elif self.dimensions == 2:\n figure = GPy.plotting.plotting_library().figure(1, 1, figsize=(3, 2.5))\n\n title = 'GP %s' % (self.parameter_name)\n xlabel = '$\\gamma_{1}$ [deg]'\n ylabel = '$\\gamma_{2}$ [deg]'\n\n fig = self.m.plot(figure=figure,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))\n else:\n n_cuts = 3\n slices = np.linspace(yaw_range_plot[0], yaw_range_plot[1], n_cuts)\n plot_rows = self.dimensions-1\n plot_cols = self.dimensions-1\n combinations = list(itertools.combinations(\n list(range(0, 
self.dimensions)), 2))\n\n figsize = (3*plot_cols*len(slices), 2.5*plot_rows)\n figure = GPy.plotting.plotting_library().figure(plot_rows,\n plot_cols*len(slices),\n figsize=figsize)\n for i, slice_single in zip(range(n_cuts), slices):\n for comb_idx, comb in enumerate(combinations):\n title = 'GP %s - $\\gamma_{others}$' \\\n '%.1f $^{\\circ}$' % (self.parameter_name, slice_single)\n xlabel = '$\\gamma_{%i}$ [deg]' % (comb[0]+1)\n ylabel = '$\\gamma_{%i}$ [deg]' % (comb[1]+1)\n inputs = []\n for j in range(self.dimensions):\n if j in comb:\n pass\n else:\n inputs.append((j, slice_single))\n\n fig = self.m.plot(figure=figure,\n col=(comb[0]+1+plot_cols*i),\n row=(comb[1]),\n fixed_inputs=inputs,\n title=title,\n xlabel=xlabel,\n ylabel=ylabel,\n legend=False,\n plot_data=True)\n\n ax = plt.gca()\n mappable = ax.collections[0]\n cbar = plt.colorbar(mappable)\n # cbar.set_label('$%s_{opt}$'%(self.parameter_name))" }, { "identifier": "TuningDyn_Grouping", "path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Grouping(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"Class for dynamic parameter tuning with grouping of turbines within a wind farm.\"\"\"\n\n def __init__(self, param_class, param_name, tuning_groups, GP_model):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_groups: (list of lists) list of turbine groups included in the tuning. In\n each list, specify the turbines in the group.\n GP_model: (GPy object) GP model with len(tuning_groups) input dimensions.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_groups\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n # GP dimension check\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Grouping info\n self.tuning_groups = tuning_groups\n self.grouping_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return [x for sublist in self.tuning_variables for x in sublist]\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._tuning_groups_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf_model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_groups(self.tuning_groups, yaw_angles)\n mu, var = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def set_yaw_groups(self, yaw_angles):\n \"\"\"\n Force yaw angles of turbines in tuning groups to be equal in the wake\n steering optimisation.\n\n Args\n ----\n yaw_angles: (np.ndarray) yaw angles of all turbines in the 
wind farm.\n\n Returns\n -------\n yaw_angles_grouped: (np.ndarray) yaw angles of all turbines in the wind farm with\n equal yaw angles in each turbine group.\n \"\"\"\n return self._set_yaw_groups(yaw_angles)" }, { "identifier": "TuningDyn_Looping_Turbine", "path": "deasc/tuning_dynamic.py", "snippet": "class TuningDyn_Looping_Turbine(TuningDyn, TuningDyn_SharedMethods):\n \"\"\"\n Class for dynamic parameter tuning with the looping approach of turbines within\n a wind farm.\n \"\"\"\n\n def __init__(self, param_class, param_name, tuning_turbine, GP_model, wf_pow_noyaw):\n \"\"\"\n Args\n ----\n param_class: (string) tuning parameter class.\n param_name: (string) tuning parameter name.\n tuning_turbine: (list) list with the single turbine included in the tuning.\n GP_model: (GPy object) GP model with a single input dimension.\n wf_pow_noyaw: (float) value of the wind farm power without any yaw applied,\n usually extracted from the previous grouping optimisation to refine.\n \"\"\"\n super().__init__(param_class, param_name)\n # Tuning info\n self.tuning_variables = tuning_turbine\n self.tuning_dimensions = len(self.tuning_variables)\n self.GP_model = GP_model\n self._GP_dimension_check(self.tuning_dimensions, self.GP_model)\n # Looping info\n self.wf_pow_noyaw = wf_pow_noyaw\n self.tuning_bool = True\n\n @property\n def tuning_turbines(self):\n \"\"\"List of the tuning turbines in the wind farm.\"\"\"\n return self.tuning_variables\n\n def wso_compatibility_check(self, wso_obj):\n \"\"\"\n Check compatibility with a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object to which dynamic parameter tuning is added.\n \"\"\"\n self._tuning_turbines_check(wso_obj, self.tuning_turbines)\n self._looping_check(wso_obj)\n\n def tune_parameter(self, wso_obj, yaw_angles):\n \"\"\"\n Perform parameter tuning in a WSOpt object.\n\n Args\n ----\n wso_obj: (WSOpt) WSOpt object.\n yaw_angles: (np.ndarray) yaw angles of all turbines in the wind farm.\n\n Returns\n -------\n wf_model_tuned: (WfModel) tuned WfModel to use in the current iteration of the\n wake steering optimisation.\n \"\"\"\n # Extract WSOpt WfModel dictionary\n wf_model_dict = floris_extract_object_dict(wso_obj.wf_model)\n\n # Create and apply tuned WfModel dictionary\n GP_input = self._get_GP_input_turbines(self.tuning_turbines, yaw_angles)\n mu, var = self.GP_model.predict_noiseless(np.array([GP_input]))\n optimal_parameter = mu[0][0]\n wf_model_dict_tuned = floris_param_change_object_dict(wf_model_dict,\n self.param_class,\n self.param_name,\n optimal_parameter)\n wf_model_tuned = floris_param_change_object(wso_obj.wf_model,\n wf_model_dict_tuned)\n return wf_model_tuned\n\n def _looping_check(self, wso_obj):\n if len(self.tuning_variables) != 1:\n err_msg = \"While looping, only a single turbine can be tuned.\"\n raise Exception(err_msg)\n if len(wso_obj.variables) != 1:\n err_msg = \"While looping, only a single turbine can be optimised.\"\n raise Exception(err_msg)" }, { "identifier": "floris_extract_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_object_dict(wf_model):\n \"\"\"Extract and return the current FLORIS object dictionary.\"\"\"\n return wf_model.interface.floris.as_dict()" }, { "identifier": "floris_extract_parameter", "path": "deasc/utils_floris.py", "snippet": "def floris_extract_parameter(wf_model_dict, param_class, param_name):\n \"\"\"Extract and return the current parameter value of a FLORIS object parameter.\"\"\"\n models_dict = floris_extract_models_dict(wf_model_dict)\n return 
wf_model_dict['wake'][param_class][models_dict[param_class]][param_name]" }, { "identifier": "floris_param_change_object_dict", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object_dict(wf_model_dict, param_class, param_name, param_value):\n \"\"\"\n Change FLORIS object with a new model parameter, return new FLORIS object dictionary.\n FLORIS object is not reinitialised (see function floris_parameter_change_object).\n \"\"\"\n wf_model_dict_new = copy.deepcopy(wf_model_dict)\n models_dict = floris_extract_models_dict(wf_model_dict_new)\n (wf_model_dict_new['wake'][param_class]\n [models_dict[param_class]][param_name]) = param_value\n return wf_model_dict_new" }, { "identifier": "floris_param_change_object", "path": "deasc/utils_floris.py", "snippet": "def floris_param_change_object(wf_model, wf_model_dict_new):\n \"\"\"Change FLORIS object with new object dictionary. Also reinitialise farm layout.\"\"\"\n x_reinit, y_reinit = wf_model.interface.get_turbine_layout()\n wf_model.interface = FI(wf_model_dict_new)\n wf_model.interface.reinitialize(layout_x=x_reinit, layout_y=y_reinit)\n return wf_model" } ]
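Note: the WSOpt and Tuning snippets above repeatedly call norm and unnorm helpers that are imported from the deasc utilities but are not quoted in this context list. A minimal sketch of what they are assumed to do, consistent with how _var_initial_norm and _norm_variables_bounds_lists use them (a plain affine map onto [0, 1] and its inverse); the definitions below are assumptions for illustration, not the quoted library code:

import numpy as np

def norm(val, low, upp):
    # Assumed behaviour: map a value from [low, upp] onto [0, 1].
    return (val - low) / (upp - low)

def unnorm(val_norm, low, upp):
    # Assumed inverse: map a value from [0, 1] back onto [low, upp].
    return val_norm * (upp - low) + low

# Round-trip check with the yaw bounds used in the example record below.
assert np.isclose(unnorm(norm(10.0, -25, 25), -25, 25), 10.0)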
import numpy as np from deasc import WfModel from deasc import WSOpt from deasc import Tuning from deasc import GPWrap from deasc import TuningDyn_Grouping from deasc import TuningDyn_Looping_Turbine from deasc.utils_floris import ( floris_extract_object_dict, floris_extract_parameter, floris_param_change_object_dict, floris_param_change_object )
15,511
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(5), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2, 3, 4] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation
""" This example shows wake steering optimisation on a 5x1 wind farm of NREL 5 MW turbines. Dynamic parameter tuning with the looping approach is implemented to refine the results achieved with grouping. Tuning is introduced in the optimisation for the wake expansion parameter k of the Jensen wake model. The tuning variables are the yaw angles of all wind turbines in the farm, excluding the most downstream one. """ # %% Initial wake steering optimisation - Grouping approach for dynamic parameter tuning # Initialise and set layout for wind farm model path = "./inputs/" input_file = "jensen.yaml" wf_model = WfModel(input_file, path) wf_model.set_aligned_layout(5, 1, 7, 5) # Set kd deflection parameter wf_model_dict = floris_extract_object_dict(wf_model) wf_model_dict = floris_param_change_object_dict(wf_model_dict, 'wake_deflection_parameters', 'kd', 0.3) wf_model = floris_param_change_object(wf_model, wf_model_dict) # Specify atmopheric conditions ws = 8.0 wd = 270 ti = 0.05 shear = 0.0 # Wake steering optimisation inputs yaw_initial = np.full(shape=(5), fill_value=0) inflow = (yaw_initial, wd, ws, ti, shear) variables = [1, 2, 3, 4] var_bounds = (-25, 25) var_initial = np.full(shape=(len(variables)), fill_value=0) # Dynamic tuning object # Parameter info parameter_class = 'wake_velocity_parameters' parameter_name = 'we' # Import optimal parameter dataset and extract GP input dataset_path = "./optimal_parameter_datasets/" dataset_import = np.load(dataset_path+'we_5x1_2dim_grouping.npy', allow_pickle=True) optimal_parameter_dataset = dataset_import.item() yaw_data = [] param_data = [] for key in optimal_parameter_dataset.keys(): yaw_data.append([key[0], key[2]]) # Extract group yaw param_data.append([optimal_parameter_dataset[key]]) # Construct Gaussian Process (GP) GP_obj = GPWrap(parameter_class=parameter_class, parameter_name=parameter_name, dimensions=2) GP_model = GP_obj.GP_so(yaw_data, param_data, num_restarts=100, noise=0.05) # Tuning object initialisation
tuning_dyn_obj = TuningDyn_Grouping(param_class=parameter_class,
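The next_line field above is the call the completion model must continue. Based on the TuningDyn_Grouping.__init__ signature and the WSOpt methods quoted in the context list, a plausible continuation is sketched below; the tuning_groups value and the WSOpt constructor arguments are assumptions, since neither appears in the quoted snippets:

tuning_dyn_obj = TuningDyn_Grouping(param_class=parameter_class,
                                    param_name=parameter_name,
                                    tuning_groups=[[1, 2], [3, 4]],  # assumed grouping
                                    GP_model=GP_model)

# Wiring into the optimiser follows the WSOpt methods shown earlier.
wso_obj = WSOpt(wf_model, inflow, variables, var_bounds, var_initial)  # signature assumed
wso_obj.tuning_dyn_initialize([tuning_dyn_obj])
opt_yaw_angles_vars, opt_yaw_angles_all = wso_obj.optimize_yaw()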
4
2023-11-10 18:13:27+00:00
24k
PlaxtonFlarion/NexaFlow
nexaflow/skills/alynex.py
[ { "identifier": "toolbox", "path": "nexaflow/toolbox.py", "snippet": "def video_capture(video_path: str):\ndef video_jump(video_cap: cv2.VideoCapture, frame_id: int):\ndef compare_ssim(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef multi_compare_ssim(\n pic1_list: typing.List, pic2_list: typing.List, hooks: typing.List = None\n) -> typing.List[float]:\ndef get_current_frame_id(video_cap: cv2.VideoCapture) -> int:\ndef get_current_frame_time(video_cap: cv2.VideoCapture) -> float:\ndef imread(img_path: str, *_, **__) -> np.ndarray:\ndef get_frame_time(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> float:\ndef get_frame_count(video_cap: cv2.VideoCapture) -> int:\ndef get_frame_size(video_cap: cv2.VideoCapture) -> typing.Tuple[int, int]:\ndef get_frame(\n video_cap: cv2.VideoCapture, frame_id: int, recover: bool = None\n) -> np.ndarray:\ndef turn_grey(old: np.ndarray) -> np.ndarray:\ndef turn_binary(old: np.ndarray) -> np.ndarray:\ndef turn_hog_desc(old: np.ndarray) -> np.ndarray:\ndef turn_lbp_desc(old: np.ndarray, radius: int = None) -> np.ndarray:\ndef turn_blur(old: np.ndarray) -> np.ndarray:\ndef sharpen_frame(old: np.ndarray) -> np.ndarray:\ndef calc_mse(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef calc_psnr(pic1: np.ndarray, pic2: np.ndarray) -> float:\ndef compress_frame(\n old: np.ndarray,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n not_grey: bool = None,\n interpolation: int = None,\n *_,\n **__,\n) -> np.ndarray:\ndef get_timestamp_str() -> str:\ndef np2b64str(frame: np.ndarray) -> str:\ndef fps_convert(\n target_fps: int, source_path: str, target_path: str, ffmpeg_exe: str = None\n) -> int:\ndef match_template_with_object(\n template: np.ndarray,\n target: np.ndarray,\n engine_template_cv_method_name: str = None,\n **kwargs,\n) -> typing.Dict[str, typing.Any]:\ndef match_template_with_path(\n template: str, target: np.ndarray, **kwargs\n) -> typing.Dict[str, typing.Any]:\ndef show_progress(total: int, color: int, title: str) -> tqdm:\ndef draw_line(image_path: str, save_path: str = None):" }, { "identifier": "Report", "path": "nexaflow/skills/report.py", "snippet": "class Report(object):\n\n __lock: threading.Lock = threading.Lock()\n __initialized: bool = False\n __instance = None\n __init_var = None\n\n def __new__(cls, *args, **kwargs):\n if cls.__instance is None:\n with cls.__lock:\n if cls.__instance is None:\n cls.__instance = super(Report, cls).__new__(cls)\n cls.__init_var = (args, kwargs)\n return cls.__instance\n\n def __init__(self, total_path: str):\n if not self.__initialized:\n self.__initialized = True\n\n self.clock: Any = lambda: time.strftime(\"%Y%m%d%H%M%S\")\n\n self.__title: str = \"\"\n self.__query: str = \"\"\n self.query_path: str = \"\"\n self.video_path: str = \"\"\n self.frame_path: str = \"\"\n self.extra_path: str = \"\"\n\n self.range_list: list[dict] = []\n self.total_list: list[dict] = []\n\n self.total_path = os.path.join(total_path, f\"Nexa_{self.clock()}_{os.getpid()}\", \"Nexa_Collection\")\n # self.total_path = \"/Users/acekeppel/PycharmProjects/NexaFlow/report/Nexa_20230822223025/Nexa_Collection\"\n os.makedirs(self.total_path, exist_ok=True)\n\n self.reset_path = os.path.join(os.path.dirname(self.total_path), \"Nexa_Recovery\")\n os.makedirs(self.reset_path, exist_ok=True)\n log_papers = os.path.join(self.reset_path, \"nexaflow.log\")\n logger.add(log_papers, format=FORMAT, level=\"DEBUG\")\n\n @property\n def proto_path(self) -> str:\n return 
os.path.join(self.query_path, self.query)\n\n @property\n def title(self):\n return self.__title\n\n @title.setter\n def title(self, title: str):\n self.__title = title\n self.query_path = os.path.join(self.total_path, self.title)\n os.makedirs(self.query_path, exist_ok=True)\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {self.title} ✪✪✪✪✪✪✪✪✪✪\\n\")\n\n @title.deleter\n def title(self):\n del self.__title\n\n @property\n def query(self):\n return self.__query\n\n @query.setter\n def query(self, query: str):\n self.__query = query\n self.video_path = os.path.join(self.query_path, self.query, \"video\")\n self.frame_path = os.path.join(self.query_path, self.query, \"frame\")\n self.extra_path = os.path.join(self.query_path, self.query, \"extra\")\n os.makedirs(self.video_path, exist_ok=True)\n os.makedirs(self.frame_path, exist_ok=True)\n os.makedirs(self.extra_path, exist_ok=True)\n logger.info(f\"Start -> {self.query}\")\n\n @query.deleter\n def query(self):\n del self.__query\n\n def load(self, inform: Optional[Dict[str, Union[str | Dict]]]) -> None:\n if inform:\n self.range_list.append(inform)\n logger.info(f\"End -> {self.query}\\n\")\n\n def create_report(self) -> None:\n\n def start_create(result):\n handler_list = []\n query = result.get(\"query\", \"TimeCost\")\n stage = result.get(\"stage\", {\"start\": 1, \"end\": 2, \"cost\": \"0.00000\"})\n frame = result.get(\"frame\", \"\")\n extra = result.get(\"extra\", \"\")\n proto = result.get(\"proto\", \"\")\n\n image_list = []\n for image in os.listdir(frame):\n image_src = os.path.join(query, \"frame\", image)\n image_ids = re.search(r\"\\d+(?=_)\", image).group()\n timestamp = float(re.search(r\"(?<=_).+(?=\\.)\", image).group())\n image_list.append(\n {\n \"src\": image_src,\n \"frames_id\": image_ids,\n \"timestamp\": f\"{timestamp:.5f}\"\n }\n )\n image_list.sort(key=lambda x: int(x[\"frames_id\"]))\n\n extra_list = []\n for ex in os.listdir(extra):\n extra_src = os.path.join(query, \"extra\", ex)\n extra_idx = ex.split(\"(\")[0]\n extra_list.append(\n {\n \"src\": extra_src,\n \"idx\": extra_idx\n }\n )\n extra_list.sort(key=lambda x: int(x[\"idx\"].split(\"(\")[0]))\n\n handler_list.append(\n {\n \"query\": query,\n \"stage\": stage,\n \"image_list\": image_list,\n \"extra_list\": extra_list,\n \"proto\": os.path.join(query, os.path.basename(proto))\n }\n )\n\n return handler_list\n\n if len(self.range_list) > 0:\n if len(self.range_list) == 1:\n images_list = start_create(self.range_list[0])\n else:\n with ThreadPoolExecutor() as executor:\n future = executor.map(start_create, self.range_list)\n images_list = [i for f in future for i in f]\n\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_main.html\")\n\n html = template.render(title=self.title, images_list=images_list)\n report_html = os.path.join(self.query_path, f\"{self.title}.html\")\n with open(file=report_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成聚合报告: {os.path.basename(report_html)}\")\n\n cost_list = [cost['stage']['cost'] for cost in images_list]\n href_path = os.path.join(\n os.path.basename(self.total_path),\n self.title,\n os.path.basename(report_html)\n )\n single = {\n \"case\": self.title,\n \"cost_list\": cost_list,\n \"avg\": f\"{sum(map(float, cost_list)) / len(cost_list):.5f}\",\n \"href\": href_path\n }\n logger.debug(\"Recovery: \" + json.dumps(single, ensure_ascii=False))\n self.total_list.append(single)\n 
self.range_list.clear()\n else:\n logger.info(\"没有可以聚合的报告 ...\")\n\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {self.title} ✪✪✪✪✪✪✪✪✪✪\\n\\n\")\n\n def create_total_report(self) -> None:\n if len(self.total_list) > 0:\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n html = template.render(report_time=report_time, total_list=self.total_list)\n\n total_html_path = os.path.join(os.path.dirname(self.total_path), \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html_path}\\n\\n\")\n self.total_list.clear()\n else:\n logger.info(\"没有可以汇总的报告 ...\")\n\n @staticmethod\n def reset_report(file_name: str) -> None:\n loader = FileSystemLoader(os.path.join(Constants.NEXA, \"template\"))\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n\n with open(\n file=os.path.join(file_name, \"Nexa_Recovery\", \"nexaflow.log\"),\n mode=\"r\", encoding=\"utf-8\"\n ) as f:\n log_restore = re.findall(r\"(?<=Recovery: ).*}\", f.read())\n total_list = [json.loads(file) for file in log_restore]\n html = template.render(report_time=report_time, total_list=total_list)\n\n total_html_path = os.path.join(file_name, \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html_path}\\n\\n\")\n\n @staticmethod\n def merge_report(merge_list: List[str], loader_merge_loc: str) -> None:\n merge_path = os.path.join(\n os.path.dirname(os.path.dirname(merge_list[0])),\n \"Merge_Nexa_\" + time.strftime(\"%Y%m%d%H%M%S\"),\n \"Nexa_Collection\"\n )\n os.makedirs(merge_path, exist_ok=True)\n log_restore = []\n for merge in merge_list:\n logs = os.path.join(os.path.dirname(merge), \"Nexa_Recovery\", \"nexaflow.log\")\n with open(file=logs, mode=\"r\", encoding=\"utf-8\") as f:\n log_restore.extend(re.findall(r\"(?<=Recovery: ).*}\", f.read()))\n shutil.copytree(\n merge, merge_path, dirs_exist_ok=True,\n ignore=shutil.ignore_patterns(\"NexaFlow.html\", \"nexaflow.log\")\n )\n\n loader = FileSystemLoader(loader_merge_loc)\n environment = Environment(loader=loader)\n template = environment.get_template(\"template_information.html\")\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n total_list = [json.loads(file) for file in log_restore]\n html = template.render(report_time=report_time, total_list=total_list)\n\n total_html_path = os.path.join(os.path.dirname(merge_path), \"NexaFlow.html\")\n with open(file=total_html_path, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"合并汇总报告: {total_html_path}\\n\\n\")\n\n @staticmethod\n async def ask_create_report(major_loc, title, total_path, query_path, range_list):\n\n async def handler_inform(result):\n handler_list = []\n query = result.get(\"query\", \"TimeCost\")\n stage = result.get(\"stage\", {\"start\": 1, \"end\": 2, \"cost\": \"0.00000\"})\n frame = result.get(\"frame\", \"\")\n extra = result.get(\"extra\", \"\")\n proto = result.get(\"proto\", \"\")\n\n async def handler_frame():\n handler_image_list = []\n for image in os.listdir(\n os.path.join(\n query_path, query, os.path.basename(frame)\n )\n ):\n image_src = os.path.join(query, \"frame\", image)\n image_ids = re.search(r\"\\d+(?=_)\", image).group()\n 
timestamp = float(re.search(r\"(?<=_).+(?=\\.)\", image).group())\n handler_image_list.append(\n {\n \"src\": image_src,\n \"frames_id\": image_ids,\n \"timestamp\": f\"{timestamp:.5f}\"\n }\n )\n handler_image_list.sort(key=lambda x: int(x[\"frames_id\"]))\n return handler_image_list\n\n async def handler_extra():\n handler_extra_list = []\n for ex in os.listdir(\n os.path.join(\n query_path, query, os.path.basename(extra)\n )\n ):\n extra_src = os.path.join(query, \"extra\", ex)\n extra_idx = ex.split(\"(\")[0]\n handler_extra_list.append(\n {\n \"src\": extra_src,\n \"idx\": extra_idx\n }\n )\n handler_extra_list.sort(key=lambda x: int(x[\"idx\"].split(\"(\")[0]))\n return handler_extra_list\n\n image_list, extra_list = await asyncio.gather(\n handler_frame(), handler_extra()\n )\n\n handler_list.append(\n {\n \"query\": query,\n \"stage\": stage,\n \"image_list\": image_list,\n \"extra_list\": extra_list,\n \"proto\": os.path.join(query, os.path.basename(proto))\n }\n )\n return handler_list\n\n async def handler_start():\n single = {}\n if len(range_list) > 0:\n tasks = [handler_inform(result) for result in range_list]\n results = await asyncio.gather(*tasks)\n images_list = [ele for res in results for ele in res]\n\n major_loader = FileSystemLoader(major_loc)\n major_environment = Environment(loader=major_loader)\n major_template = major_environment.get_template(\"template_main.html\")\n\n html = major_template.render(title=title, images_list=images_list)\n report_html = os.path.join(query_path, f\"{title}.html\")\n with open(file=report_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成聚合报告: {os.path.basename(report_html)}\")\n\n cost_list = [cost['stage']['cost'] for cost in images_list]\n href_path = os.path.join(\n os.path.basename(total_path),\n title,\n os.path.basename(report_html)\n )\n single = {\n \"case\": title,\n \"cost_list\": cost_list,\n \"avg\": f\"{sum(map(float, cost_list)) / len(cost_list):.5f}\",\n \"href\": href_path\n }\n logger.debug(\"Recovery: \" + json.dumps(single, ensure_ascii=False))\n else:\n logger.info(\"没有可以聚合的报告 ...\")\n\n logger.info(f\"✪✪✪✪✪✪✪✪✪✪ {title} ✪✪✪✪✪✪✪✪✪✪\\n\\n\")\n return single\n\n return await handler_start()\n\n @staticmethod\n async def ask_create_total_report(file_name: str, major_loc: str, loader_total_loc: str):\n report_time = time.strftime('%Y.%m.%d %H:%M:%S')\n try:\n with open(file=os.path.join(file_name, \"Nexa_Recovery\", \"nexaflow.log\"), mode=\"r\", encoding=\"utf-8\") as f:\n open_file = f.read()\n except FileNotFoundError as e:\n return e\n else:\n match_list = re.findall(r\"(?<=Restore: ).*}\", open_file)\n range_list = [json.loads(file.replace(\"'\", '\"')) for file in match_list if file]\n grouped_dict = defaultdict(list)\n for part in range_list:\n parts = part.pop(\"title\"), part.pop(\"total_path\"), part.pop(\"query_path\")\n grouped_dict[parts].append(part)\n\n tasks = [\n Report.ask_create_report(\n major_loc,\n title,\n os.path.join(file_name, os.path.basename(total_path)),\n os.path.join(file_name, os.path.basename(total_path), title),\n range_list\n )\n for (title, total_path, query_path), range_list in grouped_dict.items()\n ]\n merge_result = await asyncio.gather(*tasks)\n total_list = [merge for merge in merge_result]\n\n if len(total_list) > 0:\n total_loader = FileSystemLoader(loader_total_loc)\n total_environment = Environment(loader=total_loader)\n total_template = total_environment.get_template(\"template_information.html\")\n\n html = 
total_template.render(report_time=report_time, total_list=total_list)\n total_html = os.path.join(file_name, \"NexaFlow.html\")\n with open(file=total_html, mode=\"w\", encoding=\"utf-8\") as f:\n f.write(html)\n logger.info(f\"生成汇总报告: {total_html}\")\n else:\n logger.info(\"没有可以汇总的报告 ...\")\n\n @staticmethod\n def draw(\n classifier_result,\n proto_path: str,\n compress_rate: float = None,\n target_size: Tuple[int, int] = None,\n boost_mode: bool = False,\n framix_template: str = None\n ) -> str:\n\n label_stable: str = \"稳定阶段\"\n label_unstable: str = \"不稳定阶段\"\n label_unspecific: str = \"不明阶段\"\n\n thumbnail_list: List[Dict[str, str]] = list()\n extra_dict: Dict[str, str] = dict()\n\n if not compress_rate:\n compress_rate = 0.2\n\n try:\n stage_range = classifier_result.get_stage_range()\n except AssertionError:\n stage_range = [classifier_result.data]\n\n if boost_mode:\n for cur_index in range(len(stage_range)):\n each = stage_range[cur_index]\n middle = each[len(each) // 2]\n image_list = []\n if middle.is_stable():\n label = label_stable\n image = toolbox.compress_frame(\n middle.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": middle.frame_id,\n \"timestamp\": f\"{middle.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n else:\n if middle.stage == constants.UNKNOWN_STAGE_FLAG:\n label = label_unspecific\n else:\n label = label_unstable\n\n if cur_index + 1 < len(stage_range):\n new_each = [*each, stage_range[cur_index + 1][0]]\n else:\n new_each = each\n\n for i in new_each:\n image = toolbox.compress_frame(\n i.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": i.frame_id,\n \"timestamp\": f\"{i.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n\n first, last = each[0], each[-1]\n title = (f\"{label} \"\n f\"区间: {first.frame_id}({first.timestamp:.5f}) - {last.frame_id}({last.timestamp:.5f}) \"\n f\"耗时: {last.timestamp - first.timestamp:.5f} \"\n f\"分类: {first.stage}\")\n thumbnail_list.append({title: image_list})\n else:\n for cur_index in range(len(stage_range)):\n each_range = stage_range[cur_index]\n middle = each_range[len(each_range) // 2]\n\n if middle.is_stable():\n label = label_stable\n elif middle.stage == constants.UNKNOWN_STAGE_FLAG:\n label = label_unspecific\n else:\n label = label_unstable\n\n if cur_index + 1 < len(stage_range):\n range_for_display = [*each_range, stage_range[cur_index + 1][0]]\n else:\n range_for_display = each_range\n\n image_list = []\n for i in range_for_display:\n image = toolbox.compress_frame(\n i.get_data(), compress_rate=compress_rate, target_size=target_size\n )\n frame = {\n \"frame_id\": i.frame_id,\n \"timestamp\": f\"{i.timestamp:.5f}\",\n \"image\": toolbox.np2b64str(image)\n }\n image_list.append(frame)\n\n first, last = each_range[0], each_range[-1]\n title = (f\"{label} \"\n f\"区间: {first.frame_id}({first.timestamp:.5f}) - {last.frame_id}({last.timestamp:.5f}) \"\n f\"耗时: {last.timestamp - first.timestamp:.5f} \"\n f\"分类: {first.stage}\")\n thumbnail_list.append({title: image_list})\n\n cost_dict = classifier_result.calc_changing_cost()\n timestamp = toolbox.get_timestamp_str()\n\n extra_dict[\"视频路径\"] = classifier_result.video_path\n extra_dict[\"总计帧数\"] = str(classifier_result.get_length())\n extra_dict[\"每帧间隔\"] = str(classifier_result.get_offset())\n\n def get_template() -> str:\n template_dirs = os.path.join(Constants.NEXA, \"template\")\n template_path = 
os.path.join(template_dirs, \"template_extra.html\")\n with open(template_path, encoding=constants.CHARSET) as t:\n template_file = t.read()\n return template_file\n\n if framix_template:\n template = Template(framix_template)\n else:\n template = Template(get_template())\n\n template_content = template.render(\n thumbnail_list=thumbnail_list,\n extras=extra_dict,\n background_color=constants.BACKGROUND_COLOR,\n cost_dict=cost_dict,\n timestamp=timestamp,\n version_code=\"1.0.0\"\n )\n\n default_name = f\"{timestamp}.html\"\n if os.path.isdir(proto_path):\n report_path = os.path.join(proto_path, default_name)\n else:\n report_path = proto_path\n\n with open(report_path, \"w\", encoding=constants.CHARSET) as fh:\n fh.write(template_content)\n logger.info(f\"生成单次报告: {os.path.basename(report_path)}\")\n\n return report_path" }, { "identifier": "Record", "path": "nexaflow/skills/record.py", "snippet": "class Record(object):\n\n def __init__(self):\n self.__connection: Optional[Popen] = None\n self.__record_event: threading.Event = threading.Event()\n self.__initial: str = \"scrcpy\"\n\n def start_record(self, video_path: str, serial: str = None) -> None:\n cmd = [\n self.__initial, \"--no-audio\", \"--video-bit-rate\", \"8M\", \"--max-fps\", \"60\", \"-Nr\",\n f\"{os.path.join(video_path, 'screen')}.mkv\"\n ]\n if serial:\n cmd.insert(1, \"-s\")\n cmd.insert(2, serial)\n self.__connection = Terminal.cmd_connect(cmd)\n\n def stream(flow: Union[int, IO[str]]) -> None:\n for line in iter(flow.readline, \"\"):\n logger.info(\" \".join(line.strip().split()))\n flow.close()\n\n if self.__connection:\n self.__record_event.set()\n threading.Thread(target=stream, args=(self.__connection.stdout, )).start()\n threading.Thread(target=stream, args=(self.__connection.stderr, )).start()\n time.sleep(1)\n\n def stop_record(self) -> None:\n self.__connection.send_signal(signal.CTRL_C_EVENT)\n self.__record_event.clear()\n self.__connection = None\n\n try:\n Terminal.cmd_oneshot([\"taskkill\", \"/im\", \"scrcpy.exe\"])\n except KeyboardInterrupt:\n logger.info(\"Stop with Ctrl_C_Event ...\")" }, { "identifier": "Player", "path": "nexaflow/skills/player.py", "snippet": "class Player(object):\n\n def __init__(self):\n pygame.mixer.init()\n\n @staticmethod\n def load_all_audio(audio_dirs: str) -> List[Tuple[str, str]]:\n audio_list = []\n for audio_file in os.listdir(audio_dirs):\n if \".mp3\" in audio_file or \".wav\" in audio_file:\n if match := re.search(r\".*?(?=\\.)\", audio_file):\n audio_list.append(\n (match.group(), os.path.join(audio_dirs, audio_file))\n )\n return audio_list\n\n @staticmethod\n def load_audio(audio_dirs: str, audio_name: str) -> Tuple[str, str]:\n query, audio = \"\", \"\"\n for audio_file in os.listdir(audio_dirs):\n if audio_name in audio_file:\n if match := re.search(r\".*?(?=\\.)\", audio_file):\n query = match.group()\n audio = os.path.join(audio_dirs, audio_file)\n return query, audio\n\n @staticmethod\n def play_audio(audio_file: str, volume: float = 1.0):\n if os.path.isfile(audio_file):\n pygame.mixer.music.load(audio_file)\n pygame.mixer.music.set_volume(volume)\n pygame.mixer.music.play()\n logger.info(f\"INFO: Playing audio {audio_file}\")\n while pygame.mixer.music.get_busy():\n pygame.time.Clock().tick(10)\n else:\n logger.error(f\"{audio_file} 不是一个音频文件 ...\")" }, { "identifier": "Switch", "path": "nexaflow/skills/switch.py", "snippet": "class Switch(object):\n\n def __init__(self):\n self.__ffmpeg = \"ffmpeg\"\n self.__ffprobe = \"ffprobe\"\n\n async def 
audio_reform(self, src: str, dst: str) -> None:\n \"\"\"\n 调整mp3编码格式为标准mp3\n :param src: 原音频路径\n :param dst: 新音频路径\n \"\"\"\n cmd = [self.__ffmpeg, \"-i\", src, \"-ar\", \"44100\", \"-b:a\", \"128k\", dst]\n await Terminal.cmd_line(*cmd)\n\n async def video_reform(self, src: str, dst: str) -> None:\n \"\"\"\n 转换视频格式\n :param src: 原始视频路径\n :param dst: 新视频路径\n \"\"\"\n cmd = [self.__ffmpeg, \"-i\", src, \"-r\", \"60\", dst]\n await Terminal.cmd_line(*cmd)\n\n async def video_change(self, src: str, dst: str) -> None:\n \"\"\"\n 调整视频\n :param src: 原视频路径\n :param dst: 新视频路径\n \"\"\"\n cmd = [\n self.__ffmpeg, \"-i\", src, \"-vf\", \"fps=60\", \"-c:v\",\n \"libx264\", \"-crf\", \"18\", \"-c:a\", \"copy\", dst\n ]\n await Terminal.cmd_line(*cmd)\n\n async def video_tailor(self, src: str, dst: str, start: str = \"00:00:00\", end: str = \"00:00:05\") -> None:\n \"\"\"\n 截取视频\n :param src: 原视频路径\n :param dst: 新视频路径\n :param start: 开始\n :param end: 结束\n \"\"\"\n before = os.path.basename(src).split(\".\")[0]\n after = os.path.basename(src).split(\".\")[-1]\n target = os.path.join(\n dst,\n f\"{before}_{time.strftime('%Y%m%d%H%M%S')}_{random.randint(100, 999)}.{after}\"\n )\n cmd = [self.__ffmpeg, \"-i\", src, \"-ss\", start, \"-t\", end, \"-c\", \"copy\", target]\n await Terminal.cmd_line(*cmd)\n\n async def video_cutter(self, src: str, dst: str, start: str = \"00:00:00\", end: str = \"00:00:05\") -> None:\n \"\"\"\n 流式截取视频\n :param src: 原视频路径\n :param dst: 新视频路径\n :param start: 开始\n :param end: 结束\n \"\"\"\n before = os.path.basename(src).split(\".\")[0]\n after = os.path.basename(src).split(\".\")[-1]\n target = os.path.join(\n dst,\n f\"{before}_{time.strftime('%Y%m%d%H%M%S')}_{random.randint(100, 999)}.{after}\"\n )\n cmd = [\n self.__ffmpeg, \"-i\", src, \"-ss\", start, \"-t\", end, \"-vf\", \"fps=60\",\n \"-c:v\", \"libx264\", \"-crf\", \"18\", \"-c:a\", \"copy\", target\n ]\n await Terminal.cmd_line(*cmd)\n\n async def video_length(self, src: str) -> float:\n \"\"\"\n 查看视频的时间长度\n :param src: 原视频路径\n :return: 视频时间长度\n \"\"\"\n cmd = [\n self.__ffprobe, \"-v\", \"error\", \"-show_entries\", \"format=duration\",\n \"-of\", \"default=noprint_wrappers=1:nokey=1\", \"-i\", src\n ]\n result = await Terminal.cmd_line(*cmd)\n return float(result.strip())" }, { "identifier": "VideoCutter", "path": "nexaflow/cutter/cutter.py", "snippet": "class VideoCutter(object):\n\n def __init__(\n self,\n step: int = None,\n compress_rate: float = None,\n target_size: typing.Tuple[int, int] = None,\n ):\n\n self.step = step or 1\n\n if (not compress_rate) and (not target_size):\n # logger.debug(\n # f\"no compress rate or target size received. 
set compress rate to 0.2\"\n # )\n compress_rate = 0.2\n\n self._hook_list: typing.List[BaseHook] = list()\n compress_hook = CompressHook(\n overwrite=True, compress_rate=compress_rate, target_size=target_size\n )\n grey_hook = GreyHook(overwrite=True)\n self.add_hook(compress_hook)\n self.add_hook(grey_hook)\n\n def add_hook(self, new_hook: BaseHook):\n self._hook_list.append(new_hook)\n # logger.debug(f\"add hook: {new_hook.__class__.__name__}\")\n\n @staticmethod\n def pic_split(origin: np.ndarray, block: int) -> typing.List[np.ndarray]:\n result: typing.List[np.ndarray] = list()\n for each_block in np.array_split(origin, block, axis=0):\n sub_block = np.array_split(each_block, block, axis=1)\n result += sub_block\n return result\n\n def _apply_hook(self, frame: VideoFrame, *args, **kwargs) -> VideoFrame:\n for each_hook in self._hook_list:\n frame = each_hook.do(frame, *args, **kwargs)\n return frame\n\n @staticmethod\n def compare_frame_list(\n src: typing.List[np.ndarray], target: typing.List[np.ndarray]\n ) -> typing.List[float]:\n\n ssim = 1.0\n mse = 0.0\n psnr = 0.0\n\n for part_index, (each_start, each_end) in enumerate(zip(src, target)):\n part_ssim = toolbox.compare_ssim(each_start, each_end)\n if part_ssim < ssim:\n ssim = part_ssim\n\n part_mse = toolbox.calc_mse(each_start, each_end)\n if part_mse > mse:\n mse = part_mse\n\n part_psnr = toolbox.calc_psnr(each_start, each_end)\n if part_psnr > psnr:\n psnr = part_psnr\n # logger.debug(\n # f\"part {part_index}: ssim={part_ssim}; mse={part_mse}; psnr={part_psnr}\"\n # )\n return [ssim, mse, psnr]\n\n @staticmethod\n def split_into_parts(value: int, parts: int) -> List[Tuple[int, int, int]]:\n division, remainder = value // parts, value % parts\n result, current_start = [], 1\n\n for i in range(parts):\n current_end = current_start + division - 1\n if i == parts - 1: # 处理最后一部分,加上余数\n current_end += remainder\n result.append((current_start, current_end, current_end - current_start))\n\n if i < parts - 1: # 不是最后一部分时,添加断开部分\n gap_start = current_end\n gap_end = current_end + 1\n result.append((gap_start, gap_end, gap_end - gap_start))\n current_start = current_end + 1\n\n return result\n\n def handler_frames(self, window: Window) -> typing.List[VideoCutRange]:\n range_list_part = []\n\n def technique():\n frame_list = window.load_data()\n frame_list = [self._apply_hook(each) for each in frame_list]\n\n ssim_list, mse_list, psnr_list = [], [], []\n\n cur_frame = frame_list[0]\n first_target_frame = frame_list[1]\n cur_frame_list = self.pic_split(cur_frame.data, window.block)\n for each in frame_list[1:]:\n each_frame_list = self.pic_split(each.data, window.block)\n ssim, mse, psnr = self.compare_frame_list(\n cur_frame_list, each_frame_list\n )\n ssim_list.append(ssim)\n mse_list.append(mse)\n psnr_list.append(psnr)\n\n ssim = window.float_merge(ssim_list)\n mse = window.float_merge(mse_list)\n psnr = window.float_merge(psnr_list)\n\n range_list_part.append(\n VideoCutRange(\n window.video,\n start=cur_frame.frame_id, end=first_target_frame.frame_id,\n ssim=[ssim], mse=[mse], psnr=[psnr],\n start_time=cur_frame.timestamp, end_time=first_target_frame.timestamp,\n )\n )\n\n pbar = toolbox.show_progress(window.frame_total, 174, \"Cutter\")\n while True:\n technique()\n pbar.update(1)\n\n continue_flag = window.shift()\n if not continue_flag:\n pbar.close()\n break\n\n return range_list_part\n\n def _convert_video_into_range_list(\n self, video: VideoObject, block: int, window_size: int, window_coefficient: int\n ) -> 
typing.List[VideoCutRange]:\n\n step = self.step\n video_length = video.frame_count\n range_list: typing.List[VideoCutRange] = list()\n logger.info(f\"总帧数: {video_length} 片段数: {video_length - 1} 分辨率: {video.frame_size}\")\n\n window_list: List[\"Window\"] = []\n for index, parts in enumerate(self.split_into_parts(video_length, 2)):\n start, end, size = parts\n logger.info(f\"帧片段: {index + 1:02} Start: {start:03} End: {end:03} Length: {size:03}\")\n window = Window(video, step, block, window_size, window_coefficient, start, end, size)\n window_list.append(window)\n\n with ThreadPoolExecutor() as executor:\n futures = [executor.submit(self.handler_frames, w) for w in window_list]\n for future in futures:\n range_list.extend(future.result())\n\n return range_list\n\n def cut(\n self,\n video: typing.Union[str, VideoObject],\n block: int = None,\n window_size: int = None,\n window_coefficient: int = None,\n *_,\n **kwargs,\n ) -> VideoCutResult:\n\n if not block:\n block = 3\n if not window_size:\n window_size = 1\n if not window_coefficient:\n window_coefficient = 2\n\n start_time = time.time()\n if isinstance(video, str):\n video = VideoObject(video)\n\n logger.info(f\"开始压缩视频: {os.path.basename(video.path)}\")\n range_list = self._convert_video_into_range_list(\n video, block, window_size, window_coefficient\n )\n logger.info(f\"视频压缩完成: {os.path.basename(video.path)}\")\n logger.info(f\"视频压缩耗时: {(time.time() - start_time):.2f}秒\")\n\n return VideoCutResult(video, range_list, cut_kwargs=kwargs)" }, { "identifier": "VideoObject", "path": "nexaflow/video.py", "snippet": "class VideoObject(object):\n\n def __init__(\n self,\n path: typing.Union[str, os.PathLike],\n fps: int = None,\n ):\n \"\"\"\n 初始化,检查文件路径是否有效,执行其他一些初始化操作\n \"\"\"\n assert os.path.isfile(path), f\"video {path} not existed\"\n self.path: str = str(path)\n self.grey_data: typing.Optional[typing.Tuple[\"VideoFrame\"]] = tuple() # 灰度帧\n self.hued_data: typing.Optional[typing.Tuple[\"ColorFrame\"]] = tuple() # 彩色帧\n\n if fps:\n video_path = os.path.join(tempfile.mkdtemp(), f\"tmp_{fps}.mp4\")\n logger.debug(f\"convert video, and bind path to {video_path}\")\n logger.info(f\"转换视频: {video_path}\")\n toolbox.fps_convert(\n fps, self.path, video_path, imageio_ffmpeg.get_ffmpeg_exe()\n )\n self.path = video_path\n\n with toolbox.video_capture(self.path) as cap:\n self.frame_count = toolbox.get_frame_count(cap)\n self.frame_size = toolbox.get_frame_size(cap)\n\n logger.info(f\"视频已生成,视频帧长度: {self.frame_count} 分辨率: {self.frame_size}\")\n\n def __str__(self):\n return f\"<VideoObject path={self.path}>\"\n\n __repr__ = __str__\n\n def sync_timestamp(self, frame_data: tuple[VideoFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n vid_count = vid.reader.nframes\n pbar = toolbox.show_progress(vid_count, 153, \"Synzer\")\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n pbar.update(1)\n pbar.close()\n\n def sync_backstage(self, frame_data: tuple[ColorFrame]) -> None:\n assert frame_data, \"load_frames() first\"\n vid = mpy.VideoFileClip(self.path)\n\n for frame_id, (timestamp, _) in enumerate(vid.iter_frames(with_times=True)):\n if frame_id >= len(frame_data):\n break\n # frame_id_real = frame_id + 1\n if not 
frame_data[frame_id].timestamp:\n # logger.debug(f\"fix frame {frame_id_real}'s timestamp: {timestamp}\")\n frame_data[frame_id].timestamp = timestamp\n\n def clean_frames(self):\n \"\"\"\n 清除所有帧数据\n \"\"\"\n self.grey_data = tuple()\n self.hued_data = tuple()\n\n @staticmethod\n def frame_details(frame_type):\n each_cost = frame_type[0].data.nbytes / (1024 ** 2)\n total_cost = each_cost * len(frame_type)\n frame_size = frame_type[0].data.shape[::-1]\n return f\"{frame_type[0].__class__.__name__}: [{each_cost:.2f} MB] [{total_cost:.2f} MB] {frame_size}\"\n\n def load_frames(self, color: bool = False):\n \"\"\"\n 从文件中加载所有帧到内存\n \"\"\"\n logger.info(f\"加载视频帧到内存: {os.path.basename(self.path)}\")\n\n def load_stream(frames: type[VideoFrame]):\n pbar = toolbox.show_progress(self.frame_count, 180, \"Loader\")\n data: list[VideoFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n pbar.update(1)\n pbar.close()\n return data\n\n def back_ground(frames: type[ColorFrame]):\n data: list[ColorFrame] = []\n with toolbox.video_capture(self.path) as cap:\n for success, frame in iter(lambda: cap.read(), (False, None)):\n if success:\n data.append(frames.initial(cap, frame))\n return data\n\n def load_stream_sync(brand):\n self.sync_timestamp(tuple(frame_data := load_stream(brand)))\n return frame_data\n\n def back_ground_sync(brand):\n self.sync_backstage(tuple(frame_data := back_ground(brand)))\n return frame_data\n\n start_time, task, hued = time.time(), None, None\n if color:\n task = ThreadPoolExecutor()\n hued = task.submit(back_ground_sync, ColorFrame)\n\n grey = load_stream_sync(VideoFrame)\n self.grey_data = tuple(grey)\n logger.info(f\"灰度帧已加载: {self.frame_details(self.grey_data)}\")\n logger.info(f\"视频加载耗时: {time.time() - start_time:.2f} 秒\")\n return task, hued\n\n def _read_from_file(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从文件中读取帧\n \"\"\"\n with toolbox.video_capture(self.path) as cap:\n success, frame = cap.read()\n while success:\n yield VideoFrame.initial(cap, frame)\n success, frame = cap.read()\n\n def _read_from_mem(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 从内存中读取帧\n \"\"\"\n for each_frame in self.grey_data:\n yield each_frame\n\n def _read(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 选择从文件还是从内存中读取帧\n \"\"\"\n if self.grey_data:\n yield from self._read_from_mem()\n else:\n yield from self._read_from_file()\n\n def get_iterator(self) -> typing.Generator[\"VideoFrame\", None, None]:\n \"\"\"\n 获取帧的迭代器\n \"\"\"\n return self._read()\n\n def get_operator(self) -> _BaseFrameOperator:\n \"\"\"\n 根据是否已经加载帧,返回相应的FrameOperator(`MemFrameOperator`或`FileFrameOperator`)\n \"\"\"\n if self.grey_data:\n return MemFrameOperator(self)\n return FileFrameOperator(self)\n\n def __iter__(self):\n \"\"\"\n 返回一个用于迭代帧的迭代器\n \"\"\"\n return self.get_iterator()" }, { "identifier": "Frame", "path": "nexaflow/video.py", "snippet": "class Frame(object):\n\n def __init__(self, frame_id: int, timestamp: float, data: np.ndarray):\n self.frame_id: int = frame_id\n self.timestamp: float = timestamp\n self.data: np.ndarray = data\n\n @staticmethod\n def initial(cap: cv2.VideoCapture, frame: np.ndarray) -> \"Frame\":\n raise NotImplementedError\n\n def copy(self) -> \"Frame\":\n raise NotImplementedError" }, { "identifier": "KerasClassifier", "path": "nexaflow/classifier/keras_classifier.py", "snippet": "class 
KerasClassifier(BaseModelClassifier):\n\n UNKNOWN_STAGE_NAME = constants.UNKNOWN_STAGE_FLAG\n MODEL_DENSE = 6\n\n def __init__(\n self,\n score_threshold: float = None,\n data_size: typing.Sequence[int] = None,\n nb_train_samples: int = None,\n nb_validation_samples: int = None,\n epochs: int = None,\n batch_size: int = None,\n *_,\n **__,\n ):\n super(KerasClassifier, self).__init__(*_, **__)\n\n # 模型\n self._model: typing.Optional[keras.Sequential] = None\n # 配置\n self.score_threshold: float = score_threshold or 0.0\n self.data_size: typing.Sequence[int] = data_size or (200, 200)\n self.nb_train_samples: int = nb_train_samples or 64\n self.nb_validation_samples: int = nb_validation_samples or 64\n self.epochs: int = epochs or 20\n self.batch_size: int = batch_size or 4\n\n # logger.debug(f\"score threshold: {self.score_threshold}\")\n # logger.debug(f\"data size: {self.data_size}\")\n # logger.debug(f\"nb train samples: {self.nb_train_samples}\")\n # logger.debug(f\"nb validation samples: {self.nb_validation_samples}\")\n # logger.debug(f\"epochs: {self.epochs}\")\n # logger.debug(f\"batch size: {self.batch_size}\")\n\n @property\n def follow_keras_size(self):\n return self.data_size[1], self.data_size[0]\n\n @property\n def follow_cv_size(self):\n return self.data_size[0], self.data_size[1]\n\n def clean_model(self):\n self._model = None\n\n def save_model(self, model_path: str, overwrite: bool = None):\n logger.debug(f\"save model to {model_path}\")\n # assert model file\n if os.path.isfile(model_path) and not overwrite:\n raise FileExistsError(\n f\"model file {model_path} already existed, you can set `overwrite` True to cover it\"\n )\n # assert model data is not empty\n assert self._model, \"model is empty\"\n print(self._model.summary())\n self._model.save_weights(model_path)\n\n def load_model(self, model_path: str, overwrite: bool = None):\n # logger.debug(f\"load model from {model_path}\")\n logger.info(f\"加载Keras神经网络引擎 ...\")\n # assert model file\n assert os.path.isfile(model_path), f\"model file {model_path} not existed\"\n # assert model data is empty\n if self._model and not overwrite:\n raise RuntimeError(\n f\"model is not empty, you can set `overwrite` True to cover it\"\n )\n self._model = self.create_model()\n self._model.load_weights(model_path)\n\n def create_model(self) -> keras.Sequential:\n # logger.info(f\"creating Keras sequential model\")\n logger.info(\"Keras神经网络引擎创建图像分析模型 ...\")\n if keras.backend.image_data_format() == \"channels_first\":\n input_shape = (1, *self.follow_keras_size)\n else:\n input_shape = (*self.follow_keras_size, 1)\n\n model = keras.Sequential()\n\n model.add(keras.layers.Conv2D(32, (3, 3), padding=\"same\", input_shape=input_shape))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Conv2D(64, (3, 3), padding=\"same\"))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Conv2D(128, (3, 3), padding=\"same\"))\n model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))\n model.add(keras.layers.Dropout(0.25))\n\n model.add(keras.layers.Flatten())\n model.add(keras.layers.Dense(256, activation=\"relu\"))\n model.add(keras.layers.Dropout(0.5))\n model.add(keras.layers.Dense(self.MODEL_DENSE, activation=\"softmax\"))\n\n model.compile(optimizer=\"adam\", loss=\"sparse_categorical_crossentropy\", metrics=[\"accuracy\"])\n\n # logger.info(\"Keras model created\")\n logger.info(\"Keras神经网络引擎加载完成,开始分析图像 
...\")\n return model\n\n def train(self, data_path: str = None, *_, **__):\n\n def _data_verify(p: str):\n p = pathlib.Path(p)\n assert p.is_dir(), f\"{p} is not a valid directory\"\n\n number_of_dir = len([each for each in os.listdir(p) if (p / each).is_dir()])\n assert (\n number_of_dir > 1\n ), f\"dataset only contains one class. maybe some path errors happened: {p}?\"\n\n assert number_of_dir <= self.MODEL_DENSE, (\n f\"dataset has {number_of_dir} classes (more than \" + str(self.MODEL_DENSE) + \")\"\n )\n\n _data_verify(data_path)\n\n if not self._model:\n self._model = self.create_model()\n\n datagen = keras.preprocessing.image.ImageDataGenerator(\n rescale=1.0 / 16,\n shear_range=0.2,\n zoom_range=0.2,\n validation_split=0.33,\n horizontal_flip=True # 水平翻转增强\n )\n\n train_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.follow_keras_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"training\",\n )\n\n validation_generator = datagen.flow_from_directory(\n data_path,\n target_size=self.follow_keras_size,\n batch_size=self.batch_size,\n color_mode=\"grayscale\",\n class_mode=\"sparse\",\n subset=\"validation\",\n )\n\n self._model.fit(\n train_generator,\n epochs=self.epochs,\n validation_data=validation_generator,\n )\n\n logger.debug(\"train finished\")\n\n def predict(self, pic_path: str, *args, **kwargs) -> str:\n pic_object = toolbox.imread(pic_path)\n # fake VideoFrame for apply_hook\n fake_frame = VideoFrame(0, 0.0, pic_object)\n fake_frame = self._apply_hook(fake_frame, *args, **kwargs)\n return self.predict_with_object(fake_frame.data)\n\n def predict_with_object(self, frame: np.ndarray) -> str:\n # resize for model\n frame = cv2.resize(frame, dsize=self.follow_cv_size)\n frame = np.expand_dims(frame, axis=[0, -1])\n # verbose = 0, 静默Keras分类显示\n result = self._model.predict(frame, verbose=0)\n tag = str(np.argmax(result, axis=1)[0])\n confidence = result.max()\n # logger.debug(f\"confidence: {confidence}\")\n if confidence < self.score_threshold:\n logger.warning(\n f\"max score is lower than {self.score_threshold}, unknown class\"\n )\n return self.UNKNOWN_STAGE_NAME\n return tag\n\n def _classify_frame(self, frame: VideoFrame, *_, **__) -> str:\n return self.predict_with_object(frame.data)" }, { "identifier": "BaseHook", "path": "nexaflow/hook.py", "snippet": "class BaseHook(object):\n\n def __init__(self, *_, **__):\n # logger.debug(f\"start initialing: {self.__class__.__name__} ...\")\n logger.info(f\"加载视频帧处理单元: Frame Processor {self.__class__.__name__} ...\")\n self.result = dict()\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n # info = f\"execute hook: {self.__class__.__name__}\"\n\n frame_id = frame.frame_id\n if frame_id != -1:\n # logger.debug(f\"{info}, frame id: {frame_id}\")\n pass\n return frame" }, { "identifier": "CropHook", "path": "nexaflow/hook.py", "snippet": "class CropHook(_AreaBaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n height_range, width_range = self.convert_size_and_offset(*frame.data.shape)\n frame.data[: height_range[0], :] = 0\n frame.data[height_range[1]:, :] = 0\n frame.data[:, : width_range[0]] = 0\n frame.data[:, width_range[1]:] = 0\n return frame" }, { "identifier": "OmitHook", "path": "nexaflow/hook.py", "snippet": "class OmitHook(_AreaBaseHook):\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n 
height_range, width_range = self.convert_size_and_offset(*frame.data.shape)\n frame.data[\n height_range[0]: height_range[1], width_range[0]: width_range[1]\n ] = 0\n return frame" }, { "identifier": "FrameSaveHook", "path": "nexaflow/hook.py", "snippet": "class FrameSaveHook(BaseHook):\n\n def __init__(self, target_dir: str, *_, **__):\n super().__init__(*_, **__)\n\n self.target_dir = target_dir\n os.makedirs(target_dir, exist_ok=True)\n # logger.debug(f\"target dir: {target_dir}\")\n\n def do(self, frame: VideoFrame, *_, **__) -> typing.Optional[VideoFrame]:\n super().do(frame, *_, **__)\n\n safe_timestamp = str(frame.timestamp).replace(\".\", \"_\")\n frame_name = f\"{frame.frame_id}({safe_timestamp}).png\"\n target_path = os.path.join(self.target_dir, frame_name)\n\n # 不能保存中文路径\n # cv2.imwrite(target_path, frame.data)\n # logger.debug(f\"frame saved to {target_path}\")\n\n # 保存中文路径\n cv2.imencode(\".png\", frame.data)[1].tofile(target_path)\n\n return frame" }, { "identifier": "ClassifierResult", "path": "nexaflow/classifier/base.py", "snippet": "class ClassifierResult(object):\n\n LABEL_DATA: str = \"data\"\n LABEL_VIDEO_PATH: str = \"video_path\"\n\n def __init__(self, data: typing.List[SingleClassifierResult]):\n self.video_path: str = data[0].video_path\n self.data: typing.List[SingleClassifierResult] = data\n\n def get_timestamp_list(self) -> typing.List[float]:\n return [each.timestamp for each in self.data]\n\n def get_stage_list(self) -> typing.List[str]:\n return [each.stage for each in self.data]\n\n def get_length(self) -> int:\n return len(self.data)\n\n def get_offset(self) -> float:\n return self.data[1].timestamp - self.data[0].timestamp\n\n def get_ordered_stage_set(self) -> typing.List[str]:\n ret = list()\n for each in self.get_stage_list():\n if not ret:\n ret.append(each)\n continue\n if each == ret[-1]:\n continue\n ret.append(each)\n return ret\n\n def get_stage_set(self) -> typing.Set[str]:\n return set(self.get_stage_list())\n\n def to_dict(\n self,\n ) -> typing.Dict[str, typing.List[typing.List[SingleClassifierResult]]]:\n stage_list = list(self.get_stage_set())\n try:\n int(stage_list[0])\n except ValueError:\n stage_list.sort()\n else:\n stage_list.sort(key=lambda o: int(o))\n\n d = OrderedDict()\n for each_stage in stage_list:\n d[each_stage] = self.get_specific_stage_range(each_stage)\n return d\n\n def contain(self, stage_name: str) -> bool:\n return stage_name in self.get_stage_set()\n\n def first(self, stage_name: str) -> SingleClassifierResult:\n for each in self.data:\n if each.stage == stage_name:\n # logger.debug(f\"first frame of {stage_name}: {each}\")\n return each\n logger.warning(f\"no stage named {stage_name} found\")\n\n def last(self, stage_name: str) -> SingleClassifierResult:\n for each in self.data[::-1]:\n if each.stage == stage_name:\n # logger.debug(f\"last frame of {stage_name}: {each}\")\n return each\n logger.warning(f\"no stage named {stage_name} found\")\n\n def get_stage_range(self) -> typing.List[typing.List[SingleClassifierResult]]:\n result: typing.List[typing.List[SingleClassifierResult]] = []\n\n cur = self.data[0]\n cur_index = cur.frame_id - 1\n ptr = cur_index\n length = self.get_length()\n while ptr < length:\n next_one = self.data[ptr]\n if cur.stage == next_one.stage:\n ptr += 1\n continue\n\n result.append(self.data[cur_index: ptr + 1 - 1] or [self.data[cur_index]])\n cur = next_one\n cur_index = next_one.frame_id - 1\n\n assert len(result) > 0, \"video seems to only contain one stage\"\n\n last_data = 
self.data[-1]\n last_result = result[-1][-1]\n if last_result != last_data:\n result.append(\n self.data[last_result.frame_id - 1 + 1: last_data.frame_id - 1 + 1]\n or [self.data[last_result.frame_id - 1]]\n )\n # logger.debug(f\"get stage range: {result}\")\n return result\n\n def get_specific_stage_range(\n self, stage_name: str\n ) -> typing.List[typing.List[SingleClassifierResult]]:\n ret = list()\n for each_range in self.get_stage_range():\n cur = each_range[0]\n if cur.stage == stage_name:\n ret.append(each_range)\n return ret\n\n def get_not_stable_stage_range(\n self,\n ) -> typing.List[typing.List[SingleClassifierResult]]:\n unstable = self.get_specific_stage_range(constants.UNSTABLE_FLAG)\n ignore = self.get_specific_stage_range(constants.IGNORE_FLAG)\n return sorted(unstable + ignore, key=lambda x: x[0].stage)\n\n def mark_range(self, start: int, end: int, target_stage: str):\n for each in self.data[start:end]:\n each.stage = target_stage\n # logger.debug(f\"range {start} to {end} has been marked as {target_stage}\")\n\n def mark_range_unstable(self, start: int, end: int):\n self.mark_range(start, end, constants.UNSTABLE_FLAG)\n\n def mark_range_ignore(self, start: int, end: int):\n self.mark_range(start, end, constants.IGNORE_FLAG)\n\n def time_cost_between(self, start_stage: str, end_stage: str) -> float:\n return self.first(end_stage).timestamp - self.last(start_stage).timestamp\n\n def get_important_frame_list(self) -> typing.List[SingleClassifierResult]:\n result = [self.data[0]]\n\n prev = self.data[0]\n for cur in self.data[1:]:\n if cur.stage != prev.stage:\n result.append(prev)\n result.append(cur)\n prev = cur\n\n if result[-1] != self.data[-1]:\n result.append(self.data[-1])\n return result\n\n def calc_changing_cost(\n self,\n ) -> typing.Dict[str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]]:\n\n cost_dict: typing.Dict[\n str, typing.Tuple[SingleClassifierResult, SingleClassifierResult]\n ] = {}\n i = 0\n while i < len(self.data) - 1:\n cur = self.data[i]\n next_one = self.data[i + 1]\n\n if not next_one.is_stable():\n for j in range(i + 1, len(self.data)):\n i = j\n next_one = self.data[j]\n if next_one.is_stable():\n break\n\n changing_name = f\"from {cur.stage} to {next_one.stage}\"\n cost_dict[changing_name] = (cur, next_one)\n else:\n i += 1\n return cost_dict\n\n def dumps(self) -> str:\n\n def _handler(obj: object):\n if isinstance(obj, np.ndarray):\n return \"<np.ndarray object>\"\n return obj.__dict__\n\n return json.dumps(self, sort_keys=True, default=_handler)\n\n def dump(self, json_path: str, **kwargs):\n logger.debug(f\"dump result to {json_path}\")\n assert not os.path.isfile(json_path), f\"{json_path} already existed\"\n with open(json_path, \"w+\", **kwargs) as f:\n f.write(self.dumps())\n\n @classmethod\n def load(cls, from_file: str) -> \"ClassifierResult\":\n assert os.path.isfile(from_file), f\"file {from_file} not existed\"\n with open(from_file, encoding=constants.CHARSET) as f:\n content = json.load(f)\n\n data = content[cls.LABEL_DATA]\n return ClassifierResult([SingleClassifierResult(**each) for each in data])\n\n def diff(self, another: \"ClassifierResult\") -> DiffResult:\n return DiffResult(self, another)\n\n def is_order_correct(self, should_be: typing.List[str]) -> bool:\n cur = self.get_ordered_stage_set()\n len_cur, len_should_be = len(cur), len(should_be)\n if len_cur == len_should_be:\n return cur == should_be\n if len_cur < len_should_be:\n return False\n\n ptr_should, ptr_cur = 0, 0\n while ptr_cur < len_cur:\n 
if cur[ptr_cur] == should_be[ptr_should]:\n ptr_should += 1\n ptr_cur += 1\n if ptr_should == len_should_be:\n return True\n return False\n\n get_frame_length = get_offset" }, { "identifier": "SingleClassifierResult", "path": "nexaflow/classifier/base.py", "snippet": "class SingleClassifierResult(object):\n\n def __init__(\n self,\n video_path: str,\n frame_id: int,\n timestamp: float,\n stage: str,\n data: np.ndarray = None,\n ):\n self.video_path: str = video_path\n self.frame_id: int = frame_id\n self.timestamp: float = timestamp\n self.stage: str = stage\n self.data: np.ndarray = data\n\n def to_video_frame(self, *args, **kwargs) -> VideoFrame:\n if self.data is not None:\n return VideoFrame(self.frame_id, self.timestamp, self.data)\n\n with toolbox.video_capture(self.video_path) as cap:\n frame = toolbox.get_frame(cap, self.frame_id)\n compressed = toolbox.compress_frame(frame, *args, **kwargs)\n return VideoFrame(self.frame_id, self.timestamp, compressed)\n\n def get_data(self) -> np.ndarray:\n return self.to_video_frame().data\n\n def is_stable(self) -> bool:\n return self.stage not in (\n constants.UNSTABLE_FLAG,\n constants.IGNORE_FLAG,\n constants.UNKNOWN_STAGE_FLAG,\n )\n\n def contain_image(\n self, *, image_path: str = None, image_object: np.ndarray = None, **kwargs\n ) -> typing.Dict[str, typing.Any]:\n return self.to_video_frame().contain_image(\n image_path=image_path, image_object=image_object, **kwargs\n )\n\n def to_dict(self) -> typing.Dict:\n return self.__dict__\n\n def __str__(self):\n return f\"<ClassifierResult stage={self.stage} frame_id={self.frame_id} timestamp={self.timestamp}>\"\n\n __repr__ = __str__" } ]
import os import cv2 import time import random import asyncio from loguru import logger from typing import List, Union, Optional from concurrent.futures import ThreadPoolExecutor from nexaflow import toolbox from nexaflow.skills.report import Report from nexaflow.skills.record import Record from nexaflow.skills.player import Player from nexaflow.skills.switch import Switch from nexaflow.cutter.cutter import VideoCutter from nexaflow.video import VideoObject, Frame from nexaflow.classifier.keras_classifier import KerasClassifier from nexaflow.hook import BaseHook, CropHook, OmitHook, FrameSaveHook from nexaflow.classifier.base import ClassifierResult, SingleClassifierResult
16146
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record()
class Alynex(object): target_size: tuple = (350, 700) fps: int = 60 step: int = 1 block: int = 6 threshold: Union[int | float] = 0.97 offset: int = 3 compress_rate: float = 0.5 window_size: int = 1 window_coefficient: int = 2 kc: KerasClassifier = KerasClassifier( target_size=target_size, data_size=target_size ) def __init__(self): self.__report: Optional[Report] = None self.__record: Optional[Record] = Record()
self.__player: Optional[Player] = Player()
3
2023-11-13 05:27:34+00:00
24k
deepseek-ai/DreamCraft3D
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n # 4D Gaussian Annealing\n anneal_density_blob_std_config: Optional[dict] = None\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original 
scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )\n\n # FIXME: use progressive normal eps\n def update_step(\n self, epoch: int, global_step: int, on_load_weights: bool = False\n ) -> None:\n if self.cfg.anneal_density_blob_std_config is not None:\n min_step = self.cfg.anneal_density_blob_std_config.min_anneal_step\n max_step = self.cfg.anneal_density_blob_std_config.max_anneal_step\n if global_step >= min_step and global_step <= max_step:\n end_val = self.cfg.anneal_density_blob_std_config.end_val\n start_val = self.cfg.anneal_density_blob_std_config.start_val\n self.density_blob_std = start_val + (global_step - min_step) * (\n end_val - start_val\n ) / (max_step - min_step)\n\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": 
"threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * 
-1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def __init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = 
int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom 
/ torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def 
get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n elif config.otype == \"HashGridSpatialTime\":\n encoding = TCNNEncodingSpatialTime(n_input_dims, config) # 4D-fy encoding\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import threestudio
import trimesh
from dataclasses import dataclass, field
from threestudio.models.geometry.base import (
    BaseExplicitGeometry,
    BaseGeometry,
    contract_to_unisphere,
)
from threestudio.models.geometry.implicit_sdf import ImplicitSDF
from threestudio.models.geometry.implicit_volume import ImplicitVolume
from threestudio.models.isosurface import MarchingTetrahedraHelper
from threestudio.models.mesh import Mesh
from threestudio.models.networks import get_encoding, get_mlp
from threestudio.utils.misc import broadcast
from threestudio.utils.ops import scale_tensor
from threestudio.utils.typing import *
from pysdf import SDF
16118
            mesh2std = np.linalg.inv(std2mesh)

            # scaling
            scale = np.abs(mesh.vertices).max()
            mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
            mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T

            sdf = SDF(mesh.vertices, mesh.faces)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                # add a negative signed here
                # as in pysdf the inside of the shape has positive signed distance
                return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
                    points_rand
                )[..., None]

            get_gt_sdf = func
        else:
            raise ValueError(
                f"Unknown shape initialization type: {self.cfg.shape_init}"
            )

        sdf_gt = get_gt_sdf(
            scale_tensor(
                self.isosurface_helper.grid_vertices,
                self.isosurface_helper.points_range,
                self.isosurface_bbox,
            )
        )
        self.sdf.data = sdf_gt

        # explicit broadcast to ensure param consistency across ranks
        for param in self.parameters():
            broadcast(param, src=0)

    def isosurface(self) -> Mesh:
        # return cached mesh if fix_geometry is True to save computation
        if self.cfg.fix_geometry and self.mesh is not None:
            return self.mesh
        mesh = self.isosurface_helper(self.sdf, self.deformation)
        mesh.v_pos = scale_tensor(
            mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox
        )
        if self.cfg.isosurface_remove_outliers:
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        self.mesh = mesh
        return mesh

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        if self.cfg.geometry_only:
            return {}
        assert (
            output_normal == False
        ), f"Normal output is not supported for {self.__class__.__name__}"
        points_unscaled = points  # points in the original scale
        points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)
        enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
        features = self.feature_network(enc).view(
            *points.shape[:-1], self.cfg.n_feature_dims
        )
        return {"features": features}

    @staticmethod
    @torch.no_grad()
    def create_from(
        other: BaseGeometry,
        cfg: Optional[Union[dict, DictConfig]] = None,
        copy_net: bool = True,
        **kwargs,
    ) -> "TetrahedraSDFGrid":
        if isinstance(other, TetrahedraSDFGrid):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution
            instance.isosurface_bbox = other.isosurface_bbox.clone()
            instance.sdf.data = other.sdf.data.clone()
            if (
                instance.cfg.isosurface_deformable_grid
                and other.cfg.isosurface_deformable_grid
            ):
                assert (
                    instance.deformation is not None and other.deformation is not None
                )
                instance.deformation.data = other.deformation.data.clone()
            if (
                not instance.cfg.geometry_only
                and not other.cfg.geometry_only
                and copy_net
            ):
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
        elif isinstance(other, ImplicitVolume):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            if other.cfg.isosurface_method != "mt":
                other.cfg.isosurface_method = "mt"
                threestudio.warn(
                    f"Override isosurface_method of the source geometry to 'mt'"
                )
            if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution:
                other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution
                threestudio.warn(
                    f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}"
                )
            mesh = other.isosurface()
            instance.isosurface_bbox = mesh.extras["bbox"]
            instance.sdf.data = (
                mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1)
            )
            if not instance.cfg.geometry_only and copy_net:
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
@threestudio.register("tetrahedra-sdf-grid")
class TetrahedraSDFGrid(BaseExplicitGeometry):
    @dataclass
    class Config(BaseExplicitGeometry.Config):
        isosurface_resolution: int = 128
        isosurface_deformable_grid: bool = True
        isosurface_remove_outliers: bool = False
        isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01

        n_input_dims: int = 3
        n_feature_dims: int = 3
        pos_encoding_config: dict = field(
            default_factory=lambda: {
                "otype": "HashGrid",
                "n_levels": 16,
                "n_features_per_level": 2,
                "log2_hashmap_size": 19,
                "base_resolution": 16,
                "per_level_scale": 1.447269237440378,
            }
        )
        mlp_network_config: dict = field(
            default_factory=lambda: {
                "otype": "VanillaMLP",
                "activation": "ReLU",
                "output_activation": "none",
                "n_neurons": 64,
                "n_hidden_layers": 1,
            }
        )
        shape_init: Optional[str] = None
        shape_init_params: Optional[Any] = None
        shape_init_mesh_up: str = "+z"
        shape_init_mesh_front: str = "+x"
        force_shape_init: bool = False
        geometry_only: bool = False
        fix_geometry: bool = False

    cfg: Config

    def configure(self) -> None:
        super().configure()

        # this should be saved to state_dict, register as buffer
        self.isosurface_bbox: Float[Tensor, "2 3"]
        self.register_buffer("isosurface_bbox", self.bbox.clone())

        self.isosurface_helper = MarchingTetrahedraHelper(
            self.cfg.isosurface_resolution,
            f"load/tets/{self.cfg.isosurface_resolution}_tets.npz",
        )

        self.sdf: Float[Tensor, "Nv 1"]
        self.deformation: Optional[Float[Tensor, "Nv 3"]]

        if not self.cfg.fix_geometry:
            self.register_parameter(
                "sdf",
                nn.Parameter(
                    torch.zeros(
                        (self.isosurface_helper.grid_vertices.shape[0], 1),
                        dtype=torch.float32,
                    )
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_parameter(
                    "deformation",
                    nn.Parameter(
                        torch.zeros_like(self.isosurface_helper.grid_vertices)
                    ),
                )
            else:
                self.deformation = None
        else:
            self.register_buffer(
                "sdf",
                torch.zeros(
                    (self.isosurface_helper.grid_vertices.shape[0], 1),
                    dtype=torch.float32,
                ),
            )
            if self.cfg.isosurface_deformable_grid:
                self.register_buffer(
                    "deformation",
                    torch.zeros_like(self.isosurface_helper.grid_vertices),
                )
            else:
                self.deformation = None

        if not self.cfg.geometry_only:
            self.encoding = get_encoding(
                self.cfg.n_input_dims, self.cfg.pos_encoding_config
            )
            self.feature_network = get_mlp(
                self.encoding.n_output_dims,
                self.cfg.n_feature_dims,
                self.cfg.mlp_network_config,
            )

        self.mesh: Optional[Mesh] = None

    def initialize_shape(self) -> None:
        if self.cfg.shape_init is None and not self.cfg.force_shape_init:
            return

        # do not initialize shape if weights are provided
        if self.cfg.weights is not None and not self.cfg.force_shape_init:
            return

        get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]]
        assert isinstance(self.cfg.shape_init, str)
        if self.cfg.shape_init == "ellipsoid":
            assert (
                isinstance(self.cfg.shape_init_params, Sized)
                and len(self.cfg.shape_init_params) == 3
            )
            size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return ((points_rand / size) ** 2).sum(
                    dim=-1, keepdim=True
                ).sqrt() - 1.0  # pseudo signed distance of an ellipsoid

            get_gt_sdf = func
        elif self.cfg.shape_init == "sphere":
            assert isinstance(self.cfg.shape_init_params, float)
            radius = self.cfg.shape_init_params

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius

            get_gt_sdf = func
        elif self.cfg.shape_init.startswith("mesh:"):
            assert isinstance(self.cfg.shape_init_params, float)
            mesh_path = self.cfg.shape_init[5:]
            if not os.path.exists(mesh_path):
                raise ValueError(f"Mesh file {mesh_path} does not exist.")

            mesh = trimesh.load(mesh_path)

            # move to center
            centroid = mesh.vertices.mean(0)
            mesh.vertices = mesh.vertices - centroid

            # align to up-z and front-x
            dirs = ["+x", "+y", "+z", "-x", "-y", "-z"]
            dir2vec = {
                "+x": np.array([1, 0, 0]),
                "+y": np.array([0, 1, 0]),
                "+z": np.array([0, 0, 1]),
                "-x": np.array([-1, 0, 0]),
                "-y": np.array([0, -1, 0]),
                "-z": np.array([0, 0, -1]),
            }
            if (
                self.cfg.shape_init_mesh_up not in dirs
                or self.cfg.shape_init_mesh_front not in dirs
            ):
                raise ValueError(
                    f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}."
                )
            if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:
                raise ValueError(
                    "shape_init_mesh_up and shape_init_mesh_front must be orthogonal."
                )
            z_, x_ = (
                dir2vec[self.cfg.shape_init_mesh_up],
                dir2vec[self.cfg.shape_init_mesh_front],
            )
            y_ = np.cross(z_, x_)
            std2mesh = np.stack([x_, y_, z_], axis=0).T
            mesh2std = np.linalg.inv(std2mesh)

            # scaling
            scale = np.abs(mesh.vertices).max()
            mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params
            mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T

            sdf = SDF(mesh.vertices, mesh.faces)

            def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]:
                # add a negative signed here
                # as in pysdf the inside of the shape has positive signed distance
                return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(
                    points_rand
                )[..., None]

            get_gt_sdf = func
        else:
            raise ValueError(
                f"Unknown shape initialization type: {self.cfg.shape_init}"
            )

        sdf_gt = get_gt_sdf(
            scale_tensor(
                self.isosurface_helper.grid_vertices,
                self.isosurface_helper.points_range,
                self.isosurface_bbox,
            )
        )
        self.sdf.data = sdf_gt

        # explicit broadcast to ensure param consistency across ranks
        for param in self.parameters():
            broadcast(param, src=0)

    def isosurface(self) -> Mesh:
        # return cached mesh if fix_geometry is True to save computation
        if self.cfg.fix_geometry and self.mesh is not None:
            return self.mesh
        mesh = self.isosurface_helper(self.sdf, self.deformation)
        mesh.v_pos = scale_tensor(
            mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox
        )
        if self.cfg.isosurface_remove_outliers:
            mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold)
        self.mesh = mesh
        return mesh

    def forward(
        self, points: Float[Tensor, "*N Di"], output_normal: bool = False
    ) -> Dict[str, Float[Tensor, "..."]]:
        if self.cfg.geometry_only:
            return {}
        assert (
            output_normal == False
        ), f"Normal output is not supported for {self.__class__.__name__}"
        points_unscaled = points  # points in the original scale
        points = contract_to_unisphere(points, self.bbox)  # points normalized to (0, 1)
        enc = self.encoding(points.view(-1, self.cfg.n_input_dims))
        features = self.feature_network(enc).view(
            *points.shape[:-1], self.cfg.n_feature_dims
        )
        return {"features": features}

    @staticmethod
    @torch.no_grad()
    def create_from(
        other: BaseGeometry,
        cfg: Optional[Union[dict, DictConfig]] = None,
        copy_net: bool = True,
        **kwargs,
    ) -> "TetrahedraSDFGrid":
        if isinstance(other, TetrahedraSDFGrid):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution
            instance.isosurface_bbox = other.isosurface_bbox.clone()
            instance.sdf.data = other.sdf.data.clone()
            if (
                instance.cfg.isosurface_deformable_grid
                and other.cfg.isosurface_deformable_grid
            ):
                assert (
                    instance.deformation is not None and other.deformation is not None
                )
                instance.deformation.data = other.deformation.data.clone()
            if (
                not instance.cfg.geometry_only
                and not other.cfg.geometry_only
                and copy_net
            ):
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
        elif isinstance(other, ImplicitVolume):
            instance = TetrahedraSDFGrid(cfg, **kwargs)
            if other.cfg.isosurface_method != "mt":
                other.cfg.isosurface_method = "mt"
                threestudio.warn(
                    f"Override isosurface_method of the source geometry to 'mt'"
                )
            if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution:
                other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution
                threestudio.warn(
                    f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}"
                )
            mesh = other.isosurface()
            instance.isosurface_bbox = mesh.extras["bbox"]
            instance.sdf.data = (
                mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1)
            )
            if not instance.cfg.geometry_only and copy_net:
                instance.encoding.load_state_dict(other.encoding.state_dict())
                instance.feature_network.load_state_dict(
                    other.feature_network.state_dict()
                )
            return instance
elif isinstance(other, ImplicitSDF):
3
2023-10-23 07:40:20+00:00
24k
microsoft/SoM
task_adapter/semantic_sam/tasks/inference_semsam_m2m_auto.py
[ { "identifier": "Visualizer", "path": "task_adapter/utils/visualizer.py", "snippet": "class Visualizer:\n \"\"\"\n Visualizer that draws data about detection/segmentation on images.\n\n It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}`\n that draw primitive objects to images, as well as high-level wrappers like\n `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}`\n that draw composite data in some pre-defined style.\n\n Note that the exact visualization style for the high-level wrappers are subject to change.\n Style such as color, opacity, label contents, visibility of labels, or even the visibility\n of objects themselves (e.g. when the object is too small) may change according\n to different heuristics, as long as the results still look visually reasonable.\n\n To obtain a consistent style, you can implement custom drawing functions with the\n abovementioned primitive methods instead. If you need more customized visualization\n styles, you can process the data yourself following their format documented in\n tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not\n intend to satisfy everyone's preference on drawing styles.\n\n This visualizer focuses on high rendering quality rather than performance. It is not\n designed to be used for real-time applications.\n \"\"\"\n\n # TODO implement a fast, rasterized version using OpenCV\n\n def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE):\n \"\"\"\n Args:\n img_rgb: a numpy array of shape (H, W, C), where H and W correspond to\n the height and width of the image respectively. C is the number of\n color channels. The image is required to be in RGB format since that\n is a requirement of the Matplotlib library. The image is also expected\n to be in the range [0, 255].\n metadata (Metadata): dataset metadata (e.g. class names and colors)\n instance_mode (ColorMode): defines one of the pre-defined style for drawing\n instances on an image.\n \"\"\"\n self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8)\n if metadata is None:\n metadata = MetadataCatalog.get(\"__nonexist__\")\n self.metadata = metadata\n self.output = VisImage(self.img, scale=scale)\n self.cpu_device = torch.device(\"cpu\")\n\n # too small texts are useless, therefore clamp to 9\n self._default_font_size = max(\n np.sqrt(self.output.height * self.output.width) // 90, 10 // scale\n )\n self._default_font_size = 18\n self._instance_mode = instance_mode\n self.keypoint_threshold = _KEYPOINT_THRESHOLD\n\n import matplotlib.colors as mcolors\n css4_colors = mcolors.CSS4_COLORS\n self.color_proposals = [list(mcolors.hex2color(color)) for color in css4_colors.values()]\n\n def draw_instance_predictions(self, predictions):\n \"\"\"\n Draw instance-level prediction results on an image.\n\n Args:\n predictions (Instances): the output of an instance detection/segmentation\n model. 
Following fields will be used to draw:\n \"pred_boxes\", \"pred_classes\", \"scores\", \"pred_masks\" (or \"pred_masks_rle\").\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n boxes = predictions.pred_boxes if predictions.has(\"pred_boxes\") else None\n scores = predictions.scores if predictions.has(\"scores\") else None\n classes = predictions.pred_classes.tolist() if predictions.has(\"pred_classes\") else None\n labels = _create_text_labels(classes, scores, self.metadata.get(\"thing_classes\", None))\n keypoints = predictions.pred_keypoints if predictions.has(\"pred_keypoints\") else None\n\n keep = (scores > 0.5).cpu()\n boxes = boxes[keep]\n scores = scores[keep]\n classes = np.array(classes)\n classes = classes[np.array(keep)]\n labels = np.array(labels)\n labels = labels[np.array(keep)]\n\n if predictions.has(\"pred_masks\"):\n masks = np.asarray(predictions.pred_masks)\n masks = masks[np.array(keep)]\n masks = [GenericMask(x, self.output.height, self.output.width) for x in masks]\n else:\n masks = None\n\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n # if self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes\n ]\n alpha = 0.4\n else:\n colors = None\n alpha = 0.4\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(\n self._create_grayscale_image(\n (predictions.pred_masks.any(dim=0) > 0).numpy()\n if predictions.has(\"pred_masks\")\n else None\n )\n )\n alpha = 0.3\n \n self.overlay_instances(\n masks=masks,\n boxes=boxes,\n labels=labels,\n keypoints=keypoints,\n assigned_colors=colors,\n alpha=alpha,\n )\n return self.output\n\n def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw semantic segmentation predictions/labels.\n\n Args:\n sem_seg (Tensor or ndarray): the segmentation of shape (H, W).\n Each value is the integer label of the pixel.\n area_threshold (int): segments with less than `area_threshold` are not drawn.\n alpha (float): the larger it is, the more opaque the segmentations are.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n if isinstance(sem_seg, torch.Tensor):\n sem_seg = sem_seg.numpy()\n labels, areas = np.unique(sem_seg, return_counts=True)\n sorted_idxs = np.argsort(-areas).tolist()\n labels = labels[sorted_idxs]\n for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels):\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[label]]\n except (AttributeError, IndexError):\n mask_color = None\n\n binary_mask = (sem_seg == label).astype(np.uint8)\n text = self.metadata.stuff_classes[label]\n self.draw_binary_mask(\n binary_mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n return self.output\n\n def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7):\n \"\"\"\n Draw panoptic prediction annotations or results.\n\n Args:\n panoptic_seg (Tensor): of shape (height, width) where the values are ids for each\n segment.\n segments_info (list[dict] or None): Describe each segment in `panoptic_seg`.\n If it is a ``list[dict]``, each dict contains keys \"id\", \"category_id\".\n If None, category id of each pixel is computed by\n ``pixel // metadata.label_divisor``.\n area_threshold (int): stuff segments with less than `area_threshold` are not drawn.\n\n Returns:\n output (VisImage): image object with 
visualizations.\n \"\"\"\n pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata)\n\n if self._instance_mode == ColorMode.IMAGE_BW:\n self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask()))\n\n # draw mask for all semantic segments first i.e. \"stuff\"\n for mask, sinfo in pred.semantic_masks():\n category_idx = sinfo[\"category_id\"]\n try:\n mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]]\n except AttributeError:\n mask_color = None\n\n text = self.metadata.stuff_classes[category_idx].replace('-other','').replace('-merged','')\n self.draw_binary_mask(\n mask,\n color=mask_color,\n edge_color=_OFF_WHITE,\n text=text,\n alpha=alpha,\n area_threshold=area_threshold,\n )\n\n # draw mask for all instances second\n all_instances = list(pred.instance_masks())\n if len(all_instances) == 0:\n return self.output\n masks, sinfo = list(zip(*all_instances))\n category_ids = [x[\"category_id\"] for x in sinfo]\n\n try:\n scores = [x[\"score\"] for x in sinfo]\n except KeyError:\n scores = None\n class_names = [name.replace('-other','').replace('-merged','') for name in self.metadata.thing_classes]\n labels = _create_text_labels(\n category_ids, scores, class_names, [x.get(\"iscrowd\", 0) for x in sinfo]\n )\n\n try:\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids\n ]\n except AttributeError:\n colors = None\n self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha)\n\n return self.output\n\n draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility\n\n def draw_dataset_dict(self, dic):\n \"\"\"\n Draw annotations/segmentaions in Detectron2 Dataset format.\n\n Args:\n dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n annos = dic.get(\"annotations\", None)\n if annos:\n if \"segmentation\" in annos[0]:\n masks = [x[\"segmentation\"] for x in annos]\n else:\n masks = None\n if \"keypoints\" in annos[0]:\n keypts = [x[\"keypoints\"] for x in annos]\n keypts = np.array(keypts).reshape(len(annos), -1, 3)\n else:\n keypts = None\n\n boxes = [\n BoxMode.convert(x[\"bbox\"], x[\"bbox_mode\"], BoxMode.XYXY_ABS)\n if len(x[\"bbox\"]) == 4\n else x[\"bbox\"]\n for x in annos\n ]\n\n colors = None\n category_ids = [x[\"category_id\"] for x in annos]\n if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get(\"thing_colors\"):\n colors = [\n self._jitter([x / 255 for x in self.metadata.thing_colors[c]])\n for c in category_ids\n ]\n names = self.metadata.get(\"thing_classes\", None)\n labels = _create_text_labels(\n category_ids,\n scores=None,\n class_names=names,\n is_crowd=[x.get(\"iscrowd\", 0) for x in annos],\n )\n self.overlay_instances(\n labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors\n )\n\n sem_seg = dic.get(\"sem_seg\", None)\n if sem_seg is None and \"sem_seg_file_name\" in dic:\n with PathManager.open(dic[\"sem_seg_file_name\"], \"rb\") as f:\n sem_seg = Image.open(f)\n sem_seg = np.asarray(sem_seg, dtype=\"uint8\")\n if sem_seg is not None:\n self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.4)\n\n pan_seg = dic.get(\"pan_seg\", None)\n if pan_seg is None and \"pan_seg_file_name\" in dic:\n with PathManager.open(dic[\"pan_seg_file_name\"], \"rb\") as f:\n pan_seg = Image.open(f)\n pan_seg = np.asarray(pan_seg)\n from panopticapi.utils import rgb2id\n\n pan_seg = rgb2id(pan_seg)\n 
if pan_seg is not None:\n segments_info = dic[\"segments_info\"]\n pan_seg = torch.tensor(pan_seg)\n self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.7)\n return self.output\n\n def overlay_instances(\n self,\n *,\n boxes=None,\n labels=None,\n masks=None,\n keypoints=None,\n assigned_colors=None,\n alpha=0.5,\n ):\n \"\"\"\n Args:\n boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`,\n or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image,\n or a :class:`RotatedBoxes`,\n or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image,\n labels (list[str]): the text to be displayed for each instance.\n masks (masks-like object): Supported types are:\n\n * :class:`detectron2.structures.PolygonMasks`,\n :class:`detectron2.structures.BitMasks`.\n * list[list[ndarray]]: contains the segmentation masks for all objects in one image.\n The first level of the list corresponds to individual instances. The second\n level to all the polygon that compose the instance, and the third level\n to the polygon coordinates. The third level should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n * list[ndarray]: each ndarray is a binary mask of shape (H, W).\n * list[dict]: each dict is a COCO-style RLE.\n keypoints (Keypoint or array like): an array-like object of shape (N, K, 3),\n where the N is the number of instances and K is the number of keypoints.\n The last dimension corresponds to (x, y, visibility or score).\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = 0\n if boxes is not None:\n boxes = self._convert_boxes(boxes)\n num_instances = len(boxes)\n if masks is not None:\n masks = self._convert_masks(masks)\n if num_instances:\n assert len(masks) == num_instances\n else:\n num_instances = len(masks)\n if keypoints is not None:\n if num_instances:\n assert len(keypoints) == num_instances\n else:\n num_instances = len(keypoints)\n keypoints = self._convert_keypoints(keypoints)\n if labels is not None:\n assert len(labels) == num_instances\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n if boxes is not None and boxes.shape[1] == 5:\n return self.overlay_rotated_instances(\n boxes=boxes, labels=labels, assigned_colors=assigned_colors\n )\n\n # Display in largest to smallest order to reduce occlusion.\n areas = None\n if boxes is not None:\n areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1)\n elif masks is not None:\n areas = np.asarray([x.area() for x in masks])\n\n if areas is not None:\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs] if boxes is not None else None\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None\n assigned_colors = [assigned_colors[idx] for idx in sorted_idxs]\n keypoints = keypoints[sorted_idxs] if keypoints is not None else None\n\n for i in range(num_instances):\n color = assigned_colors[i]\n if boxes is not None:\n self.draw_box(boxes[i], edge_color=color)\n\n if masks is not None:\n for segment in 
masks[i].polygons:\n self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha)\n\n if labels is not None:\n # first get a box\n if boxes is not None:\n x0, y0, x1, y1 = boxes[i]\n text_pos = (x0, y0) # if drawing boxes, put text on the box corner.\n horiz_align = \"left\"\n elif masks is not None:\n # skip small mask without polygon\n if len(masks[i].polygons) == 0:\n continue\n\n x0, y0, x1, y1 = masks[i].bbox()\n\n # draw text in the center (defined by median) when box is not drawn\n # median is less sensitive to outliers.\n text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1]\n horiz_align = \"center\"\n else:\n continue # drawing the box confidence for keypoints isn't very useful.\n # for small objects, draw text at the side to avoid occlusion\n instance_area = (y1 - y0) * (x1 - x0)\n if (\n instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale\n or y1 - y0 < 40 * self.output.scale\n ):\n if y1 >= self.output.height - 5:\n text_pos = (x1, y0)\n else:\n text_pos = (x0, y1)\n\n height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width)\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2)\n * 0.5\n * self._default_font_size\n )\n self.draw_text(\n labels[i],\n text_pos,\n color=lighter_color,\n horizontal_alignment=horiz_align,\n font_size=font_size,\n )\n\n # draw keypoints\n if keypoints is not None:\n for keypoints_per_instance in keypoints:\n self.draw_and_connect_keypoints(keypoints_per_instance)\n\n return self.output\n\n def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None):\n \"\"\"\n Args:\n boxes (ndarray): an Nx5 numpy array of\n (x_center, y_center, width, height, angle_degrees) format\n for the N objects in a single image.\n labels (list[str]): the text to be displayed for each instance.\n assigned_colors (list[matplotlib.colors]): a list of colors, where each color\n corresponds to each mask or box in the image. Refer to 'matplotlib.colors'\n for full list of formats that the colors are accepted in.\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n num_instances = len(boxes)\n\n if assigned_colors is None:\n assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)]\n if num_instances == 0:\n return self.output\n\n # Display in largest to smallest order to reduce occlusion.\n if boxes is not None:\n areas = boxes[:, 2] * boxes[:, 3]\n\n sorted_idxs = np.argsort(-areas).tolist()\n # Re-order overlapped instances in descending order.\n boxes = boxes[sorted_idxs]\n labels = [labels[k] for k in sorted_idxs] if labels is not None else None\n colors = [assigned_colors[idx] for idx in sorted_idxs]\n\n for i in range(num_instances):\n self.draw_rotated_box_with_label(\n boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None\n )\n\n return self.output\n\n def draw_and_connect_keypoints(self, keypoints):\n \"\"\"\n Draws keypoints of an instance and follows the rules for keypoint connections\n to draw lines between appropriate keypoints. 
This follows color heuristics for\n line color.\n\n Args:\n keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints\n and the last dimension corresponds to (x, y, probability).\n\n Returns:\n output (VisImage): image object with visualizations.\n \"\"\"\n visible = {}\n keypoint_names = self.metadata.get(\"keypoint_names\")\n for idx, keypoint in enumerate(keypoints):\n\n # draw keypoint\n x, y, prob = keypoint\n if prob > self.keypoint_threshold:\n self.draw_circle((x, y), color=_RED)\n if keypoint_names:\n keypoint_name = keypoint_names[idx]\n visible[keypoint_name] = (x, y)\n\n if self.metadata.get(\"keypoint_connection_rules\"):\n for kp0, kp1, color in self.metadata.keypoint_connection_rules:\n if kp0 in visible and kp1 in visible:\n x0, y0 = visible[kp0]\n x1, y1 = visible[kp1]\n color = tuple(x / 255.0 for x in color)\n self.draw_line([x0, x1], [y0, y1], color=color)\n\n # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip\n # Note that this strategy is specific to person keypoints.\n # For other keypoints, it should just do nothing\n try:\n ls_x, ls_y = visible[\"left_shoulder\"]\n rs_x, rs_y = visible[\"right_shoulder\"]\n mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2\n except KeyError:\n pass\n else:\n # draw line from nose to mid-shoulder\n nose_x, nose_y = visible.get(\"nose\", (None, None))\n if nose_x is not None:\n self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED)\n\n try:\n # draw line from mid-shoulder to mid-hip\n lh_x, lh_y = visible[\"left_hip\"]\n rh_x, rh_y = visible[\"right_hip\"]\n except KeyError:\n pass\n else:\n mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2\n self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED)\n return self.output\n\n \"\"\"\n Primitive drawing functions:\n \"\"\"\n\n def draw_text(\n self,\n text,\n position,\n *,\n font_size=None,\n color=\"g\",\n horizontal_alignment=\"center\",\n rotation=0,\n ):\n \"\"\"\n Args:\n text (str): class label\n position (tuple): a tuple of the x and y coordinates to place text on image.\n font_size (int, optional): font of the text. If not provided, a font size\n proportional to the image width is calculated and used.\n color: color of the text. Refer to `matplotlib.colors` for full list\n of formats that are accepted.\n horizontal_alignment (str): see `matplotlib.text.Text`\n rotation: rotation angle in degrees CCW\n\n Returns:\n output (VisImage): image object with text drawn.\n \"\"\"\n if not font_size:\n font_size = self._default_font_size\n\n # since the text background is dark, we don't want the text to be dark\n color = np.maximum(list(mplc.to_rgb(color)), 0.15)\n color[np.argmax(color)] = max(0.8, np.max(color))\n\n def contrasting_color(rgb):\n \"\"\"Returns 'white' or 'black' depending on which color contrasts more with the given RGB value.\"\"\"\n \n # Decompose the RGB tuple\n R, G, B = rgb\n\n # Calculate the Y value\n Y = 0.299 * R + 0.587 * G + 0.114 * B\n\n # If Y value is greater than 128, it's closer to white so return black. 
Otherwise, return white.\n return 'black' if Y > 128 else 'white'\n\n bbox_background = contrasting_color(color*255)\n\n x, y = position\n self.output.ax.text(\n x,\n y,\n text,\n size=font_size * self.output.scale,\n family=\"sans-serif\",\n bbox={\"facecolor\": bbox_background, \"alpha\": 0.8, \"pad\": 0.7, \"edgecolor\": \"none\"},\n verticalalignment=\"top\",\n horizontalalignment=horizontal_alignment,\n color=color,\n zorder=10,\n rotation=rotation,\n )\n return self.output\n\n def draw_box(self, box_coord, alpha=0.5, edge_color=\"g\", line_style=\"-\"):\n \"\"\"\n Args:\n box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0\n are the coordinates of the image's top left corner. x1 and y1 are the\n coordinates of the image's bottom right corner.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x0, y0, x1, y1 = box_coord\n width = x1 - x0\n height = y1 - y0\n\n linewidth = max(self._default_font_size / 12, 1)\n\n self.output.ax.add_patch(\n mpl.patches.Rectangle(\n (x0, y0),\n width,\n height,\n fill=False,\n edgecolor=edge_color,\n linewidth=linewidth * self.output.scale,\n alpha=alpha,\n linestyle=line_style,\n )\n )\n return self.output\n\n def draw_rotated_box_with_label(\n self, rotated_box, alpha=0.5, edge_color=\"g\", line_style=\"-\", label=None\n ):\n \"\"\"\n Draw a rotated box with label on its top-left corner.\n\n Args:\n rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle),\n where cnt_x and cnt_y are the center coordinates of the box.\n w and h are the width and height of the box. angle represents how\n many degrees the box is rotated CCW with regard to the 0-degree box.\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n edge_color: color of the outline of the box. Refer to `matplotlib.colors`\n for full list of formats that are accepted.\n line_style (string): the string to use to create the outline of the boxes.\n label (string): label for rotated box. 
It will not be rendered when set to None.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n cnt_x, cnt_y, w, h, angle = rotated_box\n area = w * h\n # use thinner lines when the box is small\n linewidth = self._default_font_size / (\n 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3\n )\n\n theta = angle * math.pi / 180.0\n c = math.cos(theta)\n s = math.sin(theta)\n rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]\n # x: left->right ; y: top->down\n rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect]\n for k in range(4):\n j = (k + 1) % 4\n self.draw_line(\n [rotated_rect[k][0], rotated_rect[j][0]],\n [rotated_rect[k][1], rotated_rect[j][1]],\n color=edge_color,\n linestyle=\"--\" if k == 1 else line_style,\n linewidth=linewidth,\n )\n\n if label is not None:\n text_pos = rotated_rect[1] # topleft corner\n\n height_ratio = h / np.sqrt(self.output.height * self.output.width)\n label_color = self._change_color_brightness(edge_color, brightness_factor=0.7)\n font_size = (\n np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size\n )\n self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle)\n\n return self.output\n\n def draw_circle(self, circle_coord, color, radius=3):\n \"\"\"\n Args:\n circle_coord (list(int) or tuple(int)): contains the x and y coordinates\n of the center of the circle.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n radius (int): radius of the circle.\n\n Returns:\n output (VisImage): image object with box drawn.\n \"\"\"\n x, y = circle_coord\n self.output.ax.add_patch(\n mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color)\n )\n return self.output\n\n def draw_line(self, x_data, y_data, color, linestyle=\"-\", linewidth=None):\n \"\"\"\n Args:\n x_data (list[int]): a list containing x values of all the points being drawn.\n Length of list should match the length of y_data.\n y_data (list[int]): a list containing y values of all the points being drawn.\n Length of list should match the length of x_data.\n color: color of the line. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n linestyle: style of the line. Refer to `matplotlib.lines.Line2D`\n for a full list of formats that are accepted.\n linewidth (float or None): width of the line. When it's None,\n a default value will be computed and used.\n\n Returns:\n output (VisImage): image object with line drawn.\n \"\"\"\n if linewidth is None:\n linewidth = self._default_font_size / 3\n linewidth = max(linewidth, 1)\n self.output.ax.add_line(\n mpl.lines.Line2D(\n x_data,\n y_data,\n linewidth=linewidth * self.output.scale,\n color=color,\n linestyle=linestyle,\n )\n )\n return self.output\n\n def draw_binary_mask(\n self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.7, area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n has_valid_segment = False\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None and has_valid_segment:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n \n def draw_binary_mask_with_number(\n self, binary_mask, color=None, *, edge_color=None, text=None, label_mode='1', alpha=0.1, anno_mode=['Mask'], area_threshold=10\n ):\n \"\"\"\n Args:\n binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and\n W is the image width. Each value in the array is either a 0 or 1 value of uint8\n type.\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n area_threshold (float): a connected component smaller than this area will not be shown.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n randint = random.randint(0, len(self.color_proposals)-1)\n color = self.color_proposals[randint]\n color = mplc.to_rgb(color)\n\n has_valid_segment = True\n binary_mask = binary_mask.astype(\"uint8\") # opencv needs uint8\n mask = GenericMask(binary_mask, self.output.height, self.output.width)\n shape2d = (binary_mask.shape[0], binary_mask.shape[1])\n bbox = mask.bbox()\n\n if 'Mask' in anno_mode:\n if not mask.has_holes:\n # draw polygons for regular masks\n for segment in mask.polygons:\n area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1]))\n if area < (area_threshold or 0):\n continue\n has_valid_segment = True\n segment = segment.reshape(-1, 2)\n self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha)\n else:\n # TODO: Use Path/PathPatch to draw vector graphics:\n # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = (mask.mask == 1).astype(\"float32\") * alpha\n has_valid_segment = True\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if 'Box' in anno_mode:\n self.draw_box(bbox, edge_color=color, alpha=0.75)\n\n if 'Mark' in anno_mode:\n has_valid_segment = True\n else:\n has_valid_segment = False\n\n if text is not None and has_valid_segment:\n # lighter_color = tuple([x*0.2 for x in color])\n lighter_color = [1,1,1] # self._change_color_brightness(color, brightness_factor=0.7)\n self._draw_number_in_mask(binary_mask, text, lighter_color, label_mode)\n return self.output\n\n def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5):\n \"\"\"\n Args:\n soft_mask (ndarray): float array of shape (H, W), each value in [0, 1].\n color: color of the mask. Refer to `matplotlib.colors` for a full list of\n formats that are accepted. If None, will pick a random color.\n text (str): if None, will be drawn on the object\n alpha (float): blending efficient. Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with mask drawn.\n \"\"\"\n if color is None:\n color = random_color(rgb=True, maximum=1)\n color = mplc.to_rgb(color)\n\n shape2d = (soft_mask.shape[0], soft_mask.shape[1])\n rgba = np.zeros(shape2d + (4,), dtype=\"float32\")\n rgba[:, :, :3] = color\n rgba[:, :, 3] = soft_mask * alpha\n self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0))\n\n if text is not None:\n lighter_color = self._change_color_brightness(color, brightness_factor=0.7)\n binary_mask = (soft_mask > 0.5).astype(\"uint8\")\n self._draw_text_in_mask(binary_mask, text, lighter_color)\n return self.output\n\n def draw_polygon(self, segment, color, edge_color=None, alpha=0.5):\n \"\"\"\n Args:\n segment: numpy array of shape Nx2, containing all the points in the polygon.\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a\n full list of formats that are accepted. If not provided, a darker shade\n of the polygon color will be used instead.\n alpha (float): blending efficient. 
Smaller values lead to more transparent masks.\n\n Returns:\n output (VisImage): image object with polygon drawn.\n \"\"\"\n if edge_color is None:\n # make edge color darker than the polygon color\n if alpha > 0.8:\n edge_color = self._change_color_brightness(color, brightness_factor=-0.7)\n else:\n edge_color = color\n edge_color = mplc.to_rgb(edge_color) + (1,)\n\n polygon = mpl.patches.Polygon(\n segment,\n fill=True,\n facecolor=mplc.to_rgb(color) + (alpha,),\n edgecolor=edge_color,\n linewidth=max(self._default_font_size // 15 * self.output.scale, 1),\n )\n self.output.ax.add_patch(polygon)\n return self.output\n\n \"\"\"\n Internal methods:\n \"\"\"\n\n def _jitter(self, color):\n \"\"\"\n Randomly modifies given color to produce a slightly different color than the color given.\n\n Args:\n color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color\n picked. The values in the list are in the [0.0, 1.0] range.\n\n Returns:\n jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the\n color after being jittered. The values in the list are in the [0.0, 1.0] range.\n \"\"\"\n color = mplc.to_rgb(color)\n # np.random.seed(0)\n vec = np.random.rand(3)\n # better to do it in another color space\n vec = vec / np.linalg.norm(vec) * 0.5\n res = np.clip(vec + color, 0, 1)\n return tuple(res)\n\n def _create_grayscale_image(self, mask=None):\n \"\"\"\n Create a grayscale version of the original image.\n The colors in masked area, if given, will be kept.\n \"\"\"\n img_bw = self.img.astype(\"f4\").mean(axis=2)\n img_bw = np.stack([img_bw] * 3, axis=2)\n if mask is not None:\n img_bw[mask] = self.img[mask]\n return img_bw\n\n def _change_color_brightness(self, color, brightness_factor):\n \"\"\"\n Depending on the brightness_factor, gives a lighter or darker color i.e. a color with\n less or more saturation than the original color.\n\n Args:\n color: color of the polygon. Refer to `matplotlib.colors` for a full list of\n formats that are accepted.\n brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of\n 0 will correspond to no change, a factor in [-1.0, 0) range will result in\n a darker color and a factor in (0, 1.0] range will result in a lighter color.\n\n Returns:\n modified_color (tuple[double]): a tuple containing the RGB values of the\n modified color. 
Each value in the tuple is in the [0.0, 1.0] range.\n \"\"\"\n assert brightness_factor >= -1.0 and brightness_factor <= 1.0\n color = mplc.to_rgb(color)\n polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color))\n modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1])\n modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness\n modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness\n modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2])\n return modified_color\n\n def _convert_boxes(self, boxes):\n \"\"\"\n Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension.\n \"\"\"\n if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes):\n return boxes.tensor.detach().numpy()\n else:\n return np.asarray(boxes)\n\n def _convert_masks(self, masks_or_polygons):\n \"\"\"\n Convert different format of masks or polygons to a tuple of masks and polygons.\n\n Returns:\n list[GenericMask]:\n \"\"\"\n\n m = masks_or_polygons\n if isinstance(m, PolygonMasks):\n m = m.polygons\n if isinstance(m, BitMasks):\n m = m.tensor.numpy()\n if isinstance(m, torch.Tensor):\n m = m.numpy()\n ret = []\n for x in m:\n if isinstance(x, GenericMask):\n ret.append(x)\n else:\n ret.append(GenericMask(x, self.output.height, self.output.width))\n return ret\n\n def _draw_number_in_mask(self, binary_mask, text, color, label_mode='1'):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n\n def number_to_string(n):\n chars = []\n while n:\n n, remainder = divmod(n-1, 26)\n chars.append(chr(97 + remainder))\n return ''.join(reversed(chars))\n\n binary_mask = np.pad(binary_mask, ((1, 1), (1, 1)), 'constant')\n mask_dt = cv2.distanceTransform(binary_mask, cv2.DIST_L2, 0)\n mask_dt = mask_dt[1:-1, 1:-1]\n max_dist = np.max(mask_dt)\n coords_y, coords_x = np.where(mask_dt == max_dist) # coords is [y, x]\n\n if label_mode == 'a':\n text = number_to_string(int(text))\n else:\n text = text\n\n self.draw_text(text, (coords_x[len(coords_x)//2] + 2, coords_y[len(coords_y)//2] - 6), color=color)\n\n # TODO sometimes drawn on wrong objects. the heuristics here can improve.\n # _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n # if stats[1:, -1].size == 0:\n # return\n # largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # # draw text on the largest component, as well as other very large components.\n # for cid in range(1, _num_cc):\n # if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # # median is more stable than centroid\n # # center = centroids[largest_component_id]\n # center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n # # bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1]\n # # center[1]=bottom[1]+2\n # self.draw_text(text, center, color=color)\n \n def _draw_text_in_mask(self, binary_mask, text, color):\n \"\"\"\n Find proper places to draw text given a binary mask.\n \"\"\"\n # TODO sometimes drawn on wrong objects. 
the heuristics here can improve.\n _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8)\n if stats[1:, -1].size == 0:\n return\n largest_component_id = np.argmax(stats[1:, -1]) + 1\n\n # draw text on the largest component, as well as other very large components.\n for cid in range(1, _num_cc):\n if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH:\n # median is more stable than centroid\n # center = centroids[largest_component_id]\n center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1]\n bottom=np.max((cc_labels == cid).nonzero(), axis=1)[::-1]\n center[1]=bottom[1]+2\n self.draw_text(text, center, color=color)\n\n def _convert_keypoints(self, keypoints):\n if isinstance(keypoints, Keypoints):\n keypoints = keypoints.tensor\n keypoints = np.asarray(keypoints)\n return keypoints\n\n def get_output(self):\n \"\"\"\n Returns:\n output (VisImage): the image output containing the visualizations added\n to the image.\n \"\"\"\n return self.output" }, { "identifier": "SemanticSamAutomaticMaskGenerator", "path": "task_adapter/semantic_sam/tasks/automatic_mask_generator.py", "snippet": "class SemanticSamAutomaticMaskGenerator:\n def __init__(\n self,\n model,\n points_per_side: Optional[int] = 32,\n points_per_batch: int = 200,\n pred_iou_thresh: float = 0.88,\n stability_score_thresh: float = 0.92,\n stability_score_offset: float = 1.0,\n box_nms_thresh: float = 0.7,\n crop_n_layers: int = 0,\n crop_nms_thresh: float = 0.7,\n crop_overlap_ratio: float = 512 / 1500,\n crop_n_points_downscale_factor: int = 1,\n point_grids: Optional[List[np.ndarray]] = None,\n min_mask_region_area: int = 10,\n output_mode: str = \"binary_mask\",\n level: list = [1, 2, 3, 4, 5, 6],\n ) -> None:\n \"\"\"\n Using a SAM model, generates masks for the entire image.\n Generates a grid of point prompts over the image, then filters\n low quality and duplicate masks. The default settings are chosen\n for SAM with a ViT-H backbone.\n\n Arguments:\n model (Sam): The SAM model to use for mask prediction.\n points_per_side (int or None): The number of points to be sampled\n along one side of the image. The total number of points is\n points_per_side**2. If None, 'point_grids' must provide explicit\n point sampling.\n points_per_batch (int): Sets the number of points run simultaneously\n by the model. Higher numbers may be faster but use more GPU memory.\n pred_iou_thresh (float): A filtering threshold in [0,1], using the\n model's predicted mask quality.\n stability_score_thresh (float): A filtering threshold in [0,1], using\n the stability of the mask under changes to the cutoff used to binarize\n the model's mask predictions.\n stability_score_offset (float): The amount to shift the cutoff when\n calculated the stability score.\n box_nms_thresh (float): The box IoU cutoff used by non-maximal\n suppression to filter duplicate masks.\n crops_n_layers (int): If >0, mask prediction will be run again on\n crops of the image. Sets the number of layers to run, where each\n layer has 2**i_layer number of image crops.\n crops_nms_thresh (float): The box IoU cutoff used by non-maximal\n suppression to filter duplicate masks between different crops.\n crop_overlap_ratio (float): Sets the degree to which crops overlap.\n In the first crop layer, crops will overlap by this fraction of\n the image length. 
Later layers with more crops scale down this overlap.\n crop_n_points_downscale_factor (int): The number of points-per-side\n sampled in layer n is scaled down by crop_n_points_downscale_factor**n.\n point_grids (list(np.ndarray) or None): A list over explicit grids\n of points used for sampling, normalized to [0,1]. The nth grid in the\n list is used in the nth crop layer. Exclusive with points_per_side.\n min_mask_region_area (int): If >0, postprocessing will be applied\n to remove disconnected regions and holes in masks with area smaller\n than min_mask_region_area. Requires opencv.\n output_mode (str): The form masks are returned in. Can be 'binary_mask',\n 'uncompressed_rle', or 'coco_rle'. 'coco_rle' requires pycocotools.\n For large resolutions, 'binary_mask' may consume large amounts of\n memory.\n \"\"\"\n self.level = [prompt_switch(l) for l in level]\n assert (points_per_side is None) != (\n point_grids is None\n ), \"Exactly one of points_per_side or point_grid must be provided.\"\n if points_per_side is not None:\n self.point_grids = build_all_layer_point_grids(\n points_per_side,\n crop_n_layers,\n crop_n_points_downscale_factor,\n )\n elif point_grids is not None:\n self.point_grids = point_grids\n else:\n raise ValueError(\"Can't have both points_per_side and point_grid be None.\")\n\n assert output_mode in [\n \"binary_mask\",\n \"uncompressed_rle\",\n \"coco_rle\",\n ], f\"Unknown output_mode {output_mode}.\"\n if output_mode == \"coco_rle\":\n from pycocotools import mask as mask_utils # type: ignore # noqa: F401\n\n if min_mask_region_area > 0:\n import cv2 # type: ignore # noqa: F401\n\n self.predictor = model\n self.points_per_batch = points_per_batch\n self.pred_iou_thresh = pred_iou_thresh\n self.stability_score_thresh = stability_score_thresh\n self.stability_score_offset = stability_score_offset\n self.box_nms_thresh = box_nms_thresh\n self.crop_n_layers = crop_n_layers\n self.crop_nms_thresh = crop_nms_thresh\n self.crop_overlap_ratio = crop_overlap_ratio\n self.crop_n_points_downscale_factor = crop_n_points_downscale_factor\n self.min_mask_region_area = min_mask_region_area\n self.output_mode = output_mode\n\n @torch.no_grad()\n def generate(self, image: np.ndarray) -> List[Dict[str, Any]]:\n \"\"\"\n Generates masks for the given image.\n\n Arguments:\n image (np.ndarray): The image to generate masks for, in HWC uint8 format.\n\n Returns:\n list(dict(str, any)): A list over records for masks. Each record is\n a dict containing the following keys:\n segmentation (dict(str, any) or np.ndarray): The mask. If\n output_mode='binary_mask', is an array of shape HW. Otherwise,\n is a dictionary containing the RLE.\n bbox (list(float)): The box around the mask, in XYWH format.\n area (int): The area in pixels of the mask.\n predicted_iou (float): The model's own prediction of the mask's\n quality. This is filtered by the pred_iou_thresh parameter.\n point_coords (list(list(float))): The point coordinates input\n to the model to generate this mask.\n stability_score (float): A measure of the mask's quality. 
This\n is filtered on using the stability_score_thresh parameter.\n crop_box (list(float)): The crop of the image used to generate\n the mask, given in XYWH format.\n \"\"\"\n\n # Generate masks\n mask_data = self._generate_masks(image)\n\n # Filter small disconnected regions and holes in masks\n if self.min_mask_region_area > 0:\n mask_data = self.postprocess_small_regions(\n mask_data,\n self.min_mask_region_area,\n max(self.box_nms_thresh, self.crop_nms_thresh),\n )\n # Encode masks\n if self.output_mode == \"coco_rle\":\n mask_data[\"segmentations\"] = [coco_encode_rle(rle) for rle in mask_data[\"rles\"]]\n elif self.output_mode == \"binary_mask\":\n mask_data[\"segmentations\"] = [rle_to_mask(rle) for rle in mask_data[\"rles\"]]\n else:\n mask_data[\"segmentations\"] = mask_data[\"rles\"]\n\n # Write mask records\n curr_anns = []\n for idx in range(len(mask_data[\"segmentations\"])):\n ann = {\n \"segmentation\": mask_data[\"segmentations\"][idx],\n \"area\": area_from_rle(mask_data[\"rles\"][idx]),\n \"bbox\": box_xyxy_to_xywh(mask_data[\"boxes\"][idx]).tolist(),\n \"predicted_iou\": mask_data[\"iou_preds\"][idx].item(),\n \"point_coords\": [mask_data[\"points\"][idx].tolist()],\n \"stability_score\": mask_data[\"stability_score\"][idx].item(),\n \"crop_box\": box_xyxy_to_xywh(mask_data[\"crop_boxes\"][idx]).tolist(),\n }\n curr_anns.append(ann)\n\n return curr_anns\n\n def _generate_masks(self, image: np.ndarray) -> MaskData:\n orig_size = image.shape[-2:]\n crop_boxes, layer_idxs = generate_crop_boxes(\n orig_size, self.crop_n_layers, self.crop_overlap_ratio\n )\n\n # Iterate over image crops\n assert len(crop_boxes)==1\n data = MaskData()\n # import ipdb; ipdb.set_trace()\n for crop_box, layer_idx in zip(crop_boxes, layer_idxs):\n crop_data = self._process_crop(image, crop_box, layer_idx, orig_size)\n\n data.cat(crop_data)\n # import ipdb; ipdb.set_trace()\n # Remove duplicate masks between crops\n if len(crop_boxes) > 1:\n # Prefer masks from smaller crops\n scores = 1 / box_area(data[\"crop_boxes\"])\n scores = scores.to(data[\"boxes\"].device)\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n scores,\n torch.zeros(len(data[\"boxes\"])), # categories\n iou_threshold=self.crop_nms_thresh,\n )\n data.filter(keep_by_nms)\n\n data.to_numpy()\n return data\n\n def _process_crop(\n self,\n image: np.ndarray,\n crop_box: List[int],\n crop_layer_idx: int,\n orig_size: Tuple[int, ...],\n ) -> MaskData:\n # Crop the image and calculate embeddings\n x0, y0, x1, y1 = crop_box\n cropped_im = image#[y0:y1, x0:x1, :]\n cropped_im_size = cropped_im.shape[-2:]\n # self.predictor.set_image(cropped_im)\n\n # Get points for this crop\n points_scale = np.array(cropped_im_size)[None, ::-1]\n points_for_image = self.point_grids[crop_layer_idx] #* points_scale\n\n # Generate masks for this crop in batches\n data = MaskData()\n self.enc_features=None\n # import ipdb; ipdb.set_trace()\n for (points,) in batch_iterator(self.points_per_batch, points_for_image):\n batch_data = self._process_batch(cropped_im,points, cropped_im_size, crop_box, orig_size)\n data.cat(batch_data)\n del batch_data\n\n keep_by_nms = batched_nms(\n data[\"boxes\"].float(),\n data[\"iou_preds\"],\n torch.zeros(len(data[\"boxes\"])), # categories\n iou_threshold=self.box_nms_thresh,\n )\n # import ipdb; ipdb.set_trace()\n data.filter(keep_by_nms)\n # import ipdb; ipdb.set_trace()\n # Return to the original image frame\n data[\"boxes\"] = uncrop_boxes_xyxy(data[\"boxes\"], crop_box)\n data[\"crop_boxes\"] = 
torch.tensor([crop_box for _ in range(len(data[\"rles\"]))])\n\n return data\n\n def _process_batch(\n self,\n images,\n points: np.ndarray,\n im_size: Tuple[int, ...],\n crop_box: List[int],\n orig_size: Tuple[int, ...],\n ) -> MaskData:\n orig_h, orig_w = orig_size\n\n data = {\"image\": images, \"height\": orig_h, \"width\": orig_w}\n points=torch.tensor(points,dtype=torch.float).to(images.device)\n points = torch.cat([points, points.new_tensor([[0.005, 0.005]]).repeat(len(points), 1)], dim=-1)\n data['targets'] = [dict()]\n data['targets'][0]['points']=points\n data['targets'][0]['pb']=points.new_tensor([0.]*len(points))\n batch_inputs = [data]\n if self.enc_features is None:\n masks, iou_preds,mask_features,multi_scale_features= self.predictor.model.evaluate_demo(batch_inputs,None,None,return_features=True, level=self.level)\n self.enc_features=(mask_features,multi_scale_features)\n else:\n masks, iou_preds= self.predictor.model.evaluate_demo(batch_inputs,None,None,self.enc_features[0],self.enc_features[1], level=self.level)\n\n data = MaskData(\n masks=masks,\n iou_preds=iou_preds.flatten(),\n points=torch.as_tensor(points[:,None].repeat(1,len(self.level), 1).view(-1,4)),\n )\n del masks\n # Filter by predicted IoU\n keep_mask = data[\"iou_preds\"] > self.pred_iou_thresh\n data.filter(keep_mask)\n\n # Calculate stability score\n data[\"stability_score\"] = calculate_stability_score(\n data[\"masks\"], 0.0, self.stability_score_offset\n )\n # if self.stability_score_thresh > 0.0:\n keep_mask = data[\"stability_score\"] >= self.stability_score_thresh\n data.filter(keep_mask)\n\n # Threshold masks and calculate boxes\n data[\"masks\"] = data[\"masks\"] > 0.0\n data[\"boxes\"] = batched_mask_to_box(data[\"masks\"])\n\n # Filter boxes that touch crop boundaries\n keep_mask = ~is_box_near_crop_edge(data[\"boxes\"], crop_box, [0, 0, orig_w, orig_h])\n if not torch.all(keep_mask):\n data.filter(keep_mask)\n\n # Compress to RLE\n data[\"masks\"] = uncrop_masks(data[\"masks\"], crop_box, orig_h, orig_w)\n data[\"rles\"] = mask_to_rle_pytorch(data[\"masks\"])\n del data[\"masks\"]\n\n return data\n\n @staticmethod\n def postprocess_small_regions(\n mask_data: MaskData, min_area: int, nms_thresh: float\n ) -> MaskData:\n \"\"\"\n Removes small disconnected regions and holes in masks, then reruns\n box NMS to remove any new duplicates.\n\n Edits mask_data in place.\n\n Requires open-cv as a dependency.\n \"\"\"\n if len(mask_data[\"rles\"]) == 0:\n return mask_data\n\n # Filter small disconnected regions and holes\n new_masks = []\n scores = []\n for rle in mask_data[\"rles\"]:\n mask = rle_to_mask(rle)\n\n mask, changed = remove_small_regions(mask, min_area, mode=\"holes\")\n unchanged = not changed\n mask, changed = remove_small_regions(mask, min_area, mode=\"islands\")\n unchanged = unchanged and not changed\n\n new_masks.append(torch.as_tensor(mask).unsqueeze(0))\n # Give score=0 to changed masks and score=1 to unchanged masks\n # so NMS will prefer ones that didn't need postprocessing\n scores.append(float(unchanged))\n\n # Recalculate boxes and remove any new duplicates\n masks = torch.cat(new_masks, dim=0)\n boxes = batched_mask_to_box(masks)\n keep_by_nms = batched_nms(\n boxes.float(),\n torch.as_tensor(scores),\n torch.zeros(len(boxes)), # categories\n iou_threshold=nms_thresh,\n )\n\n # Only recalculate RLEs for masks that have changed\n for i_mask in keep_by_nms:\n if scores[i_mask] == 0.0:\n mask_torch = masks[i_mask].unsqueeze(0)\n mask_data[\"rles\"][i_mask] = 
mask_to_rle_pytorch(mask_torch)[0]\n mask_data[\"boxes\"][i_mask] = boxes[i_mask] # update res directly\n mask_data.filter(keep_by_nms)\n\n return mask_data" } ]
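As a side note on the visualizer snippet in the context list above: draw_polygon derives its edge color, and the label-drawing helpers derive their text color, from a hue-preserving lightness scale in HLS space (_change_color_brightness). A minimal standalone sketch of that adjustment (the function name here is illustrative, not part of the repo):

import colorsys
import matplotlib.colors as mplc

def change_color_brightness(color, brightness_factor):
    # Scale lightness by (1 + brightness_factor) in HLS space, clamped to [0, 1];
    # hue and saturation stay fixed, mirroring _change_color_brightness above.
    assert -1.0 <= brightness_factor <= 1.0
    h, l, s = colorsys.rgb_to_hls(*mplc.to_rgb(color))
    l = min(1.0, max(0.0, l + brightness_factor * l))
    return colorsys.hls_to_rgb(h, l, s)

print(change_color_brightness('red', 0.7))   # lighter shade, e.g. for label text
print(change_color_brightness('red', -0.7))  # darker shade, e.g. for polygon edges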
import torch
import numpy as np
import matplotlib.pyplot as plt
import cv2  # type: ignore
import io
from torchvision import transforms
from task_adapter.utils.visualizer import Visualizer
from typing import Tuple
from PIL import Image
from detectron2.data import MetadataCatalog
from .automatic_mask_generator import SemanticSamAutomaticMaskGenerator
15,527
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang ([email protected]) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()
# -------------------------------------------------------- # Semantic-SAM: Segment and Recognize Anything at Any Granularity # Copyright (c) 2023 Microsoft # Licensed under The MIT License [see LICENSE for details] # Written by Hao Zhang ([email protected]) # -------------------------------------------------------- metadata = MetadataCatalog.get('coco_2017_train_panoptic') def inference_semsam_m2m_auto(model, image, level, all_classes, all_parts, thresh, text_size, hole_scale, island_scale, semantic, refimg=None, reftxt=None, audio_pth=None, video_pth=None, label_mode='1', alpha=0.1, anno_mode=['Mask']): t = [] t.append(transforms.Resize(int(text_size), interpolation=Image.BICUBIC)) transform1 = transforms.Compose(t) image_ori = transform1(image) image_ori = np.asarray(image_ori) images = torch.from_numpy(image_ori.copy()).permute(2,0,1).cuda()
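The inference_semsam_m2m_auto code above begins by resizing the input so its shorter side equals text_size and converting the H*W*3 uint8 array into a 3*H*W CUDA tensor. A self-contained sketch of just that preprocessing step (the file path and text_size value are placeholders, not taken from the repo):

import numpy as np
import torch
from PIL import Image
from torchvision import transforms

text_size = 640                                    # placeholder value
image = Image.open('example.jpg').convert('RGB')   # placeholder input

transform1 = transforms.Compose(
    [transforms.Resize(int(text_size), interpolation=Image.BICUBIC)])
image_ori = np.asarray(transform1(image))                            # H*W*3 uint8
images = torch.from_numpy(image_ori.copy()).permute(2, 0, 1).cuda()  # 3*H*W on GPU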
mask_generator = SemanticSamAutomaticMaskGenerator(model,points_per_side=32,
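The next-line field above is truncated by the dataset and is kept verbatim. Purely for illustration (this is not the gold completion), a call that spells out the documented constructor defaults and then consumes the record keys that generate() documents might look like:

# Hypothetical usage sketch -- argument values are the documented defaults,
# not the dataset's ground-truth continuation of the line above.
mask_generator = SemanticSamAutomaticMaskGenerator(
    model,
    points_per_side=32,           # 32*32 = 1024 point prompts over the image
    pred_iou_thresh=0.88,         # filter on the model's predicted mask quality
    stability_score_thresh=0.92,  # filter on stability under cutoff shifts
    min_mask_region_area=10,      # drop tiny disconnected regions and holes
    level=[1, 2, 3, 4, 5, 6],     # all granularity levels
)
anns = mask_generator.generate(images)
for ann in anns:
    print(ann['bbox'], ann['area'], ann['predicted_iou'], ann['stability_score'])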
1
2023-10-16 03:39:26+00:00
24k
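Within _process_batch in the record above, candidate masks pass through a filter cascade: predicted-IoU threshold, then a stability score, then binarization, box extraction and NMS. The stability score itself (computed by calculate_stability_score, whose body is not shown in this record) compares the mask's area when the binarization cutoff is shifted up versus down by stability_score_offset. A simplified sketch under that reading:

import torch

def stability_score(mask_logits: torch.Tensor,
                    threshold: float = 0.0, offset: float = 1.0) -> torch.Tensor:
    # Area at the raised cutoff over area at the lowered cutoff; values near 1
    # mean the mask barely changes when the cutoff moves, i.e. it is stable.
    high = (mask_logits > (threshold + offset)).sum(dim=(-1, -2)).float()
    low = (mask_logits > (threshold - offset)).sum(dim=(-1, -2)).float()
    return high / low.clamp(min=1.0)

logits = torch.randn(4, 64, 64) * 5     # fake per-mask logits
keep = stability_score(logits) >= 0.92  # same threshold as in the record above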
hkchengrex/Cutie
gui/main_controller.py
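The context list that follows (for gui/main_controller.py) opens with the CUTIE model, whose _get_others helper builds, for every object, the clamped union of all other objects' masks; encode_mask passes this to the mask encoder alongside the masks themselves. A tiny standalone sketch of that operation with made-up shapes:

import torch

def get_others(masks: torch.Tensor) -> torch.Tensor:
    # masks: B*num_objects*H*W in [0, 1]. For each object channel, return the
    # clamped sum of every other object's mask, as in CUTIE._get_others below.
    return (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)

masks = torch.zeros(1, 2, 4, 4)
masks[0, 0, :2] = 1.0   # object 1 occupies the top half
masks[0, 1, 2:] = 1.0   # object 2 occupies the bottom half
others = get_others(masks)
assert torch.equal(others[0, 0], masks[0, 1])  # obj 1's "others" = obj 2's mask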
[ { "identifier": "CUTIE", "path": "cutie/model/cutie.py", "snippet": "class CUTIE(nn.Module):\n def __init__(self, cfg: DictConfig, *, single_object=False):\n super().__init__()\n model_cfg = cfg.model\n self.ms_dims = model_cfg.pixel_encoder.ms_dims\n self.key_dim = model_cfg.key_dim\n self.value_dim = model_cfg.value_dim\n self.sensory_dim = model_cfg.sensory_dim\n self.pixel_dim = model_cfg.pixel_dim\n self.embed_dim = model_cfg.embed_dim\n self.single_object = single_object\n\n log.info(f'Single object: {self.single_object}')\n\n self.pixel_encoder = PixelEncoder(model_cfg)\n self.pix_feat_proj = nn.Conv2d(self.ms_dims[0], self.pixel_dim, kernel_size=1)\n self.key_proj = KeyProjection(model_cfg)\n self.mask_encoder = MaskEncoder(model_cfg, single_object=single_object)\n self.mask_decoder = MaskDecoder(model_cfg)\n self.pixel_fuser = PixelFeatureFuser(model_cfg, single_object=single_object)\n self.object_transformer = QueryTransformer(model_cfg)\n self.object_summarizer = ObjectSummarizer(model_cfg)\n self.aux_computer = AuxComputer(cfg)\n\n self.register_buffer(\"pixel_mean\", torch.Tensor(model_cfg.pixel_mean).view(-1, 1, 1), False)\n self.register_buffer(\"pixel_std\", torch.Tensor(model_cfg.pixel_std).view(-1, 1, 1), False)\n\n def _get_others(self, masks: torch.Tensor) -> torch.Tensor:\n # for each object, return the sum of masks of all other objects\n if self.single_object:\n return None\n\n num_objects = masks.shape[1]\n if num_objects >= 1:\n others = (masks.sum(dim=1, keepdim=True) - masks).clamp(0, 1)\n else:\n others = torch.zeros_like(masks)\n return others\n\n def encode_image(self, image: torch.Tensor) -> (Iterable[torch.Tensor], torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n ms_image_feat = self.pixel_encoder(image)\n return ms_image_feat, self.pix_feat_proj(ms_image_feat[0])\n\n def encode_mask(\n self,\n image: torch.Tensor,\n ms_features: List[torch.Tensor],\n sensory: torch.Tensor,\n masks: torch.Tensor,\n *,\n deep_update: bool = True,\n chunk_size: int = -1,\n need_weights: bool = False) -> (torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor):\n image = (image - self.pixel_mean) / self.pixel_std\n others = self._get_others(masks)\n mask_value, new_sensory = self.mask_encoder(image,\n ms_features,\n sensory,\n masks,\n others,\n deep_update=deep_update,\n chunk_size=chunk_size)\n object_summaries, object_logits = self.object_summarizer(masks, mask_value, need_weights)\n return mask_value, new_sensory, object_summaries, object_logits\n\n def transform_key(self,\n final_pix_feat: torch.Tensor,\n *,\n need_sk: bool = True,\n need_ek: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n key, shrinkage, selection = self.key_proj(final_pix_feat, need_s=need_sk, need_e=need_ek)\n return key, shrinkage, selection\n\n # Used in training only.\n # This step is replaced by MemoryManager in test time\n def read_memory(self, query_key: torch.Tensor, query_selection: torch.Tensor,\n memory_key: torch.Tensor, memory_shrinkage: torch.Tensor,\n msk_value: torch.Tensor, obj_memory: torch.Tensor, pix_feat: torch.Tensor,\n sensory: torch.Tensor, last_mask: torch.Tensor,\n selector: torch.Tensor) -> (torch.Tensor, Dict[str, torch.Tensor]):\n \"\"\"\n query_key : B * CK * H * W\n query_selection : B * CK * H * W\n memory_key : B * CK * T * H * W\n memory_shrinkage: B * 1 * T * H * W\n msk_value : B * num_objects * CV * T * H * W\n obj_memory : B * num_objects * T * num_summaries * C\n pixel_feature : B * C * H * W\n \"\"\"\n batch_size, num_objects = 
msk_value.shape[:2]\n\n # read using visual attention\n with torch.cuda.amp.autocast(enabled=False):\n affinity = get_affinity(memory_key.float(), memory_shrinkage.float(), query_key.float(),\n query_selection.float())\n\n msk_value = msk_value.flatten(start_dim=1, end_dim=2).float()\n\n # B * (num_objects*CV) * H * W\n pixel_readout = readout(affinity, msk_value)\n pixel_readout = pixel_readout.view(batch_size, num_objects, self.value_dim,\n *pixel_readout.shape[-2:])\n pixel_readout = self.pixel_fusion(pix_feat, pixel_readout, sensory, last_mask)\n\n # read from query transformer\n mem_readout, aux_features = self.readout_query(pixel_readout, obj_memory, selector=selector)\n\n aux_output = {\n 'sensory': sensory,\n 'q_logits': aux_features['logits'] if aux_features else None,\n 'attn_mask': aux_features['attn_mask'] if aux_features else None,\n }\n\n return mem_readout, aux_output\n\n def pixel_fusion(self,\n pix_feat: torch.Tensor,\n pixel: torch.Tensor,\n sensory: torch.Tensor,\n last_mask: torch.Tensor,\n *,\n chunk_size: int = -1) -> torch.Tensor:\n last_mask = F.interpolate(last_mask, size=sensory.shape[-2:], mode='area')\n last_others = self._get_others(last_mask)\n fused = self.pixel_fuser(pix_feat,\n pixel,\n sensory,\n last_mask,\n last_others,\n chunk_size=chunk_size)\n return fused\n\n def readout_query(self,\n pixel_readout,\n obj_memory,\n *,\n selector=None,\n need_weights=False) -> (torch.Tensor, Dict[str, torch.Tensor]):\n return self.object_transformer(pixel_readout,\n obj_memory,\n selector=selector,\n need_weights=need_weights)\n\n def segment(self,\n ms_image_feat: List[torch.Tensor],\n memory_readout: torch.Tensor,\n sensory: torch.Tensor,\n *,\n selector: bool = None,\n chunk_size: int = -1,\n update_sensory: bool = True) -> (torch.Tensor, torch.Tensor, torch.Tensor):\n \"\"\"\n multi_scale_features is from the key encoder for skip-connection\n memory_readout is from working/long-term memory\n sensory is the sensory memory\n last_mask is the mask from the last frame, supplementing sensory memory\n selector is 1 if an object exists, and 0 otherwise. 
We use it to filter padded objects\n during training.\n \"\"\"\n sensory, logits = self.mask_decoder(ms_image_feat,\n memory_readout,\n sensory,\n chunk_size=chunk_size,\n update_sensory=update_sensory)\n\n prob = torch.sigmoid(logits)\n if selector is not None:\n prob = prob * selector\n\n # Softmax over all objects[]\n logits = aggregate(prob, dim=1)\n logits = F.interpolate(logits, scale_factor=4, mode='bilinear', align_corners=False)\n prob = F.softmax(logits, dim=1)\n\n return sensory, logits, prob\n\n def compute_aux(self, pix_feat: torch.Tensor, aux_inputs: Dict[str, torch.Tensor],\n selector: torch.Tensor) -> Dict[str, torch.Tensor]:\n return self.aux_computer(pix_feat, aux_inputs, selector)\n\n def forward(self, *args, **kwargs):\n raise NotImplementedError\n\n def load_weights(self, src_dict, init_as_zero_if_needed=False) -> None:\n if not self.single_object:\n # Map single-object weight to multi-object weight (4->5 out channels in conv1)\n for k in list(src_dict.keys()):\n if k == 'mask_encoder.conv1.weight':\n if src_dict[k].shape[1] == 4:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((64, 1, 7, 7), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif k == 'pixel_fuser.sensory_compress.weight':\n if src_dict[k].shape[1] == self.sensory_dim + 1:\n log.info(f'Converting {k} from single object to multiple objects.')\n pads = torch.zeros((self.value_dim, 1, 1, 1), device=src_dict[k].device)\n if not init_as_zero_if_needed:\n nn.init.orthogonal_(pads)\n log.info(f'Randomly initialized padding for {k}.')\n else:\n log.info(f'Zero-initialized padding for {k}.')\n src_dict[k] = torch.cat([src_dict[k], pads], 1)\n elif self.single_object:\n \"\"\"\n If the model is multiple-object and we are training in single-object, \n we strip the last channel of conv1.\n This is not supposed to happen in standard training except when users are trying to\n finetune a trained model with single object datasets.\n \"\"\"\n if src_dict['mask_encoder.conv1.weight'].shape[1] == 5:\n log.warning(f'Converting {k} from multiple objects to single object.'\n 'This is not supposed to happen in standard training.')\n src_dict[k] = src_dict[k][:, :-1]\n\n for k in src_dict:\n if k not in self.state_dict():\n log.info(f'Key {k} found in src_dict but not in self.state_dict()!!!')\n for k in self.state_dict():\n if k not in src_dict:\n log.info(f'Key {k} found in self.state_dict() but not in src_dict!!!')\n\n self.load_state_dict(src_dict, strict=False)\n\n @property\n def device(self) -> torch.device:\n return self.pixel_mean.device" }, { "identifier": "InferenceCore", "path": "cutie/inference/inference_core.py", "snippet": "class InferenceCore:\n def __init__(self,\n network: CUTIE,\n cfg: DictConfig,\n *,\n image_feature_store: ImageFeatureStore = None):\n self.network = network\n self.cfg = cfg\n self.mem_every = cfg.mem_every\n stagger_updates = cfg.stagger_updates\n self.chunk_size = cfg.chunk_size\n self.save_aux = cfg.save_aux\n self.max_internal_size = cfg.max_internal_size\n self.flip_aug = cfg.flip_aug\n\n self.curr_ti = -1\n self.last_mem_ti = 0\n # at which time indices should we update the sensory memory\n if stagger_updates >= self.mem_every:\n self.stagger_ti = set(range(1, self.mem_every + 1))\n else:\n self.stagger_ti = set(\n np.round(np.linspace(1, 
self.mem_every, stagger_updates)).astype(int))\n self.object_manager = ObjectManager()\n self.memory = MemoryManager(cfg=cfg, object_manager=self.object_manager)\n\n if image_feature_store is None:\n self.image_feature_store = ImageFeatureStore(self.network)\n else:\n self.image_feature_store = image_feature_store\n\n self.last_mask = None\n\n def clear_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory = MemoryManager(cfg=self.cfg, object_manager=self.object_manager)\n\n def clear_non_permanent_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_non_permanent_memory()\n\n def clear_sensory_memory(self):\n self.curr_ti = -1\n self.last_mem_ti = 0\n self.memory.clear_sensory_memory()\n\n def update_config(self, cfg):\n self.mem_every = cfg['mem_every']\n self.memory.update_config(cfg)\n\n def _add_memory(self,\n image: torch.Tensor,\n pix_feat: torch.Tensor,\n prob: torch.Tensor,\n key: torch.Tensor,\n shrinkage: torch.Tensor,\n selection: torch.Tensor,\n *,\n is_deep_update: bool = True,\n force_permanent: bool = False) -> None:\n \"\"\"\n Memorize the given segmentation in all memory stores.\n\n The batch dimension is 1 if flip augmentation is not used.\n image: RGB image, (1/2)*3*H*W\n pix_feat: from the key encoder, (1/2)*_*H*W\n prob: (1/2)*num_objects*H*W, in [0, 1]\n key/shrinkage/selection: for anisotropic l2, (1/2)*_*H*W\n selection can be None if not using long-term memory\n is_deep_update: whether to use deep update (e.g. with the mask encoder)\n force_permanent: whether to force the memory to be permanent\n \"\"\"\n if prob.shape[1] == 0:\n # nothing to add\n log.warn('Trying to add an empty object mask to memory!')\n return\n\n if force_permanent:\n as_permanent = 'all'\n else:\n as_permanent = 'first'\n\n self.memory.initialize_sensory_if_needed(key, self.object_manager.all_obj_ids)\n msk_value, sensory, obj_value, self.obj_logits = self.network.encode_mask(\n image,\n pix_feat,\n self.memory.get_sensory(self.object_manager.all_obj_ids),\n prob,\n deep_update=is_deep_update,\n chunk_size=self.chunk_size,\n need_weights=self.save_aux)\n self.memory.add_memory(key,\n shrinkage,\n msk_value,\n obj_value,\n self.object_manager.all_obj_ids,\n selection=selection,\n as_permanent=as_permanent)\n self.last_mem_ti = self.curr_ti\n if is_deep_update:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n\n def _segment(self,\n key: torch.Tensor,\n selection: torch.Tensor,\n pix_feat: torch.Tensor,\n ms_features: Iterable[torch.Tensor],\n update_sensory: bool = True) -> torch.Tensor:\n \"\"\"\n Produce a segmentation using the given features and the memory\n\n The batch dimension is 1 if flip augmentation is not used.\n key/selection: for anisotropic l2: (1/2) * _ * H * W\n pix_feat: from the key encoder, (1/2) * _ * H * W\n ms_features: an iterable of multiscale features from the encoder, each is (1/2)*_*H*W\n with strides 16, 8, and 4 respectively\n update_sensory: whether to update the sensory memory\n\n Returns: (num_objects+1)*H*W normalized probability; the first channel is the background\n \"\"\"\n bs = key.shape[0]\n if self.flip_aug:\n assert bs == 2\n else:\n assert bs == 1\n\n if not self.memory.engaged:\n log.warn('Trying to segment without any memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n\n memory_readout = self.memory.read(pix_feat, key, selection, self.last_mask, self.network)\n memory_readout = 
self.object_manager.realize_dict(memory_readout)\n sensory, _, pred_prob_with_bg = self.network.segment(ms_features,\n memory_readout,\n self.memory.get_sensory(\n self.object_manager.all_obj_ids),\n chunk_size=self.chunk_size,\n update_sensory=update_sensory)\n # remove batch dim\n if self.flip_aug:\n # average predictions of the non-flipped and flipped version\n pred_prob_with_bg = (pred_prob_with_bg[0] +\n torch.flip(pred_prob_with_bg[1], dims=[-1])) / 2\n else:\n pred_prob_with_bg = pred_prob_with_bg[0]\n if update_sensory:\n self.memory.update_sensory(sensory, self.object_manager.all_obj_ids)\n return pred_prob_with_bg\n\n def step(self,\n image: torch.Tensor,\n mask: Optional[torch.Tensor] = None,\n objects: Optional[List[int]] = None,\n *,\n idx_mask: bool = True,\n end: bool = False,\n delete_buffer: bool = True,\n force_permanent: bool = False) -> torch.Tensor:\n \"\"\"\n Take a step with a new incoming image.\n If there is an incoming mask with new objects, we will memorize them.\n If there is no incoming mask, we will segment the image using the memory.\n In both cases, we will update the memory and return a segmentation.\n\n image: 3*H*W\n mask: H*W (if idx mask) or len(objects)*H*W or None\n objects: list of object ids that are valid in the mask Tensor.\n The ids themselves do not need to be consecutive/in order, but they need to be \n in the same position in the list as the corresponding mask\n in the tensor in non-idx-mask mode.\n objects is ignored if the mask is None. \n If idx_mask is False and objects is None, we sequentially infer the object ids.\n idx_mask: if True, mask is expected to contain an object id at every pixel.\n If False, mask should have multiple channels with each channel representing one object.\n end: if we are at the end of the sequence, we do not need to update memory\n if unsure just set it to False \n delete_buffer: whether to delete the image feature buffer after this step\n force_permanent: the memory recorded this frame will be added to the permanent memory\n \"\"\"\n if objects is None and mask is not None:\n assert not idx_mask\n objects = list(range(1, mask.shape[0] + 1))\n\n # resize input if needed -- currently only used for the GUI\n resize_needed = False\n if self.max_internal_size > 0:\n h, w = image.shape[-2:]\n min_side = min(h, w)\n if min_side > self.max_internal_size:\n resize_needed = True\n new_h = int(h / min_side * self.max_internal_size)\n new_w = int(w / min_side * self.max_internal_size)\n image = F.interpolate(image.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n if mask is not None:\n if idx_mask:\n mask = F.interpolate(mask.unsqueeze(0).unsqueeze(0).float(),\n size=(new_h, new_w),\n mode='nearest',\n align_corners=False)[0, 0].round().long()\n else:\n mask = F.interpolate(mask.unsqueeze(0),\n size=(new_h, new_w),\n mode='bilinear',\n align_corners=False)[0]\n\n self.curr_ti += 1\n\n image, self.pad = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n if self.flip_aug:\n image = torch.cat([image, torch.flip(image, dims=[-1])], dim=0)\n\n # whether to update the working memory\n is_mem_frame = ((self.curr_ti - self.last_mem_ti >= self.mem_every) or\n (mask is not None)) and (not end)\n # segment when there is no input mask or when the input mask is incomplete\n need_segment = (mask is None) or (self.object_manager.num_obj > 0\n and not self.object_manager.has_all(objects))\n update_sensory = ((self.curr_ti - self.last_mem_ti) in self.stagger_ti) and (not 
end)\n\n # encoding the image\n ms_feat, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n key, shrinkage, selection = self.image_feature_store.get_key(self.curr_ti, image)\n\n # segmentation from memory if needed\n if need_segment:\n pred_prob_with_bg = self._segment(key,\n selection,\n pix_feat,\n ms_feat,\n update_sensory=update_sensory)\n\n # use the input mask if provided\n if mask is not None:\n # inform the manager of the new objects, and get a list of temporary id\n # temporary ids -- indicates the position of objects in the tensor\n # (starts with 1 due to the background channel)\n corresponding_tmp_ids, _ = self.object_manager.add_new_objects(objects)\n\n mask, _ = pad_divide_by(mask, 16)\n if need_segment:\n # merge predicted mask with the incomplete input mask\n pred_prob_no_bg = pred_prob_with_bg[1:]\n # use the mutual exclusivity of segmentation\n if idx_mask:\n pred_prob_no_bg[:, mask > 0] = 0\n else:\n pred_prob_no_bg[:, mask.max(0) > 0.5] = 0\n\n new_masks = []\n for mask_id, tmp_id in enumerate(corresponding_tmp_ids):\n if idx_mask:\n this_mask = (mask == objects[mask_id]).type_as(pred_prob_no_bg)\n else:\n this_mask = mask[tmp_id]\n if tmp_id > pred_prob_no_bg.shape[0]:\n new_masks.append(this_mask.unsqueeze(0))\n else:\n # +1 for padding the background channel\n pred_prob_no_bg[tmp_id - 1] = this_mask\n # new_masks are always in the order of tmp_id\n mask = torch.cat([pred_prob_no_bg, *new_masks], dim=0)\n elif idx_mask:\n # simply convert cls to one-hot representation\n if len(objects) == 0:\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n log.warn('Trying to insert an empty mask as memory!')\n return torch.zeros((1, key.shape[-2] * 16, key.shape[-1] * 16),\n device=key.device,\n dtype=key.dtype)\n mask = torch.stack(\n [mask == objects[mask_id] for mask_id, _ in enumerate(corresponding_tmp_ids)],\n dim=0)\n pred_prob_with_bg = aggregate(mask, dim=0)\n pred_prob_with_bg = torch.softmax(pred_prob_with_bg, dim=0)\n\n self.last_mask = pred_prob_with_bg[1:].unsqueeze(0)\n if self.flip_aug:\n self.last_mask = torch.cat(\n [self.last_mask, torch.flip(self.last_mask, dims=[-1])], dim=0)\n\n # save as memory if needed\n if is_mem_frame or force_permanent:\n self._add_memory(image,\n pix_feat,\n self.last_mask,\n key,\n shrinkage,\n selection,\n force_permanent=force_permanent)\n\n if delete_buffer:\n self.image_feature_store.delete(self.curr_ti)\n\n output_prob = unpad(pred_prob_with_bg, self.pad)\n if resize_needed:\n # restore output to the original size\n output_prob = F.interpolate(output_prob.unsqueeze(0),\n size=(h, w),\n mode='bilinear',\n align_corners=False)[0]\n\n return output_prob\n\n def get_aux_outputs(self, image: torch.Tensor) -> Dict[str, torch.Tensor]:\n image, pads = pad_divide_by(image, 16)\n image = image.unsqueeze(0) # add the batch dimension\n _, pix_feat = self.image_feature_store.get_features(self.curr_ti, image)\n\n aux_inputs = self.memory.aux\n aux_outputs = self.network.compute_aux(pix_feat, aux_inputs, selector=None)\n aux_outputs['q_weights'] = aux_inputs['q_weights']\n aux_outputs['p_weights'] = aux_inputs['p_weights']\n\n for k, v in aux_outputs.items():\n if len(v.shape) == 5:\n aux_outputs[k] = F.interpolate(v[0],\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n elif 'weights' in k:\n b, num_objects, num_heads, num_queries, h, w = v.shape\n v = v.view(num_objects * num_heads, num_queries, h, w)\n v = F.interpolate(v, size=image.shape[-2:], mode='bilinear', align_corners=False)\n 
aux_outputs[k] = v.view(num_objects, num_heads, num_queries, *image.shape[-2:])\n else:\n aux_outputs[k] = F.interpolate(v,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)[0]\n aux_outputs[k] = unpad(aux_outputs[k], pads)\n if 'weights' in k:\n weights = aux_outputs[k]\n weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0] +\n 1e-8)\n aux_outputs[k] = (weights * 255).cpu().numpy()\n else:\n aux_outputs[k] = (aux_outputs[k].softmax(dim=0) * 255).cpu().numpy()\n\n self.image_feature_store.delete(self.curr_ti)\n return aux_outputs\n\n def get_aux_object_weights(self, image: torch.Tensor) -> np.ndarray:\n image, pads = pad_divide_by(image, 16)\n # B*num_objects*H*W*num_queries -> num_objects*num_queries*H*W\n # weights = F.softmax(self.obj_logits, dim=-1)[0]\n weights = F.sigmoid(self.obj_logits)[0]\n weights = weights.permute(0, 3, 1, 2).contiguous()\n weights = F.interpolate(weights,\n size=image.shape[-2:],\n mode='bilinear',\n align_corners=False)\n # weights = weights / (weights.max(-1, keepdim=True)[0].max(-2, keepdim=True)[0])\n weights = unpad(weights, pads)\n weights = (weights * 255).cpu().numpy()\n return weights" }, { "identifier": "ResourceManager", "path": "gui/resource_manager.py", "snippet": "class ResourceManager:\n def __init__(self, cfg: DictConfig):\n # determine inputs\n images = cfg['images']\n video = cfg['video']\n self.workspace = cfg['workspace']\n self.max_size = cfg['max_overall_size']\n self.palette = davis_palette\n\n # create temporary workspace if not specified\n if self.workspace is None:\n if images is not None:\n basename = path.basename(images)\n elif video is not None:\n basename = path.basename(video)[:-4]\n else:\n raise NotImplementedError('Either images, video, or workspace has to be specified')\n\n self.workspace = path.join('./workspace', basename)\n\n print(f'Workspace is in: {self.workspace}')\n with open_dict(cfg):\n cfg['workspace'] = self.workspace\n\n # determine the location of input images\n need_decoding = False\n need_resizing = False\n if path.exists(path.join(self.workspace, 'images')):\n pass\n elif images is not None:\n need_resizing = True\n elif video is not None:\n # will decode video into frames later\n need_decoding = True\n\n # create workspace subdirectories\n self.image_dir = path.join(self.workspace, 'images')\n self.mask_dir = path.join(self.workspace, 'masks')\n self.visualization_dir = path.join(self.workspace, 'visualization')\n self.soft_mask_dir = path.join(self.workspace, 'soft_masks')\n os.makedirs(self.image_dir, exist_ok=True)\n os.makedirs(self.mask_dir, exist_ok=True)\n os.makedirs(self.visualization_dir, exist_ok=True)\n os.makedirs(self.soft_mask_dir, exist_ok=True)\n\n # create all soft mask sub-directories\n for i in range(1, cfg['num_objects'] + 1):\n os.makedirs(path.join(self.soft_mask_dir, f'{i}'), exist_ok=True)\n\n # convert read functions to be buffered\n self.get_image = LRU(self._get_image_unbuffered, maxsize=cfg['buffer_size'])\n self.get_mask = LRU(self._get_mask_unbuffered, maxsize=cfg['buffer_size'])\n\n # extract frames from video\n if need_decoding:\n self._extract_frames(video)\n\n # copy/resize existing images to the workspace\n if need_resizing:\n self._copy_resize_frames(images)\n\n # read all frame names\n self.names = sorted(os.listdir(self.image_dir))\n self.names = [f[:-4] for f in self.names] # remove extensions\n self.length = len(self.names)\n\n assert self.length > 0, f'No images found! Check {self.workspace}/images. 
Remove folder if necessary.'\n\n print(f'{self.length} images found.')\n\n self.height, self.width = self.get_image(0).shape[:2]\n\n # create the saver threads for saving the masks/visualizations\n self.save_queue = Queue(maxsize=cfg['save_queue_size'])\n self.num_save_threads = cfg['num_save_threads']\n self.save_threads = [\n Thread(target=self.save_thread, args=(self.save_queue, ))\n for _ in range(self.num_save_threads)\n ]\n for t in self.save_threads:\n t.daemon = True\n t.start()\n\n def __del__(self):\n for _ in range(self.num_save_threads):\n self.save_queue.put(None)\n self.save_queue.join()\n for t in self.save_threads:\n t.join()\n\n def save_thread(self, queue: Queue):\n while True:\n args: SaveItem = queue.get()\n if args is None:\n queue.task_done()\n break\n if args.type == 'mask':\n # PIL image\n args.data.save(path.join(self.mask_dir, args.name + '.png'))\n elif args.type.startswith('visualization'):\n # numpy array, save with cv2\n vis_mode = args.type.split('_')[-1]\n data = cv2.cvtColor(args.data, cv2.COLOR_RGB2BGR)\n os.makedirs(path.join(self.visualization_dir, vis_mode), exist_ok=True)\n cv2.imwrite(path.join(self.visualization_dir, vis_mode, args.name + '.jpg'), data)\n elif args.type == 'soft_mask':\n # numpy array, save each channel with cv2\n num_channels = args.data.shape[0]\n # first channel is background -- ignore\n for i in range(1, num_channels):\n data = args.data[i]\n data = (data * 255).astype(np.uint8)\n cv2.imwrite(path.join(self.soft_mask_dir, f'{i}', args.name + '.png'), data)\n else:\n raise NotImplementedError\n queue.task_done()\n\n def _extract_frames(self, video: str):\n cap = cv2.VideoCapture(video)\n frame_index = 0\n print(f'Extracting frames from {video} into {self.image_dir}...')\n with tqdm() as bar:\n while (cap.isOpened()):\n _, frame = cap.read()\n if frame is None:\n break\n h, w = frame.shape[:2]\n if self.max_size > 0 and min(h, w) > self.max_size:\n new_w = (w * self.max_size // min(w, h))\n new_h = (h * self.max_size // min(w, h))\n frame = cv2.resize(frame, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)\n cv2.imwrite(path.join(self.image_dir, f'{frame_index:07d}.jpg'), frame)\n frame_index += 1\n bar.update()\n print('Done!')\n\n def _copy_resize_frames(self, images: str):\n image_list = os.listdir(images)\n print(f'Copying/resizing frames into {self.image_dir}...')\n for image_name in tqdm(image_list):\n if self.max_size < 0:\n # just copy\n shutil.copy2(path.join(images, image_name), self.image_dir)\n else:\n frame = cv2.imread(path.join(images, image_name))\n h, w = frame.shape[:2]\n if self.max_size > 0 and min(h, w) > self.max_size:\n new_w = (w * self.max_size // min(w, h))\n new_h = (h * self.max_size // min(w, h))\n frame = cv2.resize(frame, dsize=(new_w, new_h), interpolation=cv2.INTER_AREA)\n cv2.imwrite(path.join(self.image_dir, image_name), frame)\n print('Done!')\n\n def add_to_queue_with_warning(self, item: SaveItem):\n if self.save_queue.full():\n print(\n 'The save queue is full! You need more threads or faster IO. 
Program might pause.')\n self.save_queue.put(item)\n\n def save_mask(self, ti: int, mask: np.ndarray):\n # mask should be uint8 H*W without channels\n assert 0 <= ti < self.length\n assert isinstance(mask, np.ndarray)\n\n mask = Image.fromarray(mask)\n mask.putpalette(self.palette)\n self.invalidate(ti)\n self.add_to_queue_with_warning(SaveItem('mask', mask, self.names[ti]))\n\n def save_visualization(self, ti: int, vis_mode: str, image: np.ndarray):\n # image should be uint8 3*H*W\n assert 0 <= ti < self.length\n assert isinstance(image, np.ndarray)\n\n self.add_to_queue_with_warning(SaveItem(f'visualization_{vis_mode}', image, self.names[ti]))\n\n def save_soft_mask(self, ti: int, prob: np.ndarray):\n # mask should be float (num_objects+1)*H*W np array\n assert 0 <= ti < self.length\n assert isinstance(prob, np.ndarray)\n\n self.add_to_queue_with_warning(SaveItem('soft_mask', prob, self.names[ti]))\n\n def _get_image_unbuffered(self, ti: int):\n # returns H*W*3 uint8 array\n assert 0 <= ti < self.length\n\n image = Image.open(path.join(self.image_dir, self.names[ti] + '.jpg')).convert('RGB')\n image = np.array(image)\n return image\n\n def _get_mask_unbuffered(self, ti: int):\n # returns H*W uint8 array\n assert 0 <= ti < self.length\n\n mask_path = path.join(self.mask_dir, self.names[ti] + '.png')\n if path.exists(mask_path):\n mask = Image.open(mask_path)\n mask = np.array(mask)\n return mask\n else:\n return None\n\n def import_mask(self, file_name: str, size: Optional[Tuple[int, int]] = None):\n # read an mask file and resize it to exactly match the canvas size\n image = Image.open(file_name)\n if size is not None:\n # PIL uses (width, height)\n image = image.resize((size[1], size[0]), resample=Image.Resampling.NEAREST)\n image = np.array(image)\n return image\n\n def import_layer(self, file_name: str, size: Tuple[int, int]):\n # read a RGBA/RGB file and resize it such that the entire layer is visible in the canvas\n # and then pad it to the canvas size (h, w)\n image = Image.open(file_name).convert('RGBA')\n im_w, im_h = image.size\n im_ratio = im_w / im_h\n canvas_ratio = size[1] / size[0]\n if im_ratio < canvas_ratio:\n # fit height\n new_h = size[0]\n new_w = int(new_h * im_ratio)\n else:\n # fit width\n new_w = size[1]\n new_h = int(new_w / im_ratio)\n image = image.resize((new_w, new_h), resample=Image.Resampling.BILINEAR)\n image = np.array(image)\n # padding\n pad_h = (size[0] - new_h) // 2\n pad_w = (size[1] - new_w) // 2\n image = np.pad(image,\n ((pad_h, size[0] - new_h - pad_h), (pad_w, size[1] - new_w - pad_w), (0, 0)),\n mode='constant',\n constant_values=0)\n\n return image\n\n def invalidate(self, ti: int):\n # the image buffer is never invalidated\n self.get_mask.invalidate((ti, ))\n\n def __len__(self):\n return self.length\n\n @property\n def T(self) -> int:\n return self.length\n\n @property\n def h(self) -> int:\n return self.height\n\n @property\n def w(self) -> int:\n return self.width" }, { "identifier": "GUI", "path": "gui/gui.py", "snippet": "class GUI(QWidget):\n def __init__(self, controller, cfg: DictConfig) -> None:\n super().__init__()\n\n # callbacks to be set by the controller\n self.on_mouse_motion_xy = None\n self.click_fn = None\n\n self.controller = controller\n self.cfg = cfg\n self.h = controller.h\n self.w = controller.w\n self.T = controller.T\n\n # set up the window\n self.setWindowTitle(f'Cutie demo: {cfg[\"workspace\"]}')\n self.setGeometry(100, 100, self.w + 200, self.h + 200)\n self.setWindowIcon(QIcon('docs/icon.png'))\n\n # set up some 
buttons\n self.play_button = QPushButton('Play video')\n self.play_button.clicked.connect(self.on_play_video)\n self.commit_button = QPushButton('Commit to permanent memory')\n self.commit_button.clicked.connect(controller.on_commit)\n self.export_video_button = QPushButton('Export as video')\n self.export_video_button.clicked.connect(controller.on_export_visualization)\n self.export_binary_button = QPushButton('Export binary masks')\n self.export_binary_button.clicked.connect(controller.on_export_binary)\n\n self.forward_run_button = QPushButton('Propagate forward')\n self.forward_run_button.clicked.connect(controller.on_forward_propagation)\n self.forward_run_button.setMinimumWidth(150)\n\n self.backward_run_button = QPushButton('Propagate backward')\n self.backward_run_button.clicked.connect(controller.on_backward_propagation)\n self.backward_run_button.setMinimumWidth(150)\n\n # universal progressbar\n self.progressbar = QProgressBar()\n self.progressbar.setMinimum(0)\n self.progressbar.setMaximum(100)\n self.progressbar.setValue(0)\n self.progressbar.setMinimumWidth(200)\n\n self.reset_frame_button = QPushButton('Reset frame')\n self.reset_frame_button.clicked.connect(controller.on_reset_mask)\n self.reset_object_button = QPushButton('Reset object')\n self.reset_object_button.clicked.connect(controller.on_reset_object)\n\n # set up the LCD\n self.lcd = QTextEdit()\n self.lcd.setReadOnly(True)\n self.lcd.setMaximumHeight(28)\n self.lcd.setMaximumWidth(150)\n self.lcd.setText('{: 5d} / {: 5d}'.format(0, controller.T - 1))\n\n # current object id\n self.object_dial = QSpinBox()\n self.object_dial.setReadOnly(False)\n self.object_dial.setMinimumSize(50, 30)\n self.object_dial.setMinimum(1)\n self.object_dial.setMaximum(controller.num_objects)\n self.object_dial.editingFinished.connect(controller.on_object_dial_change)\n\n self.object_color = QLabel()\n self.object_color.setMinimumSize(100, 30)\n self.object_color.setAlignment(Qt.AlignmentFlag.AlignCenter)\n\n self.frame_name = QLabel()\n self.frame_name.setMinimumSize(100, 30)\n self.frame_name.setAlignment(Qt.AlignmentFlag.AlignLeft)\n\n # timeline slider\n self.tl_slider = QSlider(Qt.Orientation.Horizontal)\n self.tl_slider.valueChanged.connect(controller.on_slider_update)\n self.tl_slider.setMinimum(0)\n self.tl_slider.setMaximum(controller.T - 1)\n self.tl_slider.setValue(0)\n self.tl_slider.setTickPosition(QSlider.TickPosition.TicksBelow)\n self.tl_slider.setTickInterval(1)\n\n # combobox\n self.combo = QComboBox(self)\n self.combo.addItem(\"mask\")\n self.combo.addItem(\"davis\")\n self.combo.addItem(\"fade\")\n self.combo.addItem(\"light\")\n self.combo.addItem(\"popup\")\n self.combo.addItem(\"layer\")\n self.combo.setCurrentText('davis')\n self.combo.currentTextChanged.connect(controller.set_vis_mode)\n\n self.save_visualization_checkbox = QCheckBox(self)\n self.save_visualization_checkbox.toggled.connect(controller.on_save_visualization_toggle)\n self.save_visualization_checkbox.setChecked(False)\n\n self.save_soft_mask_checkbox = QCheckBox(self)\n self.save_soft_mask_checkbox.toggled.connect(controller.on_save_soft_mask_toggle)\n self.save_soft_mask_checkbox.setChecked(False)\n\n # controls for output FPS and bitrate\n self.fps_dial = QSpinBox()\n self.fps_dial.setReadOnly(False)\n self.fps_dial.setMinimumSize(40, 30)\n self.fps_dial.setMinimum(1)\n self.fps_dial.setMaximum(60)\n self.fps_dial.setValue(cfg['output_fps'])\n self.fps_dial.editingFinished.connect(controller.on_fps_dial_change)\n\n self.bitrate_dial = QSpinBox()\n 
self.bitrate_dial.setReadOnly(False)\n self.bitrate_dial.setMinimumSize(40, 30)\n self.bitrate_dial.setMinimum(1)\n self.bitrate_dial.setMaximum(100)\n self.bitrate_dial.setValue(cfg['output_bitrate'])\n self.bitrate_dial.editingFinished.connect(controller.on_bitrate_dial_change)\n\n # Main canvas -> QLabel\n self.main_canvas = QLabel()\n self.main_canvas.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)\n self.main_canvas.setAlignment(Qt.AlignmentFlag.AlignCenter)\n self.main_canvas.setMinimumSize(100, 100)\n\n self.main_canvas.mousePressEvent = self.on_mouse_press\n self.main_canvas.mouseMoveEvent = self.on_mouse_motion\n self.main_canvas.setMouseTracking(True) # Required for all-time tracking\n self.main_canvas.mouseReleaseEvent = self.on_mouse_release\n\n # clearing memory\n self.clear_all_mem_button = QPushButton('Reset all memory')\n self.clear_all_mem_button.clicked.connect(controller.on_clear_memory)\n self.clear_non_perm_mem_button = QPushButton('Reset non-permanent memory')\n self.clear_non_perm_mem_button.clicked.connect(controller.on_clear_non_permanent_memory)\n\n # displaying memory usage\n self.perm_mem_gauge, self.perm_mem_gauge_layout = create_gauge('Permanent memory size')\n self.work_mem_gauge, self.work_mem_gauge_layout = create_gauge('Working memory size')\n self.long_mem_gauge, self.long_mem_gauge_layout = create_gauge('Long-term memory size')\n self.gpu_mem_gauge, self.gpu_mem_gauge_layout = create_gauge(\n 'GPU mem. (all proc, w/ caching)')\n self.torch_mem_gauge, self.torch_mem_gauge_layout = create_gauge(\n 'GPU mem. (torch, w/o caching)')\n\n # Parameters setting\n self.work_mem_min, self.work_mem_min_layout = create_parameter_box(\n 1, 100, 'Min. working memory frames', callback=controller.on_work_min_change)\n self.work_mem_max, self.work_mem_max_layout = create_parameter_box(\n 2, 100, 'Max. working memory frames', callback=controller.on_work_max_change)\n self.long_mem_max, self.long_mem_max_layout = create_parameter_box(\n 1000,\n 100000,\n 'Max. 
long-term memory size',\n step=1000,\n callback=controller.update_config)\n self.mem_every_box, self.mem_every_box_layout = create_parameter_box(\n 1, 100, 'Memory frame every (r)', callback=controller.update_config)\n\n # import mask/layer\n self.import_mask_button = QPushButton('Import mask')\n self.import_mask_button.clicked.connect(controller.on_import_mask)\n self.import_layer_button = QPushButton('Import layer')\n self.import_layer_button.clicked.connect(controller.on_import_layer)\n\n # Console on the GUI\n self.console = QPlainTextEdit()\n self.console.setReadOnly(True)\n self.console.setMinimumHeight(100)\n self.console.setMaximumHeight(100)\n\n # Tips for the users\n self.tips = QTextEdit()\n self.tips.setReadOnly(True)\n self.tips.setTextInteractionFlags(Qt.NoTextInteraction)\n self.tips.setSizePolicy(QSizePolicy.Policy.Expanding, QSizePolicy.Policy.Expanding)\n with open('./gui/TIPS.md') as f:\n self.tips.setMarkdown(f.read())\n\n # navigator\n navi = QHBoxLayout()\n\n interact_subbox = QVBoxLayout()\n interact_topbox = QHBoxLayout()\n interact_botbox = QHBoxLayout()\n interact_topbox.setAlignment(Qt.AlignmentFlag.AlignCenter)\n interact_topbox.addWidget(self.lcd)\n interact_topbox.addWidget(self.play_button)\n interact_topbox.addWidget(self.reset_frame_button)\n interact_topbox.addWidget(self.reset_object_button)\n interact_botbox.addWidget(QLabel('Current object ID:'))\n interact_botbox.addWidget(self.object_dial)\n interact_botbox.addWidget(self.object_color)\n interact_botbox.addWidget(self.frame_name)\n interact_subbox.addLayout(interact_topbox)\n interact_subbox.addLayout(interact_botbox)\n interact_botbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n navi.addLayout(interact_subbox)\n\n apply_fixed_size_policy = lambda x: x.setSizePolicy(QSizePolicy.Policy.Fixed, QSizePolicy.\n Policy.Fixed)\n apply_to_all_children_widget(interact_topbox, apply_fixed_size_policy)\n apply_to_all_children_widget(interact_botbox, apply_fixed_size_policy)\n\n navi.addStretch(1)\n navi.addStretch(1)\n overlay_subbox = QVBoxLayout()\n overlay_topbox = QHBoxLayout()\n overlay_botbox = QHBoxLayout()\n overlay_topbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n overlay_botbox.setAlignment(Qt.AlignmentFlag.AlignLeft)\n overlay_topbox.addWidget(QLabel('Overlay mode'))\n overlay_topbox.addWidget(self.combo)\n overlay_topbox.addWidget(QLabel('Save soft mask during propagation'))\n overlay_topbox.addWidget(self.save_soft_mask_checkbox)\n overlay_topbox.addWidget(self.export_binary_button)\n overlay_botbox.addWidget(QLabel('Save overlay'))\n overlay_botbox.addWidget(self.save_visualization_checkbox)\n overlay_botbox.addWidget(self.export_video_button)\n overlay_botbox.addWidget(QLabel('Output FPS: '))\n overlay_botbox.addWidget(self.fps_dial)\n overlay_botbox.addWidget(QLabel('Output bitrate (Mbps): '))\n overlay_botbox.addWidget(self.bitrate_dial)\n overlay_subbox.addLayout(overlay_topbox)\n overlay_subbox.addLayout(overlay_botbox)\n navi.addLayout(overlay_subbox)\n apply_to_all_children_widget(overlay_topbox, apply_fixed_size_policy)\n apply_to_all_children_widget(overlay_botbox, apply_fixed_size_policy)\n\n navi.addStretch(1)\n control_subbox = QVBoxLayout()\n control_topbox = QHBoxLayout()\n control_botbox = QHBoxLayout()\n control_topbox.addWidget(self.commit_button)\n control_topbox.addWidget(self.forward_run_button)\n control_topbox.addWidget(self.backward_run_button)\n control_botbox.addWidget(self.progressbar)\n control_subbox.addLayout(control_topbox)\n control_subbox.addLayout(control_botbox)\n 
navi.addLayout(control_subbox)\n\n # Drawing area main canvas\n draw_area = QHBoxLayout()\n draw_area.addWidget(self.main_canvas, 4)\n\n # right area\n right_area = QVBoxLayout()\n right_area.setAlignment(Qt.AlignmentFlag.AlignBottom)\n right_area.addWidget(self.tips)\n # right_area.addStretch(1)\n\n # Parameters\n right_area.addLayout(self.perm_mem_gauge_layout)\n right_area.addLayout(self.work_mem_gauge_layout)\n right_area.addLayout(self.long_mem_gauge_layout)\n right_area.addLayout(self.gpu_mem_gauge_layout)\n right_area.addLayout(self.torch_mem_gauge_layout)\n right_area.addWidget(self.clear_all_mem_button)\n right_area.addWidget(self.clear_non_perm_mem_button)\n right_area.addLayout(self.work_mem_min_layout)\n right_area.addLayout(self.work_mem_max_layout)\n right_area.addLayout(self.long_mem_max_layout)\n right_area.addLayout(self.mem_every_box_layout)\n\n # import mask/layer\n import_area = QHBoxLayout()\n import_area.setAlignment(Qt.AlignmentFlag.AlignBottom)\n import_area.addWidget(self.import_mask_button)\n import_area.addWidget(self.import_layer_button)\n right_area.addLayout(import_area)\n\n # console\n right_area.addWidget(self.console)\n\n draw_area.addLayout(right_area, 1)\n\n layout = QVBoxLayout()\n layout.addLayout(draw_area)\n layout.addWidget(self.tl_slider)\n layout.addLayout(navi)\n self.setLayout(layout)\n\n # timer to play video\n self.timer = QTimer()\n self.timer.setSingleShot(False)\n self.timer.timeout.connect(controller.on_play_video_timer)\n\n # timer to update GPU usage\n self.gpu_timer = QTimer()\n self.gpu_timer.setSingleShot(False)\n self.gpu_timer.timeout.connect(controller.on_gpu_timer)\n self.gpu_timer.setInterval(2000)\n self.gpu_timer.start()\n\n # Objects shortcuts\n for i in range(1, controller.num_objects + 1):\n QShortcut(QKeySequence(str(i)),\n self).activated.connect(functools.partial(controller.hit_number_key, i))\n QShortcut(QKeySequence(f\"Ctrl+{i}\"),\n self).activated.connect(functools.partial(controller.hit_number_key, i))\n\n # <- and -> shortcuts\n QShortcut(QKeySequence(Qt.Key.Key_Left), self).activated.connect(controller.on_prev_frame)\n QShortcut(QKeySequence(Qt.Key.Key_Right), self).activated.connect(controller.on_next_frame)\n\n def resizeEvent(self, event):\n self.controller.show_current_frame()\n\n def text(self, text):\n self.console.moveCursor(QTextCursor.MoveOperation.End)\n self.console.insertPlainText(text + '\\n')\n\n def set_canvas(self, image):\n height, width, channel = image.shape\n bytesPerLine = 3 * width\n\n qImg = QImage(image.data, width, height, bytesPerLine, QImage.Format.Format_RGB888)\n self.main_canvas.setPixmap(\n QPixmap(\n qImg.scaled(self.main_canvas.size(), Qt.AspectRatioMode.KeepAspectRatio,\n Qt.TransformationMode.FastTransformation)))\n\n self.main_canvas_size = self.main_canvas.size()\n self.image_size = qImg.size()\n\n def update_slider(self, value):\n self.lcd.setText('{: 3d} / {: 3d}'.format(value, self.controller.T - 1))\n self.tl_slider.setValue(value)\n\n def pixel_pos_to_image_pos(self, x, y):\n # Un-scale and un-pad the label coordinates into image coordinates\n oh, ow = self.image_size.height(), self.image_size.width()\n nh, nw = self.main_canvas_size.height(), self.main_canvas_size.width()\n\n h_ratio = nh / oh\n w_ratio = nw / ow\n dominate_ratio = min(h_ratio, w_ratio)\n\n # Solve scale\n x /= dominate_ratio\n y /= dominate_ratio\n\n # Solve padding\n fh, fw = nh / dominate_ratio, nw / dominate_ratio\n x -= (fw - ow) / 2\n y -= (fh - oh) / 2\n\n return x, y\n\n def 
is_pos_out_of_bound(self, x, y):\n x, y = self.pixel_pos_to_image_pos(x, y)\n\n out_of_bound = ((x < 0) or (y < 0) or (x > self.w - 1) or (y > self.h - 1))\n\n return out_of_bound\n\n def get_scaled_pos(self, x, y):\n x, y = self.pixel_pos_to_image_pos(x, y)\n\n x = max(0, min(self.w - 1, x))\n y = max(0, min(self.h - 1, y))\n\n return x, y\n\n def forward_propagation_start(self):\n self.backward_run_button.setEnabled(False)\n self.forward_run_button.setText('Pause propagation')\n\n def backward_propagation_start(self):\n self.forward_run_button.setEnabled(False)\n self.backward_run_button.setText('Pause propagation')\n\n def pause_propagation(self):\n self.forward_run_button.setEnabled(True)\n self.backward_run_button.setEnabled(True)\n self.clear_all_mem_button.setEnabled(True)\n self.clear_non_perm_mem_button.setEnabled(True)\n self.forward_run_button.setText('Propagate forward')\n self.backward_run_button.setText('propagate backward')\n self.tl_slider.setEnabled(True)\n\n def process_events(self):\n QApplication.processEvents()\n\n def on_mouse_press(self, event):\n if self.is_pos_out_of_bound(event.position().x(), event.position().y()):\n return\n\n ex, ey = self.get_scaled_pos(event.position().x(), event.position().y())\n if event.button() == Qt.MouseButton.LeftButton:\n action = 'left'\n elif event.button() == Qt.MouseButton.RightButton:\n action = 'right'\n elif event.button() == Qt.MouseButton.MiddleButton:\n action = 'middle'\n\n self.click_fn(action, ex, ey)\n\n def on_mouse_motion(self, event):\n ex, ey = self.get_scaled_pos(event.position().x(), event.position().y())\n self.on_mouse_motion_xy(ex, ey)\n\n def on_mouse_release(self, event):\n pass\n\n def on_play_video(self):\n if self.timer.isActive():\n self.timer.stop()\n self.play_button.setText('Play video')\n else:\n self.timer.start(1000 // 30)\n self.play_button.setText('Stop video')\n\n def open_file(self, prompt):\n options = QFileDialog.Options()\n file_name, _ = QFileDialog.getOpenFileName(self,\n prompt,\n \"\",\n \"Image files (*)\",\n options=options)\n return file_name\n\n def set_object_color(self, object_id: int):\n r, g, b = davis_palette_np[object_id]\n rgb = f'rgb({r},{g},{b})'\n self.object_color.setStyleSheet('QLabel {background: ' + rgb + ';}')\n self.object_color.setText(f'{object_id}')\n\n def progressbar_update(self, progress: float):\n self.progressbar.setValue(int(progress * 100))\n self.process_events()" }, { "identifier": "ClickController", "path": "gui/click_controller.py", "snippet": "class ClickController:\n def __init__(self, checkpoint_path: str, device: str = 'cuda', max_size: int = 800):\n model = utils.load_is_model(checkpoint_path, device, cpu_dist_maps=True)\n\n # Predictor params\n zoomin_params = {\n 'skip_clicks': 1,\n 'target_size': 480,\n 'expansion_ratio': 1.4,\n }\n\n predictor_params = {\n 'brs_mode': 'f-BRS-B',\n # 'brs_mode': 'NoBRS',\n 'prob_thresh': 0.5,\n 'zoom_in_params': zoomin_params,\n 'predictor_params': {\n 'net_clicks_limit': 8,\n 'max_size': max_size,\n },\n 'brs_opt_func_params': {\n 'min_iou_diff': 1e-3\n },\n 'lbfgs_params': {\n 'maxfun': 20\n },\n 'with_flip': True,\n }\n\n self.controller = InteractiveController(model, device, predictor_params)\n self.anchored = False\n self.device = device\n\n def unanchor(self):\n self.anchored = False\n\n def interact(self, image: torch.Tensor, x: int, y: int, is_positive: bool,\n prev_mask: torch.Tensor):\n if not self.anchored:\n image = image.to(self.device, non_blocking=True)\n self.controller.set_image(image)\n 
self.controller.reset_predictor()\n self.anchored = True\n\n self.controller.add_click(x, y, is_positive, prev_mask=prev_mask)\n # return self.controller.result_mask\n return self.controller.probs_history[-1][1]\n # return (self.controller.probs_history[-1][1] > 0.5).float()\n\n def undo(self):\n self.controller.undo_click()\n if len(self.controller.probs_history) == 0:\n return None\n else:\n return (self.controller.probs_history[-1][1] > 0.5).float()" }, { "identifier": "PropagationReader", "path": "gui/reader.py", "snippet": "class PropagationReader(Dataset):\n def __init__(self, res_man: ResourceManager, start_ti: int, direction: Literal['forward',\n 'backward']):\n self.res_man = res_man\n self.start_ti = start_ti\n self.direction = direction\n\n # skip the first frame\n if self.direction == 'forward':\n self.start_ti += 1\n self.length = self.res_man.T - self.start_ti\n elif self.direction == 'backward':\n self.start_ti -= 1\n self.length = self.start_ti + 1\n else:\n raise NotImplementedError\n\n self.to_tensor = ToTensor()\n\n def __getitem__(self, index: int):\n if self.direction == 'forward':\n ti = self.start_ti + index\n elif self.direction == 'backward':\n ti = self.start_ti - index\n else:\n raise NotImplementedError\n\n assert 0 <= ti < self.res_man.T\n\n image = self.res_man.get_image(ti)\n image_torch = self.to_tensor(image)\n\n return image, image_torch\n\n def __len__(self):\n return self.length" }, { "identifier": "get_data_loader", "path": "gui/reader.py", "snippet": "def get_data_loader(dataset: Dataset, num_workers: int):\n if 'linux' in sys.platform:\n loader = DataLoader(dataset,\n batch_size=None,\n shuffle=False,\n num_workers=num_workers,\n collate_fn=lambda x: x)\n else:\n print(f'Non-linux platform {sys.platform} detected, using single-threaded dataloader')\n loader = DataLoader(dataset,\n batch_size=None,\n shuffle=False,\n num_workers=0,\n collate_fn=lambda x: x)\n return loader" }, { "identifier": "convert_frames_to_video", "path": "gui/exporter.py", "snippet": "def convert_frames_to_video(\n image_folder: str,\n output_path: str,\n fps: int = 24,\n bitrate: int = 1, # in Mbps\n progress_callback=None) -> None:\n images = [img for img in sorted(os.listdir(image_folder)) if img.endswith(\".jpg\")]\n frame = cv2.imread(os.path.join(image_folder, images[0]))\n height, width, layers = frame.shape\n\n output = av.open(output_path, mode=\"w\")\n\n stream = output.add_stream(\"h264\", rate=fps)\n stream.width = width\n stream.height = height\n stream.pix_fmt = \"yuv420p\"\n stream.bit_rate = bitrate * (10**7)\n\n for i, img_path in enumerate(images):\n img = cv2.imread(os.path.join(image_folder, img_path))\n frame = av.VideoFrame.from_ndarray(img, format='bgr24')\n packet = stream.encode(frame)\n output.mux(packet)\n\n if progress_callback is not None and i % 10 == 0:\n progress_callback(i / len(images))\n\n # flush\n packet = stream.encode(None)\n output.mux(packet)\n\n output.close()" }, { "identifier": "convert_mask_to_binary", "path": "gui/exporter.py", "snippet": "def convert_mask_to_binary(mask_folder: str,\n output_path: str,\n target_objects: List[int],\n progress_callback=None) -> None:\n masks = [img for img in sorted(os.listdir(mask_folder)) if img.endswith(\".png\")]\n\n for i, mask_path in enumerate(masks):\n mask = Image.open(os.path.join(mask_folder, mask_path))\n mask = np.array(mask)\n mask = np.where(np.isin(mask, target_objects), 255, 0)\n cv2.imwrite(os.path.join(output_path, mask_path), mask)\n\n if progress_callback is not None and i % 10 == 
0:\n progress_callback(i / len(masks))" }, { "identifier": "download_models_if_needed", "path": "scripts/download_models.py", "snippet": "def download_models_if_needed():\n os.makedirs('weights', exist_ok=True)\n for link, md5 in _links:\n # download file if not exists with a progressbar\n filename = link.split('/')[-1]\n if not os.path.exists(os.path.join('weights', filename)) or hashlib.md5(open(os.path.join('weights', filename), 'rb').read()).hexdigest() != md5:\n print(f'Downloading {filename}...')\n r = requests.get(link, stream=True)\n total_size = int(r.headers.get('content-length', 0))\n block_size = 1024\n t = tqdm(total=total_size, unit='iB', unit_scale=True)\n with open(os.path.join('weights', filename), 'wb') as f:\n for data in r.iter_content(block_size):\n t.update(len(data))\n f.write(data)\n t.close()\n if total_size != 0 and t.n != total_size:\n raise RuntimeError('Error while downloading %s' % filename)" } ]
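The exporter helpers retrieved in the context above have self-describing signatures; as a hedged usage sketch (the workspace layout, object IDs, and progress callbacks are illustrative assumptions, not taken from the record, and the output directories are assumed to already exist):

# Hedged usage sketch for the exporter helpers shown in the context above;
# paths, object IDs, and callbacks are illustrative assumptions.
from gui.exporter import convert_frames_to_video, convert_mask_to_binary

workspace = './workspace/example'  # hypothetical workspace path

# Encode the saved .jpg visualizations into an H.264 video via PyAV.
convert_frames_to_video(f'{workspace}/visualization/davis',
                        f'{workspace}/overlay.mp4',
                        fps=24, bitrate=1,
                        progress_callback=lambda p: print(f'video: {p:.0%}'))

# Collapse indexed .png masks for objects 1 and 2 into 0/255 binary masks.
convert_mask_to_binary(f'{workspace}/masks', f'{workspace}/binary_masks',
                       target_objects=[1, 2],
                       progress_callback=lambda p: print(f'masks: {p:.0%}'))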
import os
import logging
import cv2
import torch
import numpy as np
from os import path
from typing import Literal
from torch import mps
from torch import autocast
from torchvision.transforms.functional import to_tensor
from omegaconf import DictConfig, open_dict
from cutie.model.cutie import CUTIE
from cutie.inference.inference_core import InferenceCore
from gui.interaction import *
from gui.interactive_utils import *
from gui.resource_manager import ResourceManager
from gui.gui import GUI
from gui.click_controller import ClickController
from gui.reader import PropagationReader, get_data_loader
from gui.exporter import convert_frames_to_video, convert_mask_to_binary
from scripts.download_models import download_models_if_needed
17325
if loaded_mask is None: self.curr_mask.fill(0) else: self.curr_mask = loaded_mask.copy() self.curr_prob = None def convert_current_image_mask_torch(self, no_mask: bool = False): if self.curr_image_torch is None: self.curr_image_torch = to_tensor(self.curr_image_np).to(self.device, non_blocking=True) if self.curr_prob is None and not no_mask: self.curr_prob = index_numpy_to_one_hot_torch(self.curr_mask, self.num_objects + 1).to( self.device, non_blocking=True) def compose_current_im(self): self.vis_image = get_visualization(self.vis_mode, self.curr_image_np, self.curr_mask, self.overlay_layer, self.vis_target_objects) def update_canvas(self): self.gui.set_canvas(self.vis_image) def update_current_image_fast(self): # fast path, uses gpu. Changes the image in-place to avoid copying # thus current_image_torch must be voided afterwards self.vis_image = get_visualization_torch(self.vis_mode, self.curr_image_torch, self.curr_prob, self.overlay_layer_torch, self.vis_target_objects) self.curr_image_torch = None self.vis_image = np.ascontiguousarray(self.vis_image) if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) if self.save_soft_mask: self.res_man.save_soft_mask(self.curr_ti, self.curr_prob.cpu().numpy()) self.gui.set_canvas(self.vis_image) def show_current_frame(self, fast: bool = False): # Re-compute overlay and show the image if fast: self.update_current_image_fast() else: self.compose_current_im() if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) self.update_canvas() self.gui.update_slider(self.curr_ti) self.gui.frame_name.setText(self.res_man.names[self.curr_ti] + '.jpg') def set_vis_mode(self): self.vis_mode = self.gui.combo.currentText() self.show_current_frame() def save_current_mask(self): # save mask to hard disk self.res_man.save_mask(self.curr_ti, self.curr_mask) def on_slider_update(self): # if we are propagating, the on_run function will take care of everything # don't do duplicate work here self.curr_ti = self.gui.tl_slider.value() if not self.propagating: # with self.vis_cond: # self.vis_cond.notify() if self.curr_frame_dirty: self.save_current_mask() self.curr_frame_dirty = False self.reset_this_interaction() self.curr_ti = self.gui.tl_slider.value() self.load_current_image_mask() self.show_current_frame() def on_forward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_next_frame self.gui.forward_propagation_start() self.propagate_direction = 'forward' self.on_propagate() def on_backward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_prev_frame self.gui.backward_propagation_start() self.propagate_direction = 'backward' self.on_propagate() def on_pause(self): self.propagating = False self.gui.text(f'Propagation stopped at t={self.curr_ti}.') self.gui.pause_propagation() def on_propagate(self): # start to propagate with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): self.convert_current_image_mask_torch() self.gui.text(f'Propagation started at t={self.curr_ti}.') self.processor.clear_sensory_memory() self.curr_prob = self.processor.step(self.curr_image_torch, self.curr_prob[1:], idx_mask=False) self.curr_mask = torch_prob_to_numpy_mask(self.curr_prob) # clear self.interacted_prob = None self.reset_this_interaction() 
self.show_current_frame(fast=True) self.propagating = True self.gui.clear_all_mem_button.setEnabled(False) self.gui.clear_non_perm_mem_button.setEnabled(False) self.gui.tl_slider.setEnabled(False)
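In the all_code blob that follows, the try/except guard around the MPS import lost its body in extraction; given the `from torch import mps` entry in the import list and the message printed in the except branch, a consistent reconstruction (an assumption, not verbatim source) is:

# Reconstructed guard; the body is inferred from the import list and the
# printed message -- not copied verbatim from the repository.
try:
    from torch import mps
except:
    print('torch.MPS not available.')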
# fix conflicts between qt5 and cv2 os.environ.pop("QT_QPA_PLATFORM_PLUGIN_PATH") try: except: print('torch.MPS not available.') log = logging.getLogger() class MainController(): def __init__(self, cfg: DictConfig) -> None: super().__init__() self.initialized = False # setting up the workspace if cfg["workspace"] is None: if cfg["images"] is not None: basename = path.basename(cfg["images"]) elif cfg["video"] is not None: basename = path.basename(cfg["video"])[:-4] else: raise NotImplementedError('Either images, video, or workspace has to be specified') cfg["workspace"] = path.join(cfg['workspace_root'], basename) # reading arguments self.cfg = cfg self.num_objects = cfg['num_objects'] self.device = cfg['device'] self.amp = cfg['amp'] # initializing the network(s) self.initialize_networks() # main components self.res_man = ResourceManager(cfg) self.processor = InferenceCore(self.cutie, self.cfg) self.gui = GUI(self, self.cfg) # initialize control info self.length: int = self.res_man.length self.interaction: Interaction = None self.interaction_type: str = 'Click' self.curr_ti: int = 0 self.curr_object: int = 1 self.propagating: bool = False self.propagate_direction: Literal['forward', 'backward', 'none'] = 'none' self.last_ex = self.last_ey = 0 # current frame info self.curr_frame_dirty: bool = False self.curr_image_np: np.ndarray = np.zeros((self.h, self.w, 3), dtype=np.uint8) self.curr_image_torch: torch.Tensor = None self.curr_mask: np.ndarray = np.zeros((self.h, self.w), dtype=np.uint8) self.curr_prob: torch.Tensor = torch.zeros((self.num_objects + 1, self.h, self.w), dtype=torch.float).to(self.device) self.curr_prob[0] = 1 # visualization info self.vis_mode: str = 'davis' self.vis_image: np.ndarray = None self.save_visualization: bool = False self.save_soft_mask: bool = False self.interacted_prob: torch.Tensor = None self.overlay_layer: np.ndarray = None self.overlay_layer_torch: torch.Tensor = None # the object id used for popup/layer overlay self.vis_target_objects = list(range(1, self.num_objects + 1)) self.load_current_image_mask() self.show_current_frame() # initialize stuff self.update_memory_gauges() self.update_gpu_gauges() self.gui.work_mem_min.setValue(self.processor.memory.min_mem_frames) self.gui.work_mem_max.setValue(self.processor.memory.max_mem_frames) self.gui.long_mem_max.setValue(self.processor.memory.max_long_tokens) self.gui.mem_every_box.setValue(self.processor.mem_every) # for exporting videos self.output_fps = cfg['output_fps'] self.output_bitrate = cfg['output_bitrate'] # set callbacks self.gui.on_mouse_motion_xy = self.on_mouse_motion_xy self.gui.click_fn = self.click_fn self.gui.show() self.gui.text('Initialized.') self.initialized = True # try to load the default overlay self._try_load_layer('./docs/uiuc.png') self.gui.set_object_color(self.curr_object) self.update_config() def initialize_networks(self) -> None: download_models_if_needed() self.cutie = CUTIE(self.cfg).eval().to(self.device) model_weights = torch.load(self.cfg.weights, map_location=self.device) self.cutie.load_weights(model_weights) self.click_ctrl = ClickController(self.cfg.ritm_weights, device=self.device) def hit_number_key(self, number: int): if number == self.curr_object: return self.curr_object = number self.gui.object_dial.setValue(number) if self.click_ctrl is not None: self.click_ctrl.unanchor() self.gui.text(f'Current object changed to {number}.') self.gui.set_object_color(number) self.show_current_frame() def click_fn(self, action: Literal['left', 'right', 'middle'], x: int, y: int): 
if self.propagating: return last_interaction = self.interaction new_interaction = None with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): if action in ['left', 'right']: # left: positive click # right: negative click self.convert_current_image_mask_torch() image = self.curr_image_torch if (last_interaction is None or last_interaction.tar_obj != self.curr_object): # create new interaction is needed self.complete_interaction() self.click_ctrl.unanchor() new_interaction = ClickInteraction(image, self.curr_prob, (self.h, self.w), self.click_ctrl, self.curr_object) if new_interaction is not None: self.interaction = new_interaction self.interaction.push_point(x, y, is_neg=(action == 'right')) self.interacted_prob = self.interaction.predict().to(self.device, non_blocking=True) self.update_interacted_mask() self.update_gpu_gauges() elif action == 'middle': # middle: select a new visualization object target_object = self.curr_mask[int(y), int(x)] if target_object in self.vis_target_objects: self.vis_target_objects.remove(target_object) else: self.vis_target_objects.append(target_object) self.gui.text(f'Overlay target(s) changed to {self.vis_target_objects}') self.show_current_frame() return else: raise NotImplementedError def load_current_image_mask(self, no_mask: bool = False): self.curr_image_np = self.res_man.get_image(self.curr_ti) self.curr_image_torch = None if not no_mask: loaded_mask = self.res_man.get_mask(self.curr_ti) if loaded_mask is None: self.curr_mask.fill(0) else: self.curr_mask = loaded_mask.copy() self.curr_prob = None def convert_current_image_mask_torch(self, no_mask: bool = False): if self.curr_image_torch is None: self.curr_image_torch = to_tensor(self.curr_image_np).to(self.device, non_blocking=True) if self.curr_prob is None and not no_mask: self.curr_prob = index_numpy_to_one_hot_torch(self.curr_mask, self.num_objects + 1).to( self.device, non_blocking=True) def compose_current_im(self): self.vis_image = get_visualization(self.vis_mode, self.curr_image_np, self.curr_mask, self.overlay_layer, self.vis_target_objects) def update_canvas(self): self.gui.set_canvas(self.vis_image) def update_current_image_fast(self): # fast path, uses gpu. 
Changes the image in-place to avoid copying # thus current_image_torch must be voided afterwards self.vis_image = get_visualization_torch(self.vis_mode, self.curr_image_torch, self.curr_prob, self.overlay_layer_torch, self.vis_target_objects) self.curr_image_torch = None self.vis_image = np.ascontiguousarray(self.vis_image) if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) if self.save_soft_mask: self.res_man.save_soft_mask(self.curr_ti, self.curr_prob.cpu().numpy()) self.gui.set_canvas(self.vis_image) def show_current_frame(self, fast: bool = False): # Re-compute overlay and show the image if fast: self.update_current_image_fast() else: self.compose_current_im() if self.save_visualization: self.res_man.save_visualization(self.curr_ti, self.vis_mode, self.vis_image) self.update_canvas() self.gui.update_slider(self.curr_ti) self.gui.frame_name.setText(self.res_man.names[self.curr_ti] + '.jpg') def set_vis_mode(self): self.vis_mode = self.gui.combo.currentText() self.show_current_frame() def save_current_mask(self): # save mask to hard disk self.res_man.save_mask(self.curr_ti, self.curr_mask) def on_slider_update(self): # if we are propagating, the on_run function will take care of everything # don't do duplicate work here self.curr_ti = self.gui.tl_slider.value() if not self.propagating: # with self.vis_cond: # self.vis_cond.notify() if self.curr_frame_dirty: self.save_current_mask() self.curr_frame_dirty = False self.reset_this_interaction() self.curr_ti = self.gui.tl_slider.value() self.load_current_image_mask() self.show_current_frame() def on_forward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_next_frame self.gui.forward_propagation_start() self.propagate_direction = 'forward' self.on_propagate() def on_backward_propagation(self): if self.propagating: # acts as a pause button self.propagating = False self.propagate_direction = 'none' else: self.propagate_fn = self.on_prev_frame self.gui.backward_propagation_start() self.propagate_direction = 'backward' self.on_propagate() def on_pause(self): self.propagating = False self.gui.text(f'Propagation stopped at t={self.curr_ti}.') self.gui.pause_propagation() def on_propagate(self): # start to propagate with autocast(self.device, enabled=(self.amp and self.device == 'cuda')): self.convert_current_image_mask_torch() self.gui.text(f'Propagation started at t={self.curr_ti}.') self.processor.clear_sensory_memory() self.curr_prob = self.processor.step(self.curr_image_torch, self.curr_prob[1:], idx_mask=False) self.curr_mask = torch_prob_to_numpy_mask(self.curr_prob) # clear self.interacted_prob = None self.reset_this_interaction() self.show_current_frame(fast=True) self.propagating = True self.gui.clear_all_mem_button.setEnabled(False) self.gui.clear_non_perm_mem_button.setEnabled(False) self.gui.tl_slider.setEnabled(False)
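The ClickInteraction path in click_fn above routes through the retrieved ClickController; a minimal round-trip sketch, assuming only the interface shown in the snippet (the checkpoint path, CHW image layout, and resolution are invented):

# Minimal click-to-mask round trip against the ClickController interface
# documented above; weights path, image layout, and sizes are assumptions.
import torch
from gui.click_controller import ClickController

ctrl = ClickController('weights/ritm_weights.pth', device='cpu')  # hypothetical checkpoint
h, w = 480, 854
image = torch.rand(3, h, w)      # assumed CHW float frame in [0, 1]
prev_mask = torch.zeros(h, w)    # no prior mask for this object

# The first call anchors the controller on this image, then adds a positive click.
prob = ctrl.interact(image, 200, 150, is_positive=True, prev_mask=prev_mask)
mask = (prob > 0.5).float()      # same threshold ClickController.undo applies

ctrl.undo()                      # returns None once the click history is empty
ctrl.unanchor()                  # force re-anchoring before the next frame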
dataset = PropagationReader(self.res_man, self.curr_ti, self.propagate_direction)
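The target next_line above constructs the retrieved PropagationReader; per the snippets in this record, that reader is drained through get_data_loader as an iterable of (numpy image, tensor image) pairs. A minimal sketch (the wrapper name and worker count are invented for illustration):

# How a PropagationReader is typically consumed via get_data_loader,
# per the snippets in this record; the function name and num_workers
# are assumptions.
from gui.reader import PropagationReader, get_data_loader

def iterate_propagation_frames(res_man, start_ti, direction='forward'):
    # res_man: a gui.resource_manager.ResourceManager; direction is
    # 'forward' or 'backward', matching PropagationReader.__init__.
    dataset = PropagationReader(res_man, start_ti, direction)
    loader = get_data_loader(dataset, num_workers=4)
    for image_np, image_torch in loader:
        # each item is (raw numpy frame, ToTensor()'d frame),
        # per PropagationReader.__getitem__
        yield image_np, image_torch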
5
2023-10-19 17:49:24+00:00
24k
ZhengyiLuo/PerpetualHumanoidControl
scripts/vis/vis_smpl_o3d_multi.py
[ { "identifier": "SMPL_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPL_Parser(_SMPL):\n\n def __init__(self, create_transl=False, *args, **kwargs):\n \"\"\"SMPL model constructor\n Parameters\n ----------\n model_path: str\n The path to the folder or to the file where the model\n parameters are stored\n data_struct: Strct\n A struct object. If given, then the parameters of the model are\n read from the object. Otherwise, the model tries to read the\n parameters from the given `model_path`. (default = None)\n create_global_orient: bool, optional\n Flag for creating a member variable for the global orientation\n of the body. (default = True)\n global_orient: torch.tensor, optional, Bx3\n The default value for the global orientation variable.\n (default = None)\n create_body_pose: bool, optional\n Flag for creating a member variable for the pose of the body.\n (default = True)\n body_pose: torch.tensor, optional, Bx(Body Joints * 3)\n The default value for the body pose variable.\n (default = None)\n create_betas: bool, optional\n Flag for creating a member variable for the shape space\n (default = True).\n betas: torch.tensor, optional, Bx10\n The default value for the shape member variable.\n (default = None)\n create_transl: bool, optional\n Flag for creating a member variable for the translation\n of the body. (default = True)\n transl: torch.tensor, optional, Bx3\n The default value for the transl variable.\n (default = None)\n dtype: torch.dtype, optional\n The data type for the created variables\n batch_size: int, optional\n The batch size used for creating the member variables\n joint_mapper: object, optional\n An object that re-maps the joints. Useful if one wants to\n re-order the SMPL joints to some other convention (e.g. 
MSCOCO)\n (default = None)\n gender: str, optional\n Which gender to load\n vertex_ids: dict, optional\n A dictionary containing the indices of the extra vertices that\n will be selected\n \"\"\"\n super(SMPL_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPL_BONE_ORDER_NAMES\n\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"x\", \"y\", \"z\"] for x in self.joint_names}\n self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n self.joint_range[\"L_Shoulder\"] *= 4\n self.joint_range[\"R_Shoulder\"] *= 4\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n\n # self.contype = {\n # 3: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 1: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n # self.conaffinity = {\n # 1: ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee','R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Neck', 'Head','L_Thorax', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Elbow', 'R_Wrist', 'R_Hand'],\n # 3: ['Chest', \"L_Shoulder\", \"R_Shoulder\"]\n # }\n\n self.zero_pose = torch.zeros(1, 72).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPL_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 72\n \"\"\"\n if pose.shape[1] != 72:\n pose = pose.reshape(-1, 72)\n\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n if th_betas.shape[-1] == 16:\n th_betas = th_betas[:, :10]\n\n batch_size = pose.shape[0]\n\n smpl_output = self.forward(\n betas=th_betas,\n transl=th_trans,\n body_pose=pose[:, 3:],\n global_orient=pose[:, :3],\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints[:, :24]\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, zero_pose=None, betas=torch.zeros(1, 10).float()):\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n parents_dict = {joint_names[i]: joint_names[parents[i]] for i in range(len(joint_names))}\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names, joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self, zero_pose=None, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n if 
zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr[0].numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )\n\n def get_mesh_offsets_batch(self, betas=torch.zeros(1, 10), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose.repeat(betas.shape[0], 1), th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n\n joint_pos = Jtr\n joint_offsets = {joint_names[c]: (joint_pos[:, c] - joint_pos[:, p]) if c > 0 else joint_pos[:, c] for c, p in enumerate(smpl_joint_parents)}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}\n\n skin_weights = self.lbs_weights\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLH_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLH_Parser(_SMPLH):\n\n def __init__(self, *args, **kwargs):\n super(SMPLH_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLH_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n L_hand_pose=pose[:, 66:111],\n R_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # joints = smpl_output.joints[:,JOINST_TO_USE]\n return vertices, joints\n\n def get_offsets(self, betas=torch.zeros(1, 
16).float()):\n with torch.no_grad():\n verts, jts = self.get_joints_verts(self.zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = jts.detach().cpu().numpy()\n\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n names_smpl = self.joint_names\n offset_smpl_dict = {names_smpl[i]: offsets_smpl[i] for i in range(len(names_smpl))}\n parents_dict = {names_smpl[i]: names_smpl[parents[i]] for i in range(len(names_smpl))}\n parents_dict[\"Hips\"] = \"None\"\n channels = [\"z\", \"y\", \"x\"]\n\n return offset_smpl_dict, parents_dict, channels\n\n def get_mesh_offsets(self, betas=torch.zeros(1, 16), flatfoot=False):\n with torch.no_grad():\n joint_names = self.joint_names\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n\n verts_np = verts.detach().cpu().numpy()\n verts = verts_np[0]\n\n if flatfoot:\n feet_subset = verts[:, 1] < np.min(verts[:, 1]) + 0.01\n verts[feet_subset, 1] = np.mean(verts[feet_subset][:, 1])\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents)}\n\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()\n return (\n verts,\n joint_pos,\n skin_weights,\n joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPLX_Parser", "path": "uhc/smpllib/smpl_parser.py", "snippet": "class SMPLX_Parser(_SMPLX):\n\n def __init__(self, *args, **kwargs):\n super(SMPLX_Parser, self).__init__(*args, **kwargs)\n self.device = next(self.parameters()).device\n self.joint_names = SMPLH_BONE_ORDER_NAMES\n self.joint_axes = {x: np.identity(3) for x in self.joint_names}\n self.joint_dofs = {x: [\"z\", \"y\", \"x\"] for x in self.joint_names}\n self.joint_range = {x: np.hstack([np.ones([3, 1]) * -np.pi, np.ones([3, 1]) * np.pi]) for x in self.joint_names}\n self.joint_range[\"L_Elbow\"] *= 4\n self.joint_range[\"R_Elbow\"] *= 4\n # import ipdb\n # ipdb.set_trace()\n\n self.contype = {1: self.joint_names}\n self.conaffinity = {1: self.joint_names}\n self.zero_pose = torch.zeros(1, 156).float()\n self.joint_to_use = [SMPLX_BONE_ORDER_NAMES.index(i) for i in SMPLH_BONE_ORDER_NAMES]\n self.parents_to_use = np.concatenate([np.arange(0, 22), np.arange(25, 55)])\n\n def forward(self, *args, **kwargs):\n smpl_output = super(SMPLX_Parser, self).forward(*args, **kwargs)\n return smpl_output\n\n def get_joints_verts(self, pose, th_betas=None, th_trans=None):\n \"\"\"\n Pose should be batch_size x 156\n \"\"\"\n\n if pose.shape[1] != 156:\n pose = pose.reshape(-1, 156)\n pose = pose.float()\n if th_betas is not None:\n th_betas = th_betas.float()\n\n batch_size = pose.shape[0]\n smpl_output = self.forward(\n body_pose=pose[:, 3:66],\n global_orient=pose[:, :3],\n left_hand_pose=pose[:, 66:111],\n right_hand_pose=pose[:, 111:156],\n betas=th_betas,\n transl=th_trans,\n )\n vertices = smpl_output.vertices\n joints = smpl_output.joints\n # return vertices, joints\n return vertices, joints\n \n \n\n def get_offsets(self, 
v_template=None, zero_pose=None, betas=torch.zeros(1, 26).float()):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n if zero_pose is None:\n verts, Jtr = self.get_joints_verts(self.zero_pose, th_betas=betas)\n else:\n verts, Jtr = self.get_joints_verts(zero_pose, th_betas=betas)\n verts_np = verts.detach().cpu().numpy()\n jts_np = Jtr.detach().cpu().numpy()\n parents = self.parents.cpu().numpy()\n offsets_smpl = [np.array([0, 0, 0])]\n for i in range(1, len(parents)):\n p_id = parents[i]\n p3d = jts_np[0, p_id]\n curr_3d = jts_np[0, i]\n offset_curr = curr_3d - p3d\n offsets_smpl.append(offset_curr)\n offsets_smpl = np.array(offsets_smpl)\n joint_names = self.joint_names\n joint_pos = Jtr[0].numpy()\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents)}\n parents_dict = {joint_names[i]: joint_names[parents[i]] for i in range(len(joint_names))}\n channels = [\"z\", \"y\", \"x\"]\n skin_weights = self.lbs_weights.numpy()\n return (verts[0], jts_np[0], skin_weights, self.joint_names, joint_offsets, parents_dict, channels, self.joint_range)\n\n def get_mesh_offsets(self, v_template=None):\n if not v_template is None:\n self.v_template = v_template\n with torch.no_grad():\n # joint_names = self.joint_names\n joint_names = SMPLX_BONE_ORDER_NAMES\n verts, Jtr = self.get_joints_verts(self.zero_pose)\n\n smpl_joint_parents = self.parents.cpu().numpy()\n joint_pos = Jtr[0].numpy()\n # print(\n # joint_pos.shape,\n # smpl_joint_parents.shape,\n # len(self.parents_to_use),\n # self.parents.cpu().numpy().shape,\n # )\n joint_offsets = {joint_names[c]: (joint_pos[c] - joint_pos[p]) if c > 0 else joint_pos[c] for c, p in enumerate(smpl_joint_parents) if joint_names[c] in self.joint_names}\n joint_parents = {x: joint_names[i] if i >= 0 else None for x, i in zip(joint_names, smpl_joint_parents) if joint_names[i] in self.joint_names}\n\n verts = verts[0].numpy()\n # skin_weights = smpl_layer.th_weights.numpy()\n skin_weights = self.lbs_weights.numpy()[:, self.parents_to_use]\n return (\n verts,\n joint_pos,\n skin_weights,\n self.joint_names,\n joint_offsets,\n joint_parents,\n self.joint_axes,\n self.joint_dofs,\n self.joint_range,\n self.contype,\n self.conaffinity,\n )" }, { "identifier": "SMPL_BONE_ORDER_NAMES", "path": "uhc/smpllib/smpl_mujoco.py", "snippet": "class SMPLConverter:\n def __init__(self, model, new_model, smpl_model=\"smpl\"):\n def qpos_smpl_2_new(self, qpos):\n def qvel_smpl_2_new(self, qpvel):\n def qpos_new_2_smpl(self, qpos):\n def qvel_new_2_smpl(self, qvel):\n def jpos_new_2_smpl(self, jpos):\n def get_new_qpos_lim(self):\n def get_new_qvel_lim(self):\n def get_new_body_lim(self):\n def get_new_diff_weight(self):\n def get_new_jkp(self):\n def get_new_jkd(self):\n def get_new_a_scale(self):\n def get_new_torque_limit(self):\ndef smplh_to_smpl(pose):\ndef smpl_to_smplh(pose):\ndef smpl_to_qpose(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_multi(\n pose,\n offset,\n mujoco_body_order,\n num_people=1,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef smpl_to_qpose_torch(\n pose,\n mj_model,\n trans=None,\n normalize=False,\n random_root=False,\n count_offset=True,\n use_quat=False,\n 
euler_order=\"ZYX\",\n model=\"smpl\",\n):\ndef qpos_to_smpl(qpos, mj_model, smpl_model=\"smpl\"):\ndef qpos_to_smpl_torch(qpos, mj_model, smpl_model=\"smpl\"):\ndef smpl_6d_to_qpose(full_pose, model, normalize=False):\ndef normalize_smpl_pose(pose_aa, trans=None, random_root=False):" }, { "identifier": "SkeletonTree", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonTree(Serializable):\n \"\"\"\n A skeleton tree gives a complete description of a rigid skeleton. It describes a tree structure\n over a list of nodes with their names indicated by strings. Each edge in the tree has a local\n translation associated with it which describes the distance between the two nodes that it\n connects. \n\n Basic Usage:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> t\n SkeletonTree(\n node_names=['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot'],\n parent_indices=tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11]),\n local_translation=tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n )\n >>> t.node_names\n ['torso', 'front_left_leg', 'aux_1', 'front_left_foot', 'front_right_leg', 'aux_2', 'front_right_foot', 'left_back_leg', 'aux_3', 'left_back_foot', 'right_back_leg', 'aux_4', 'right_back_foot']\n >>> t.parent_indices\n tensor([-1, 0, 1, 2, 0, 4, 5, 0, 7, 8, 0, 10, 11])\n >>> t.local_translation\n tensor([[ 0.0000, 0.0000, 0.7500],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [-0.2000, 0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [-0.2000, -0.2000, 0.0000],\n [ 0.0000, 0.0000, 0.0000],\n [ 0.2000, -0.2000, 0.0000],\n [ 0.2000, -0.2000, 0.0000]])\n >>> t.parent_of('front_left_leg')\n 'torso'\n >>> t.index('front_right_foot')\n 6\n >>> t[2]\n 'aux_1'\n \"\"\"\n\n __example_mjcf_path__ = os.path.join(os.path.dirname(os.path.realpath(__file__)), \"tests/ant.xml\")\n\n def __init__(self, node_names, parent_indices, local_translation):\n \"\"\"\n :param node_names: a list of names for each tree node\n :type node_names: List[str]\n :param parent_indices: an int32-typed tensor that represents the edge to its parent.\\\n -1 represents the root node\n :type parent_indices: Tensor\n :param local_translation: a 3d vector that gives local translation information\n :type local_translation: Tensor\n \"\"\"\n ln, lp, ll = len(node_names), len(parent_indices), len(local_translation)\n assert len(set((ln, lp, ll))) == 1\n self._node_names = node_names\n self._parent_indices = parent_indices.long()\n self._local_translation = local_translation\n self._node_indices = {self.node_names[i]: i for i in range(len(self))}\n\n def __len__(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self.node_names)\n\n def __iter__(self):\n \"\"\" iterator that iterate through the name of each node \"\"\"\n yield from self.node_names\n\n def __getitem__(self, item):\n \"\"\" get the name of the node given the index \"\"\"\n return self.node_names[item]\n\n 
def __repr__(self):\n return (\"SkeletonTree(\\n node_names={},\\n parent_indices={},\"\n \"\\n local_translation={}\\n)\".format(\n self._indent(repr(self.node_names)),\n self._indent(repr(self.parent_indices)),\n self._indent(repr(self.local_translation)),\n ))\n\n def _indent(self, s):\n return \"\\n \".join(s.split(\"\\n\"))\n\n @property\n def node_names(self):\n return self._node_names\n\n @property\n def parent_indices(self):\n return self._parent_indices\n\n @property\n def local_translation(self):\n return self._local_translation\n\n @property\n def num_joints(self):\n \"\"\" number of nodes in the skeleton tree \"\"\"\n return len(self)\n\n @classmethod\n def from_dict(cls, dict_repr, *args, **kwargs):\n return cls(\n list(map(str, dict_repr[\"node_names\"])),\n TensorUtils.from_dict(dict_repr[\"parent_indices\"], *args, **kwargs),\n TensorUtils.from_dict(dict_repr[\"local_translation\"], *args, **kwargs),\n )\n\n def to_dict(self):\n return OrderedDict([\n (\"node_names\", self.node_names),\n (\"parent_indices\", tensor_to_dict(self.parent_indices)),\n (\"local_translation\", tensor_to_dict(self.local_translation)),\n ])\n\n @classmethod\n def from_mjcf(cls, path: str) -> \"SkeletonTree\":\n \"\"\"\n Parses a mujoco xml scene description file and returns a Skeleton Tree.\n We use the model attribute at the root as the name of the tree.\n \n :param path:\n :type path: string\n :return: The skeleton tree constructed from the mjcf file\n :rtype: SkeletonTree\n \"\"\"\n tree = ET.parse(path)\n xml_doc_root = tree.getroot()\n xml_world_body = xml_doc_root.find(\"worldbody\")\n if xml_world_body is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n # assume this is the root\n xml_body_root = xml_world_body.find(\"body\")\n if xml_body_root is None:\n raise ValueError(\"MJCF parsed incorrectly please verify it.\")\n\n node_names = []\n parent_indices = []\n local_translation = []\n\n # recursively adding all nodes into the skel_tree\n def _add_xml_node(xml_node, parent_index, node_index):\n node_name = xml_node.attrib.get(\"name\")\n # parse the local translation into float list\n pos = np.fromstring(xml_node.attrib.get(\"pos\"), dtype=float, sep=\" \")\n node_names.append(node_name)\n parent_indices.append(parent_index)\n local_translation.append(pos)\n curr_index = node_index\n node_index += 1\n for next_node in xml_node.findall(\"body\"):\n node_index = _add_xml_node(next_node, curr_index, node_index)\n return node_index\n\n _add_xml_node(xml_body_root, -1, 0)\n\n return cls(\n node_names,\n torch.from_numpy(np.array(parent_indices, dtype=np.int32)),\n torch.from_numpy(np.array(local_translation, dtype=np.float32)),\n )\n\n def parent_of(self, node_name):\n \"\"\" get the name of the parent of the given node\n\n :param node_name: the name of the node\n :type node_name: string\n :rtype: string\n \"\"\"\n return self[int(self.parent_indices[self.index(node_name)].item())]\n\n def index(self, node_name):\n \"\"\" get the index of the node\n \n :param node_name: the name of the node\n :type node_name: string\n :rtype: int\n \"\"\"\n return self._node_indices[node_name]\n\n def drop_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> \"SkeletonTree\":\n new_length = len(self) - len(node_names)\n new_node_names = []\n new_local_translation = torch.zeros(new_length, 3, dtype=self.local_translation.dtype)\n new_parent_indices = torch.zeros(new_length, dtype=self.parent_indices.dtype)\n parent_indices = self.parent_indices.numpy()\n 
new_node_indices: dict = {}\n new_node_index = 0\n for node_index in range(len(self)):\n if self[node_index] in node_names:\n continue\n tb_node_index = parent_indices[node_index]\n if tb_node_index != -1:\n local_translation = self.local_translation[node_index, :]\n while tb_node_index != -1 and self[tb_node_index] in node_names:\n local_translation += self.local_translation[tb_node_index, :]\n tb_node_index = parent_indices[tb_node_index]\n assert tb_node_index != -1, \"the root node cannot be dropped\"\n\n if pairwise_translation is not None:\n local_translation = pairwise_translation[tb_node_index, node_index, :]\n else:\n local_translation = self.local_translation[node_index, :]\n\n new_node_names.append(self[node_index])\n new_local_translation[new_node_index, :] = local_translation\n if tb_node_index == -1:\n new_parent_indices[new_node_index] = -1\n else:\n new_parent_indices[new_node_index] = new_node_indices[self[tb_node_index]]\n new_node_indices[self[node_index]] = new_node_index\n new_node_index += 1\n\n return SkeletonTree(new_node_names, new_parent_indices, new_local_translation)\n\n def keep_nodes_by_names(self, node_names: List[str], pairwise_translation=None) -> \"SkeletonTree\":\n nodes_to_drop = list(filter(lambda x: x not in node_names, self))\n return self.drop_nodes_by_names(nodes_to_drop, pairwise_translation)" }, { "identifier": "SkeletonMotion", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonMotion(SkeletonState):\n\n def __init__(self, tensor_backend, skeleton_tree, is_local, fps, *args, **kwargs):\n self._fps = fps\n super().__init__(tensor_backend, skeleton_tree, is_local, *args, **kwargs)\n\n def clone(self):\n return SkeletonMotion(self.tensor.clone(), self.skeleton_tree, self._is_local, self._fps)\n\n @property\n def invariant_property(self):\n return {\n \"skeleton_tree\": self.skeleton_tree,\n \"is_local\": self.is_local,\n \"fps\": self.fps,\n }\n\n @property\n def global_velocity(self):\n \"\"\" global velocity \"\"\"\n curr_index = self.num_joints * 4 + 3\n return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3)))\n\n @property\n def global_angular_velocity(self):\n \"\"\" global angular velocity \"\"\"\n curr_index = self.num_joints * 7 + 3\n return self.tensor[..., curr_index:curr_index + self.num_joints * 3].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 3)))\n\n @property\n def fps(self):\n \"\"\" number of frames per second \"\"\"\n return self._fps\n\n @property\n def time_delta(self):\n \"\"\" time between two adjacent frames \"\"\"\n return 1.0 / self.fps\n\n @property\n def global_root_velocity(self):\n \"\"\" global root velocity \"\"\"\n return self.global_velocity[..., 0, :]\n\n @property\n def global_root_angular_velocity(self):\n \"\"\" global root angular velocity \"\"\"\n return self.global_angular_velocity[..., 0, :]\n\n @classmethod\n def from_state_vector_and_velocity(\n cls,\n skeleton_tree,\n state_vector,\n global_velocity,\n global_angular_velocity,\n is_local,\n fps,\n ):\n \"\"\"\n Construct a skeleton motion from a skeleton state vector, global velocity and angular\n velocity at each joint.\n\n :param skeleton_tree: the skeleton tree that the motion is based on \n :type skeleton_tree: SkeletonTree\n :param state_vector: the state vector from the skeleton state by `.tensor`\n :type state_vector: Tensor\n :param global_velocity: the global velocity at each joint\n :type global_velocity: Tensor\n :param global_angular_velocity: 
the global angular velocity at each joint\n :type global_angular_velocity: Tensor\n :param is_local: if the rotation ins the state vector is given in local frame\n :type is_local: boolean\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n state_shape = state_vector.shape[:-1]\n v = global_velocity.reshape(*(state_shape + (-1,)))\n av = global_angular_velocity.reshape(*(state_shape + (-1,)))\n new_state_vector = torch.cat([state_vector, v, av], axis=-1)\n return cls(\n new_state_vector,\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n fps=fps,\n )\n\n @classmethod\n def from_skeleton_state(cls: Type[\"SkeletonMotion\"], skeleton_state: SkeletonState, fps: int):\n \"\"\"\n Construct a skeleton motion from a skeleton state. The velocities are estimated using second\n order guassian filter along the last axis. The skeleton state must have at least .dim >= 1\n\n :param skeleton_state: the skeleton state that the motion is based on \n :type skeleton_state: SkeletonState\n :param fps: number of frames per second\n :type fps: int\n\n :rtype: SkeletonMotion\n \"\"\"\n assert (type(skeleton_state) == SkeletonState), \"expected type of {}, got {}\".format(SkeletonState, type(skeleton_state))\n global_velocity = SkeletonMotion._compute_velocity(p=skeleton_state.global_translation, time_delta=1 / fps)\n global_angular_velocity = SkeletonMotion._compute_angular_velocity(r=skeleton_state.global_rotation, time_delta=1 / fps)\n return cls.from_state_vector_and_velocity(\n skeleton_tree=skeleton_state.skeleton_tree,\n state_vector=skeleton_state.tensor,\n global_velocity=global_velocity,\n global_angular_velocity=global_angular_velocity,\n is_local=skeleton_state.is_local,\n fps=fps,\n )\n\n @staticmethod\n def _to_state_vector(rot, rt, vel, avel):\n state_shape = rot.shape[:-2]\n skeleton_state_v = SkeletonState._to_state_vector(rot, rt)\n v = vel.reshape(*(state_shape + (-1,)))\n av = avel.reshape(*(state_shape + (-1,)))\n skeleton_motion_v = torch.cat([skeleton_state_v, v, av], axis=-1)\n return skeleton_motion_v\n\n @classmethod\n def from_dict(cls: Type[\"SkeletonMotion\"], dict_repr: OrderedDict, *args, **kwargs) -> \"SkeletonMotion\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n vel = TensorUtils.from_dict(dict_repr[\"global_velocity\"], *args, **kwargs)\n avel = TensorUtils.from_dict(dict_repr[\"global_angular_velocity\"], *args, **kwargs)\n return cls(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n is_local=dict_repr[\"is_local\"],\n fps=dict_repr[\"fps\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict([\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"global_velocity\", tensor_to_dict(self.global_velocity)),\n (\"global_angular_velocity\", tensor_to_dict(self.global_angular_velocity)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n (\"fps\", self.fps),\n ])\n\n @classmethod\n def from_fbx(\n cls: Type[\"SkeletonMotion\"],\n fbx_file_path,\n fbx_configs,\n skeleton_tree=None,\n is_local=True,\n fps=120,\n root_joint=\"\",\n root_trans_index=0,\n *args,\n **kwargs,\n ) -> \"SkeletonMotion\":\n \"\"\"\n Construct a skeleton motion from a fbx file (TODO - generalize this). 
If the skeleton tree\n is not given, it will use the first frame of the mocap to construct the skeleton tree.\n\n :param fbx_file_path: the path of the fbx file\n :type fbx_file_path: string\n :param fbx_configs: the configuration in terms of {\"tmp_path\": ..., \"fbx_py27_path\": ...}\n :type fbx_configs: dict\n :param skeleton_tree: the optional skeleton tree that the rotation will be applied to\n :type skeleton_tree: SkeletonTree, optional\n :param is_local: the state vector uses local or global rotation as the representation\n :type is_local: bool, optional, default=True\n :rtype: SkeletonMotion\n \"\"\"\n joint_names, joint_parents, transforms, fps = fbx_to_array(fbx_file_path, fbx_configs, root_joint, fps)\n # swap the last two axis to match the convention\n local_transform = euclidean_to_transform(transformation_matrix=torch.from_numpy(np.swapaxes(np.array(transforms), -1, -2),).float())\n local_rotation = transform_rotation(local_transform)\n root_translation = transform_translation(local_transform)[..., root_trans_index, :]\n joint_parents = torch.from_numpy(np.array(joint_parents)).int()\n\n if skeleton_tree is None:\n local_translation = transform_translation(local_transform).reshape(-1, len(joint_parents), 3)[0]\n skeleton_tree = SkeletonTree(joint_names, joint_parents, local_translation)\n skeleton_state = SkeletonState.from_rotation_and_root_translation(skeleton_tree, r=local_rotation, t=root_translation, is_local=True)\n if not is_local:\n skeleton_state = skeleton_state.global_repr()\n return cls.from_skeleton_state(skeleton_state=skeleton_state, fps=fps)\n\n @staticmethod\n def _compute_velocity(p, time_delta, guassian_filter=True):\n velocity = np.gradient(p.numpy(), axis=-3) / time_delta\n if guassian_filter:\n velocity = torch.from_numpy(filters.gaussian_filter1d(velocity, 2, axis=-3, mode=\"nearest\")).to(p)\n else:\n velocity = torch.from_numpy(velocity).to(p)\n\n return velocity\n\n @staticmethod\n def _compute_angular_velocity(r, time_delta: float, guassian_filter=True):\n # assume the second last dimension is the time axis\n diff_quat_data = quat_identity_like(r).to(r)\n diff_quat_data[..., :-1, :, :] = quat_mul_norm(r[..., 1:, :, :], quat_inverse(r[..., :-1, :, :]))\n diff_angle, diff_axis = quat_angle_axis(diff_quat_data)\n angular_velocity = diff_axis * diff_angle.unsqueeze(-1) / time_delta\n if guassian_filter:\n angular_velocity = torch.from_numpy(filters.gaussian_filter1d(angular_velocity.numpy(), 2, axis=-3, mode=\"nearest\"),)\n return angular_velocity\n\n def crop(self, start: int, end: int, fps: Optional[int] = None):\n \"\"\"\n Crop the motion along its last axis. This is equivalent to performing a slicing on the\n object with [..., start: end: skip_every] where skip_every = old_fps / fps. Note that the\n new fps provided must be a factor of the original fps. 
\n\n :param start: the beginning frame index\n :type start: int\n :param end: the ending frame index\n :type end: int\n :param fps: number of frames per second in the output (if not given the original fps will be used)\n :type fps: int, optional\n :rtype: SkeletonMotion\n \"\"\"\n if fps is None:\n new_fps = int(self.fps)\n old_fps = int(self.fps)\n else:\n new_fps = int(fps)\n old_fps = int(self.fps)\n assert old_fps % fps == 0, (\"the resampling doesn't support fps with non-integer division \"\n \"from the original fps: {} => {}\".format(old_fps, fps))\n skip_every = old_fps // new_fps\n s = slice(start, end, skip_every)\n z = self[..., s]\n\n rot = z.local_rotation if z.is_local else z.global_rotation\n rt = z.root_translation\n vel = z.global_velocity\n avel = z.global_angular_velocity\n return SkeletonMotion(\n SkeletonMotion._to_state_vector(rot, rt, vel, avel),\n skeleton_tree=z.skeleton_tree,\n is_local=z.is_local,\n fps=new_fps,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: \"SkeletonTree\",\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimate the velocity after the retargeting. The same fps is used in the\n new retargetted motion.\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). 
For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return SkeletonMotion.from_skeleton_state(\n super().retarget_to(\n joint_mapping,\n source_tpose_local_rotation,\n source_tpose_root_translation,\n target_skeleton_tree,\n target_tpose_local_rotation,\n target_tpose_root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n ),\n self.fps,\n )\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonMotion\":\n \"\"\" \n Same as the one in :class:`SkeletonState`. This method discards all velocity information before\n retargeting and re-estimates the velocity after the retargeting. The same fps is used in the\n new retargeted motion.\n\n :param joint_mapping: a dictionary that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonMotion\n \"\"\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n z_up,\n )" }, { "identifier": "SkeletonState", "path": "poselib/poselib/skeleton/skeleton3d.py", "snippet": "class SkeletonState(Serializable):\n \"\"\"\n A skeleton state contains all the information needed to describe a static state of a skeleton.\n It requires a skeleton tree, local/global rotation at each joint and the root translation.\n\n Example:\n >>> t = SkeletonTree.from_mjcf(SkeletonTree.__example_mjcf_path__)\n >>> zero_pose = SkeletonState.zero_pose(t)\n >>> plot_skeleton_state(zero_pose) # can be imported from `.visualization.common`\n [plot of the ant at zero pose]\n >>> local_rotation = zero_pose.local_rotation.clone()\n >>> local_rotation[2] = torch.tensor([0, 0, 1, 0])\n >>> new_pose = SkeletonState.from_rotation_and_root_translation(\n ... skeleton_tree=t,\n ... r=local_rotation,\n ... t=zero_pose.root_translation,\n ... is_local=True\n ... 
)\n >>> new_pose.local_rotation\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n >>> plot_skeleton_state(new_pose) # you should be able to see one of the ant's legs is bent\n [plot of the ant with the new pose]\n >>> new_pose.global_rotation # the local rotation is propagated to the global rotation at joint #3\n tensor([[0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 1., 0., 0.],\n [0., 1., 0., 0.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.],\n [0., 0., 0., 1.]])\n\n Global/Local Representation (cont. from the previous example)\n >>> new_pose.is_local\n True\n >>> new_pose.tensor # this will return the local rotation followed by the root translation\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 0., 0., 1., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.tensor.shape # 4 * 13 (joint rotation) + 3 (root translation)\n torch.Size([55])\n >>> new_pose.global_repr().is_local\n False\n >>> new_pose.global_repr().tensor # this will return the global rotation followed by the root translation instead\n tensor([0., 0., 0., 1., 0., 0., 0., 1., 0., 1., 0., 0., 0., 1., 0., 0., 0., 0.,\n 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1.,\n 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0., 0., 1., 0., 0.,\n 0.])\n >>> new_pose.global_repr().tensor.shape # 4 * 13 (joint rotation) + 3 (root translation)\n torch.Size([55])\n \"\"\"\n\n def __init__(self, tensor_backend, skeleton_tree, is_local):\n self._skeleton_tree = skeleton_tree\n self._is_local = is_local\n self.tensor = tensor_backend.clone()\n\n def __len__(self):\n return self.tensor.shape[0]\n\n @property\n def rotation(self):\n if not hasattr(self, \"_rotation\"):\n self._rotation = self.tensor[..., :self.num_joints * 4].reshape(*(self.tensor.shape[:-1] + (self.num_joints, 4)))\n return self._rotation\n\n @property\n def _local_rotation(self):\n if self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def _global_rotation(self):\n if not self._is_local:\n return self.rotation\n else:\n return None\n\n @property\n def is_local(self):\n \"\"\" is the rotation represented in local frame? 
\n \n :rtype: bool\n \"\"\"\n return self._is_local\n\n @property\n def invariant_property(self):\n return {\"skeleton_tree\": self.skeleton_tree, \"is_local\": self.is_local}\n\n @property\n def num_joints(self):\n \"\"\" number of joints in the skeleton tree \n \n :rtype: int\n \"\"\"\n return self.skeleton_tree.num_joints\n\n @property\n def skeleton_tree(self):\n \"\"\" skeleton tree \n \n :rtype: SkeletonTree\n \"\"\"\n return self._skeleton_tree\n\n @property\n def root_translation(self):\n \"\"\" root translation \n \n :rtype: Tensor\n \"\"\"\n if not hasattr(self, \"_root_translation\"):\n self._root_translation = self.tensor[..., self.num_joints * 4:self.num_joints * 4 + 3]\n return self._root_translation\n\n @property\n def global_transformation(self):\n \"\"\" global transformation of each joint (transform from joint frame to global frame) \"\"\"\n # Forward Kinematics\n if not hasattr(self, \"_global_transformation\"):\n local_transformation = self.local_transformation\n global_transformation = []\n parent_indices = self.skeleton_tree.parent_indices.numpy()\n # global_transformation = local_transformation.identity_like()\n for node_index in range(len(self.skeleton_tree)):\n parent_index = parent_indices[node_index]\n if parent_index == -1:\n global_transformation.append(local_transformation[..., node_index, :])\n else:\n global_transformation.append(transform_mul(\n global_transformation[parent_index],\n local_transformation[..., node_index, :],\n ))\n self._global_transformation = torch.stack(global_transformation, axis=-2)\n return self._global_transformation\n\n @property\n def global_rotation(self):\n \"\"\" global rotation of each joint (rotation matrix to rotate from joint's F.O.R to global\n F.O.R) \"\"\"\n if self._global_rotation is None:\n if not hasattr(self, \"_comp_global_rotation\"):\n self._comp_global_rotation = transform_rotation(self.global_transformation)\n return self._comp_global_rotation\n else:\n return self._global_rotation\n\n @property\n def global_translation(self):\n \"\"\" global translation of each joint \"\"\"\n if not hasattr(self, \"_global_translation\"):\n self._global_translation = transform_translation(self.global_transformation)\n return self._global_translation\n\n @property\n def global_translation_xy(self):\n \"\"\" global translation in xy \"\"\"\n trans_xy_data = self.global_translation.zeros_like()\n trans_xy_data[..., 0:2] = self.global_translation[..., 0:2]\n return trans_xy_data\n\n @property\n def global_translation_xz(self):\n \"\"\" global translation in xz \"\"\"\n trans_xz_data = self.global_translation.zeros_like()\n trans_xz_data[..., 0:1] = self.global_translation[..., 0:1]\n trans_xz_data[..., 2:3] = self.global_translation[..., 2:3]\n return trans_xz_data\n\n @property\n def local_rotation(self):\n \"\"\" the rotation from child frame to parent frame given in the order of child nodes appeared\n in `.skeleton_tree.node_names` \"\"\"\n if self._local_rotation is None:\n if not hasattr(self, \"_comp_local_rotation\"):\n local_rotation = quat_identity_like(self.global_rotation)\n for node_index in range(len(self.skeleton_tree)):\n parent_index = self.skeleton_tree.parent_indices[node_index]\n if parent_index == -1:\n local_rotation[..., node_index, :] = self.global_rotation[..., node_index, :]\n else:\n local_rotation[..., node_index, :] = quat_mul_norm(\n quat_inverse(self.global_rotation[..., parent_index, :]),\n self.global_rotation[..., node_index, :],\n )\n self._comp_local_rotation = local_rotation\n return 
self._comp_local_rotation\n else:\n return self._local_rotation\n\n @property\n def local_transformation(self):\n \"\"\" local translation + local rotation. It describes the transformation from child frame to \n parent frame given in the order of child nodes appeared in `.skeleton_tree.node_names` \"\"\"\n if not hasattr(self, \"_local_transformation\"):\n self._local_transformation = transform_from_rotation_translation(r=self.local_rotation, t=self.local_translation)\n return self._local_transformation\n\n @property\n def local_translation(self):\n \"\"\" local translation of the skeleton state. It is identical to the local translation in\n `.skeleton_tree.local_translation` except the root translation. The root translation is\n identical to `.root_translation` \"\"\"\n if not hasattr(self, \"_local_translation\"):\n broadcast_shape = (tuple(self.tensor.shape[:-1]) + (len(self.skeleton_tree),) + tuple(self.skeleton_tree.local_translation.shape[-1:]))\n local_translation = self.skeleton_tree.local_translation.broadcast_to(*broadcast_shape).clone()\n local_translation[..., 0, :] = self.root_translation\n self._local_translation = local_translation\n return self._local_translation\n\n # Root Properties\n @property\n def root_translation_xy(self):\n \"\"\" root translation on xy \"\"\"\n if not hasattr(self, \"_root_translation_xy\"):\n self._root_translation_xy = self.global_translation_xy[..., 0, :]\n return self._root_translation_xy\n\n @property\n def global_root_rotation(self):\n \"\"\" root rotation \"\"\"\n if not hasattr(self, \"_global_root_rotation\"):\n self._global_root_rotation = self.global_rotation[..., 0, :]\n return self._global_root_rotation\n\n @property\n def global_root_yaw_rotation(self):\n \"\"\" root yaw rotation \"\"\"\n if not hasattr(self, \"_global_root_yaw_rotation\"):\n self._global_root_yaw_rotation = self.global_root_rotation.yaw_rotation()\n return self._global_root_yaw_rotation\n\n # Properties relative to root\n @property\n def local_translation_to_root(self):\n \"\"\" The 3D translation from joint frame to the root frame. \"\"\"\n if not hasattr(self, \"_local_translation_to_root\"):\n self._local_translation_to_root = (self.global_translation - self.root_translation.unsqueeze(-1))\n return self._local_translation_to_root\n\n @property\n def local_rotation_to_root(self):\n \"\"\" The 3D rotation from joint frame to the root frame. 
It is equivalent to \n The root_R_world * world_R_node \"\"\"\n return (quat_inverse(self.global_root_rotation).unsqueeze(-1) * self.global_rotation)\n\n def compute_forward_vector(\n self,\n left_shoulder_index,\n right_shoulder_index,\n left_hip_index,\n right_hip_index,\n gaussian_filter_width=20,\n ):\n \"\"\" Computes forward vector based on cross product of the up vector with \n average of the right->left shoulder and hip vectors \"\"\"\n global_positions = self.global_translation\n # Perpendicular to the forward direction.\n # Uses the shoulders and hips to find this.\n side_direction = (global_positions[:, left_shoulder_index].numpy() - global_positions[:, right_shoulder_index].numpy() + global_positions[:, left_hip_index].numpy() - global_positions[:, right_hip_index].numpy())\n side_direction = (side_direction / np.sqrt((side_direction**2).sum(axis=-1))[..., np.newaxis])\n\n # Forward direction obtained by crossing with the up direction.\n forward_direction = np.cross(side_direction, np.array([[0, 1, 0]]))\n\n # Smooth the forward direction with a Gaussian.\n # Axis 0 is the time/frame axis.\n forward_direction = filters.gaussian_filter1d(forward_direction, gaussian_filter_width, axis=0, mode=\"nearest\")\n forward_direction = (forward_direction / np.sqrt((forward_direction**2).sum(axis=-1))[..., np.newaxis])\n\n return torch.from_numpy(forward_direction)\n\n @staticmethod\n def _to_state_vector(rot, rt):\n state_shape = rot.shape[:-2]\n vr = rot.reshape(*(state_shape + (-1,)))\n vt = rt.broadcast_to(*state_shape + rt.shape[-1:]).reshape(*(state_shape + (-1,)))\n v = torch.cat([vr, vt], axis=-1)\n return v\n\n @classmethod\n def from_dict(cls: Type[\"SkeletonState\"], dict_repr: OrderedDict, *args, **kwargs) -> \"SkeletonState\":\n rot = TensorUtils.from_dict(dict_repr[\"rotation\"], *args, **kwargs)\n rt = TensorUtils.from_dict(dict_repr[\"root_translation\"], *args, **kwargs)\n return cls(\n SkeletonState._to_state_vector(rot, rt),\n SkeletonTree.from_dict(dict_repr[\"skeleton_tree\"], *args, **kwargs),\n dict_repr[\"is_local\"],\n )\n\n def to_dict(self) -> OrderedDict:\n return OrderedDict([\n (\"rotation\", tensor_to_dict(self.rotation)),\n (\"root_translation\", tensor_to_dict(self.root_translation)),\n (\"skeleton_tree\", self.skeleton_tree.to_dict()),\n (\"is_local\", self.is_local),\n ])\n\n @classmethod\n def from_rotation_and_root_translation(cls, skeleton_tree, r, t, is_local=True):\n \"\"\"\n Construct a skeleton state from rotation and root translation\n\n :param skeleton_tree: the skeleton tree\n :type skeleton_tree: SkeletonTree\n :param r: rotation (either global or local)\n :type r: Tensor\n :param t: root translation\n :type t: Tensor\n :param is_local: to indicate that whether the rotation is local or global\n :type is_local: bool, optional, default=True\n \"\"\"\n assert (r.dim() > 0), \"the rotation needs to have at least 1 dimension (dim = {})\".format(r.dim)\n state_vec = SkeletonState._to_state_vector(r, t)\n\n return cls(\n state_vec,\n skeleton_tree=skeleton_tree,\n is_local=is_local,\n )\n\n @classmethod\n def zero_pose(cls, skeleton_tree):\n \"\"\"\n Construct a zero-pose skeleton state from the skeleton tree by assuming that all the local\n rotation is 0 and root translation is also 0.\n\n :param skeleton_tree: the skeleton tree as the rigid body\n :type skeleton_tree: SkeletonTree\n \"\"\"\n return cls.from_rotation_and_root_translation(\n skeleton_tree=skeleton_tree,\n r=quat_identity([skeleton_tree.num_joints]),\n t=torch.zeros(3, 
dtype=skeleton_tree.local_translation.dtype),\n is_local=True,\n )\n\n def local_repr(self):\n \"\"\" \n Convert the skeleton state into local representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=True`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def global_repr(self):\n \"\"\" \n Convert the skeleton state into global representation. This will only affects the values of\n .tensor. If the skeleton state already has `is_local=False`. This method will do nothing. \n\n :rtype: SkeletonState\n \"\"\"\n if not self.is_local:\n return self\n return SkeletonState.from_rotation_and_root_translation(\n self.skeleton_tree,\n r=self.global_rotation,\n t=self.root_translation,\n is_local=False,\n )\n\n def _get_pairwise_average_translation(self):\n global_transform_inv = transform_inverse(self.global_transformation)\n p1 = global_transform_inv.unsqueeze(-2)\n p2 = self.global_transformation.unsqueeze(-3)\n\n pairwise_translation = (transform_translation(transform_mul(p1, p2)).reshape(-1, len(self.skeleton_tree), len(self.skeleton_tree), 3).mean(axis=0))\n return pairwise_translation\n\n def _transfer_to(self, new_skeleton_tree: SkeletonTree):\n old_indices = list(map(self.skeleton_tree.index, new_skeleton_tree))\n return SkeletonState.from_rotation_and_root_translation(\n new_skeleton_tree,\n r=self.global_rotation[..., old_indices, :],\n t=self.root_translation,\n is_local=False,\n )\n\n def drop_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> \"SkeletonState\":\n \"\"\" \n Drop a list of nodes from the skeleton and re-compute the local rotation to match the \n original joint position as much as possible. \n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n if estimate_local_translation_from_states:\n pairwise_translation = self._get_pairwise_average_translation()\n else:\n pairwise_translation = None\n new_skeleton_tree = self.skeleton_tree.drop_nodes_by_names(node_names, pairwise_translation)\n return self._transfer_to(new_skeleton_tree)\n\n def keep_nodes_by_names(self, node_names: List[str], estimate_local_translation_from_states: bool = True) -> \"SkeletonState\":\n \"\"\" \n Keep a list of nodes and drop all other nodes from the skeleton and re-compute the local \n rotation to match the original joint position as much as possible. 
\n\n :param node_names: a list node names that specifies the nodes need to be dropped\n :type node_names: List of strings\n :param estimate_local_translation_from_states: the boolean indicator that specifies whether\\\n or not to re-estimate the local translation from the states (avg.)\n :type estimate_local_translation_from_states: boolean\n :rtype: SkeletonState\n \"\"\"\n return self.drop_nodes_by_names(\n list(filter(lambda x: (x not in node_names), self)),\n estimate_local_translation_from_states,\n )\n\n def _remapped_to(self, joint_mapping: Dict[str, str], target_skeleton_tree: SkeletonTree):\n joint_mapping_inv = {target: source for source, target in joint_mapping.items()}\n reduced_target_skeleton_tree = target_skeleton_tree.keep_nodes_by_names(list(joint_mapping_inv))\n n_joints = (\n len(joint_mapping),\n len(self.skeleton_tree),\n len(reduced_target_skeleton_tree),\n )\n assert (len(set(n_joints)) == 1), \"the joint mapping is not consistent with the skeleton trees\"\n source_indices = list(map(\n lambda x: self.skeleton_tree.index(joint_mapping_inv[x]),\n reduced_target_skeleton_tree,\n ))\n target_local_rotation = self.local_rotation[..., source_indices, :]\n return SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=reduced_target_skeleton_tree,\n r=target_local_rotation,\n t=self.root_translation,\n is_local=True,\n )\n\n def retarget_to(\n self,\n joint_mapping: Dict[str, str],\n source_tpose_local_rotation,\n source_tpose_root_translation: np.ndarray,\n target_skeleton_tree: SkeletonTree,\n target_tpose_local_rotation,\n target_tpose_root_translation: np.ndarray,\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n z_up: bool = True,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. The function follows the procedures below.\n\n Steps:\n 1. Drop the joints from the source (self) that do not belong to the joint mapping\\\n with an implementation that is similar to \"keep_nodes_by_names()\" - take a\\\n look at the function doc for more details (same for source_tpose)\n \n 2. Rotate the source state and the source tpose by \"rotation_to_target_skeleton\"\\\n to align the source with the target orientation\n \n 3. Extract the root translation and normalize it to match the scale of the target\\\n skeleton\n \n 4. Extract the global rotation from source state relative to source tpose and\\\n re-apply the relative rotation to the target tpose to construct the global\\\n rotation after retargetting\n \n 5. Combine the computed global rotation and the root translation from 3 and 4 to\\\n complete the retargeting.\n \n 6. 
Make feet on the ground (global translation z)\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose_local_rotation: the local rotation of the source skeleton\n :type source_tpose_local_rotation: Tensor\n \n :param source_tpose_root_translation: the root translation of the source tpose\n :type source_tpose_root_translation: np.ndarray\n \n :param target_skeleton_tree: the target skeleton tree\n :type target_skeleton_tree: SkeletonTree\n \n :param target_tpose_local_rotation: the local rotation of the target skeleton\n :type target_tpose_local_rotation: Tensor\n \n :param target_tpose_root_translation: the root translation of the target tpose\n :type target_tpose_root_translation: Tensor\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n\n # STEP 0: Preprocess\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=self.skeleton_tree,\n r=source_tpose_local_rotation,\n t=source_tpose_root_translation,\n is_local=True,\n )\n target_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=target_tpose_local_rotation,\n t=target_tpose_root_translation,\n is_local=True,\n )\n\n # STEP 1: Drop the irrelevant joints\n pairwise_translation = self._get_pairwise_average_translation()\n node_names = list(joint_mapping)\n new_skeleton_tree = self.skeleton_tree.keep_nodes_by_names(node_names, pairwise_translation)\n\n # TODO: combine the following steps before STEP 3\n source_tpose = source_tpose._transfer_to(new_skeleton_tree)\n source_state = self._transfer_to(new_skeleton_tree)\n\n source_tpose = source_tpose._remapped_to(joint_mapping, target_skeleton_tree)\n source_state = source_state._remapped_to(joint_mapping, target_skeleton_tree)\n\n # STEP 2: Rotate the source to align with the target\n new_local_rotation = source_tpose.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_tpose.local_rotation[..., 0, :])\n\n source_tpose = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_tpose.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_tpose.root_translation),\n is_local=True,\n )\n\n new_local_rotation = source_state.local_rotation.clone()\n new_local_rotation[..., 0, :] = quat_mul_norm(rotation_to_target_skeleton, source_state.local_rotation[..., 0, :])\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=source_state.skeleton_tree,\n r=new_local_rotation,\n t=quat_rotate(rotation_to_target_skeleton, source_state.root_translation),\n is_local=True,\n )\n\n # STEP 3: Normalize to match the target scale\n root_translation_diff = (source_state.root_translation - source_tpose.root_translation) * scale_to_target_skeleton\n\n # STEP 4: the global rotation from source state relative to source tpose 
and\n # re-apply to the target\n current_skeleton_tree = source_state.skeleton_tree\n target_tpose_global_rotation = source_state.global_rotation[0, :].clone()\n for current_index, name in enumerate(current_skeleton_tree):\n if name in target_tpose.skeleton_tree:\n target_tpose_global_rotation[current_index, :] = target_tpose.global_rotation[target_tpose.skeleton_tree.index(name), :]\n\n global_rotation_diff = quat_mul_norm(source_state.global_rotation, quat_inverse(source_tpose.global_rotation))\n new_global_rotation = quat_mul_norm(global_rotation_diff, target_tpose_global_rotation)\n\n # STEP 5: Putting 3 and 4 together\n current_skeleton_tree = source_state.skeleton_tree\n shape = source_state.global_rotation.shape[:-1]\n shape = shape[:-1] + target_tpose.global_rotation.shape[-2:-1]\n new_global_rotation_output = quat_identity(shape)\n for current_index, name in enumerate(target_skeleton_tree):\n while name not in current_skeleton_tree:\n name = target_skeleton_tree.parent_of(name)\n parent_index = current_skeleton_tree.index(name)\n new_global_rotation_output[:, current_index, :] = new_global_rotation[:, parent_index, :]\n\n source_state = SkeletonState.from_rotation_and_root_translation(\n skeleton_tree=target_skeleton_tree,\n r=new_global_rotation_output,\n t=target_tpose.root_translation + root_translation_diff,\n is_local=False,\n ).local_repr()\n\n return source_state\n\n def retarget_to_by_tpose(\n self,\n joint_mapping: Dict[str, str],\n source_tpose: \"SkeletonState\",\n target_tpose: \"SkeletonState\",\n rotation_to_target_skeleton,\n scale_to_target_skeleton: float,\n ) -> \"SkeletonState\":\n \"\"\" \n Retarget the skeleton state to a target skeleton tree. This is a naive retarget\n implementation with rough approximations. See the method `retarget_to()` for more information\n\n :param joint_mapping: a dictionary of that maps the joint node from the source skeleton to \\\n the target skeleton\n :type joint_mapping: Dict[str, str]\n \n :param source_tpose: t-pose of the source skeleton\n :type source_tpose: SkeletonState\n \n :param target_tpose: t-pose of the target skeleton\n :type target_tpose: SkeletonState\n \n :param rotation_to_target_skeleton: the rotation that needs to be applied to the source\\\n skeleton to align with the target skeleton. Essentially the rotation is t_R_s, where t is\\\n the frame of reference of the target skeleton and s is the frame of reference of the source\\\n skeleton\n :type rotation_to_target_skeleton: Tensor\n :param scale_to_target_skeleton: the factor that needs to be multiplied from source\\\n skeleton to target skeleton (unit in distance). For example, to go from `cm` to `m`, the \\\n factor needs to be 0.01.\n :type scale_to_target_skeleton: float\n :rtype: SkeletonState\n \"\"\"\n assert (len(source_tpose.shape) == 0 and len(target_tpose.shape) == 0), \"the retargeting script currently doesn't support vectorized operations\"\n return self.retarget_to(\n joint_mapping,\n source_tpose.local_rotation,\n source_tpose.root_translation,\n target_tpose.skeleton_tree,\n target_tpose.local_rotation,\n target_tpose.root_translation,\n rotation_to_target_skeleton,\n scale_to_target_skeleton,\n )" } ]
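The `global_transformation` property quoted in this context list accumulates local joint transforms down the kinematic tree: a joint's global transform is its parent's global transform composed with its own local transform. A minimal sketch of that forward-kinematics pass, assuming 4x4 homogeneous matrices instead of poselib's quaternion-plus-translation transform type; `fk`, `local_T`, and `parents` are illustrative names, not part of poselib:

import numpy as np

def fk(local_T, parents):
    # local_T: (J, 4, 4) child->parent transforms; parents[j] == -1 marks the root.
    # Children must appear after their parent, as in SkeletonTree's node ordering.
    global_T = np.empty_like(local_T)
    for j, p in enumerate(parents):
        global_T[j] = local_T[j] if p == -1 else global_T[p] @ local_T[j]
    return global_T

T = np.tile(np.eye(4), (3, 1, 1))
T[1, 0, 3] = 1.0  # joint 1 sits one unit along x from the root
T[2, 0, 3] = 1.0  # joint 2 sits one more unit along x from joint 1
print(fk(T, [-1, 0, 1])[2, :3, 3])  # -> [2. 0. 0.]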
import glob import os import sys import pdb import os.path as osp import open3d as o3d import open3d.visualization.rendering as rendering import imageio import joblib import numpy as np import torch import random import matplotlib.pyplot as plt import cv2 import matplotlib as mpl from tqdm import tqdm from uhc.smpllib.smpl_parser import ( SMPL_Parser, SMPLH_Parser, SMPLX_Parser, ) from uhc.smpllib.smpl_mujoco import SMPL_BONE_ORDER_NAMES as joint_names from poselib.poselib.skeleton.skeleton3d import SkeletonTree, SkeletonMotion, SkeletonState from scipy.spatial.transform import Rotation as sRot from tqdm import tqdm
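The import list above pulls in `scipy.spatial.transform.Rotation as sRot`, which is convenient for sanity-checking the quaternion math that `SkeletonMotion._compute_angular_velocity` performs: take the relative rotation between consecutive frames, convert it to an axis-angle vector, and divide by the frame time. A small sketch under that interpretation; `angular_velocity_from_quats` is an illustrative helper, not part of the repo:

import numpy as np
from scipy.spatial.transform import Rotation as sRot

def angular_velocity_from_quats(q, fps):
    # q: (T, 4) unit quaternions in xyzw order (scipy's convention, same as poselib).
    r = sRot.from_quat(q)
    dr = r[1:] * r[:-1].inv()      # frame-to-frame rotation delta in the global frame
    return dr.as_rotvec() * fps    # (T - 1, 3) angular velocity in rad/s

# a body rotating about z by 90 degrees over one second at 30 fps
q = sRot.from_euler("z", np.linspace(0, np.pi / 2, 31)).as_quat()
print(angular_velocity_from_quats(q, fps=30)[0])  # ~[0, 0, pi/2] rad/s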
19,852
sys.path.append(os.getcwd()) paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01 def pause_func(action): global paused paused = not paused print(f"Paused: {paused}") return True def reset_func(action): global reset reset = not reset print(f"Reset: {reset}") return True def record_func(action): global recording, writer if not recording: fps = 30 curr_video_file_name = "test.mp4" writer = imageio.get_writer(curr_video_file_name, fps=fps, macro_block_size=None) elif not writer is None: writer.close() writer = None recording = not recording print(f"Recording: {recording}") return True def capture_func(action): global capture capture = not capture return True def zoom_func(action): global control, curr_zoom curr_zoom = curr_zoom * 0.9 control.set_zoom(curr_zoom) print(f"Reset: {reset}") return True mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand'] Name = "getting_started" Title = "Getting Started" data_dir = "data/smpl" smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") # pkl_dir = "output/renderings/smpl_ego_long_8-2023-01-20-11:28:00.pkl" # pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl" pkl_dir = "output/renderings/smpl_im_comp_pnn_3-2023-03-07-14:31:50.pkl" Name = pkl_dir.split("/")[-1].split(".")[0] pkl_data = joblib.load(pkl_dir)
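The `record_func` callback above toggles an `imageio` writer on and off and appends rendered frames while recording is active. A self-contained sketch of that writer usage, assuming the imageio-ffmpeg backend is installed; the gradient frames here are synthetic stand-ins for rendered images:

import imageio
import numpy as np

writer = imageio.get_writer("test.mp4", fps=30, macro_block_size=None)
for t in range(30):
    frame = np.full((64, 64, 3), t * 8, dtype=np.uint8)  # dummy frames fading from black
    writer.append_data(frame)
writer.close()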
sys.path.append(os.getcwd()) paused, reset, recording, image_list, writer, control, curr_zoom = False, False, False, [], None, None, 0.01 def pause_func(action): global paused paused = not paused print(f"Paused: {paused}") return True def reset_func(action): global reset reset = not reset print(f"Reset: {reset}") return True def record_func(action): global recording, writer if not recording: fps = 30 curr_video_file_name = "test.mp4" writer = imageio.get_writer(curr_video_file_name, fps=fps, macro_block_size=None) elif not writer is None: writer.close() writer = None recording = not recording print(f"Recording: {recording}") return True def capture_func(action): global capture capture = not capture return True def zoom_func(action): global control, curr_zoom curr_zoom = curr_zoom * 0.9 control.set_zoom(curr_zoom) print(f"Reset: {reset}") return True mujoco_joint_names = ['Pelvis', 'L_Hip', 'L_Knee', 'L_Ankle', 'L_Toe', 'R_Hip', 'R_Knee', 'R_Ankle', 'R_Toe', 'Torso', 'Spine', 'Chest', 'Neck', 'Head', 'L_Thorax', 'L_Shoulder', 'L_Elbow', 'L_Wrist', 'L_Hand', 'R_Thorax', 'R_Shoulder', 'R_Elbow', 'R_Wrist', 'R_Hand'] Name = "getting_started" Title = "Getting Started" data_dir = "data/smpl" smpl_parser_n = SMPL_Parser(model_path=data_dir, gender="neutral") smpl_parser_m = SMPL_Parser(model_path=data_dir, gender="male") smpl_parser_f = SMPL_Parser(model_path=data_dir, gender="female") # pkl_dir = "output/renderings/smpl_ego_long_8-2023-01-20-11:28:00.pkl" # pkl_dir = "output/renderings/smpl_im_comp_8-2023-02-05-15:36:14.pkl" pkl_dir = "output/renderings/smpl_im_comp_pnn_3-2023-03-07-14:31:50.pkl" Name = pkl_dir.split("/")[-1].split(".")[0] pkl_data = joblib.load(pkl_dir)
mujoco_2_smpl = [mujoco_joint_names.index(q) for q in joint_names if q in mujoco_joint_names]
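The `next_line` above builds an index list by looking up each SMPL joint name in the MuJoCo joint list, so per-joint data stored in one convention can be gathered into the other. The same pattern in miniature, with toy name lists (`src_names` and `dst_names` are illustrative, not the real joint orders):

import numpy as np

src_names = ["Pelvis", "L_Hip", "R_Hip", "Head"]   # order the data is stored in
dst_names = ["Pelvis", "Head", "L_Hip", "R_Hip"]   # order the consumer expects
dst_to_src = [src_names.index(n) for n in dst_names]

data_src = np.arange(4 * 3).reshape(4, 3)  # (num_joints, 3) per-joint vectors
data_dst = data_src[dst_to_src]            # rows reordered to the dst convention
print(dst_to_src)                          # [0, 3, 1, 2]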
0
2023-10-15 19:05:47+00:00
24k
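Before the next sample: the `SkeletonMotion.crop` method quoted in this sample's context resamples by keeping every `old_fps // new_fps`-th frame, which is why the new fps must divide the old one evenly. The rule in isolation, as a plain-integer sketch of that slicing:

old_fps, new_fps = 120, 30
assert old_fps % new_fps == 0, "the new fps must evenly divide the old fps"
skip_every = old_fps // new_fps   # keep every 4th frame
frames = list(range(12))
print(frames[0:12:skip_every])    # -> [0, 4, 8]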
e4s2023/E4S2023
run_UI_seg19.py
[ { "identifier": "UIOptions", "path": "options/ui_options.py", "snippet": "class UIOptions:\n\n\tdef __init__(self):\n\t\tself.parser = ArgumentParser()\n\t\tself.initialize()\n\n\tdef initialize(self):\n\t\tself.parser.add_argument('--exp_dir', type=str, default=\"/apdcephfs/share_1290939/zhianliu/running_results/our_editing/work_dirs/dummy\",help='Path to experiment output directory')\n\t\tself.parser.add_argument('--num_seg_cls', type=int, default=12,help='Segmentation mask class number')\n\t\tself.parser.add_argument('--remaining_layer_idx', type=int, default=13, help='剩余的几层不用mask')\n # ================= 模型设置 相关 =====================\n\t\tself.parser.add_argument('--out_size', type=int, default=1024,help='output image size') \n\t\tself.parser.add_argument('--n_styles', default=11, type=int, help='StyleGAN层数')\n\t\tself.parser.add_argument('--fsencoder_type', type=str, default=\"psp\", help='FS Encode网络类型')\n\t\tself.parser.add_argument('--extra_encoder_input', type=str, default=\"diff_map\", help='额外的style code补偿Encode网络输入类型') \n # ================= 数据集 相关 =====================\n\t\t\n\t\tself.parser.add_argument('--label_dir', default='./ui_run/testset/CelebA-HQ/test/labels', type=str, help='dataset label dir')\n\t\tself.parser.add_argument('--image_dir', default='./ui_run/testset/CelebA-HQ/test/images', type=str, help='dataset label dir')\n\t\tself.parser.add_argument('--ds_frac', default=1.0, type=float, help='dataset fraction')\n\t\tself.parser.add_argument('--test_batch_size', default=1, type=int, help='Batch size for testing and inference')\n\t\tself.parser.add_argument('--test_workers', default=4, type=int, help='Number of test/inference dataloader workers')\n\t\tself.parser.add_argument('--train_G', default=False, type=bool, help='Whether to train the styleGAN model')\n \n\t\tself.parser.add_argument('--output_size', default=1024, type=int, help='Output size of generator')\n\t\t# self.parser.add_argument('--checkpoint_path', default=\"/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/ablation_study/v_15_baseline_seg12_finetuneGD_8A100_remainLyrIdx13_flip_FFHQ_300KIters/checkpoints/iteration_300000.pt\", type=str, help='Path to model checkpoint')\n\t\t# self.parser.add_argument('--checkpoint_path', default=\"/our_editing-master/ckpts/iteration_120000.pt\", type=str, help='Path to model checkpoint')\n\t\tself.parser.add_argument('--checkpoint_path', default=\"pretrained/zhian/iteration_300000.pt\", type=str,\n\t\t\t\t\t\t\t\t help='Path to model checkpoint')\n\t\tself.parser.add_argument('--save_dir', default=\"./out_dir\", type=str, help='Path to save dir') \n\t\tself.parser.add_argument('--device', default='cuda:0', type=str, help='Which GPU(s) to use')\n\n\t\tself.parser.add_argument('--start_from_latent_avg', action='store_true',default=True, help='Whether to add average latent vector to generate codes from encoder.')\n\t\tself.parser.add_argument('--learn_in_w', action='store_true', help='Whether to learn in w space instead of w+')\n \n\tdef parse(self):\n\t\topts = self.parser.parse_args()\n\t\treturn opts" }, { "identifier": "Ui_Form", "path": "ui_run/ui.py", "snippet": "class Ui_Form(object):\n def setupUi(self, Form):\n Form.setObjectName(\"Form\")\n # Form.resize(1920, 1080)\n\n Form.resize(int(1920* SCALE), int(1080 * SCALE))\n\n # Form.resize(1980, 1100)\n\n\n # Label Buttons to change the semantic meanings of the Brush\n # First Row\n self.add_brush_widgets(Form)\n self.add_top_buttons(Form)\n self.add_label_buttons(Form)\n # 
self.add_label_buttons_seg19(Form)\n self.add_tool_buttons(Form)\n self.add_checkbox_widgets(Form)\n self.add_input_img_button(Form)\n self.add_ops_log_textBox(Form)\n self.add_ref_img_button(Form)\n\n self.graphicsView = QtWidgets.QGraphicsView(Form)\n self.graphicsView.setGeometry(QtCore.QRect(652* SCALE, 140* SCALE, 518* SCALE, 518* SCALE))\n self.graphicsView.setObjectName(\"graphicsView\")\n self.graphicsView_2 = QtWidgets.QGraphicsView(Form)\n self.graphicsView_2.setGeometry(QtCore.QRect(1204* SCALE, 140* SCALE, 518* SCALE, 518* SCALE))\n self.graphicsView_2.setObjectName(\"graphicsView_2\")\n\n self.graphicsView_GT = QtWidgets.QGraphicsView(Form)\n self.graphicsView_GT.setGeometry(QtCore.QRect(100* SCALE, 140* SCALE, 518* SCALE, 518* SCALE))\n self.graphicsView_GT.setObjectName(\"graphicsView_GT\")\n\n\n self.referDialog = ReferenceDialog(self)\n self.referDialog.setObjectName('Reference Dialog')\n # self.referDialog.setWindowTitle('Reference Image:')\n self.referDialog.setWindowTitle('Style Image')\n self.referDialogImage = QtWidgets.QLabel(self.referDialog)\n self.referDialogImage.setFixedSize(512, 512)\n # self.referDialog.show()\n\n self.snapshotDialog = SnapshotDialog(self)\n self.snapshotDialog.setObjectName('Snapshot Dialog')\n self.snapshotDialog.setWindowTitle('Reference Image:')\n self.snapshotDialogImage = QtWidgets.QLabel(self.snapshotDialog)\n self.snapshotDialogImage.setFixedSize(512, 512)\n\n self.add_intermediate_results_button(Form)\n self.add_alpha_bar(Form)\n\n QtCore.QMetaObject.connectSlotsByName(Form) # connect signals and slots\n\n def retranslateUi(self, Form):\n # Form.setWindowTitle(_translate(\"Form\", \"Let's Party Face Manipulation v0.2\"))\n Form.setWindowTitle(_translate(\"Form\", \"Interactive Editing\"))\n self.pushButton.setText(_translate(\"Form\", \"Open Image\"))\n self.pushButton_2.setText(_translate(\"Form\", \"Edit Style\"))\n self.pushButton_3.setText(_translate(\"Form\", \"Edit Shape\"))\n self.pushButton_4.setText(_translate(\"Form\", \"Recon\"))\n\n self.saveImg.setText(_translate(\"Form\", \"Save Img\"))\n\n def add_alpha_bar(self, Form): # alpha value; presumably controls the interpolation strength\n\n\n self.alphaLabel = QtWidgets.QLabel(Form)\n self.alphaLabel.setObjectName(\"alphaLabel\")\n self.alphaLabel.setGeometry(QtCore.QRect(Lb_x + 10*SCALE * Lb_row_shift + 10*SCALE * Lb_width + 40*SCALE, Lb_y, 150*SCALE, 20*SCALE))\n self.alphaLabel.setText('Alpha: 1.0')\n font = self.brushsizeLabel.font()\n font.setPointSize(10)\n font.setBold(True)\n self.alphaLabel.setFont(font)\n\n self.alphaSlider = QtWidgets.QSlider(Form)\n self.alphaSlider.setOrientation(QtCore.Qt.Horizontal)\n self.alphaSlider.setGeometry(QtCore.QRect(Lb_x + 10*SCALE * Lb_row_shift + 10 *SCALE* Lb_width + 150*SCALE, Lb_y, 225*SCALE, 10*SCALE))\n self.alphaSlider.setObjectName(\"alphaSlider\")\n self.alphaSlider.setMinimum(0)\n self.alphaSlider.setMaximum(20)\n self.alphaSlider.setValue(20)\n self.alphaSlider.valueChanged.connect(Form.change_alpha_value)\n\n def add_intermediate_results_button(self, Form): # scroll area for saving intermediate results\n\n self.snap_scrollArea = QtWidgets.QScrollArea(Form)\n self.snap_scrollArea.setGeometry(QtCore.QRect(100, Lb_y + Lb_height + Lb_col_shift + Lb_height, 1622* SCALE, 250* SCALE))\n self.snap_scrollArea.setWidgetResizable(True)\n self.snap_scrollArea.setObjectName(\"snap_scrollArea\")\n self.snap_scrollArea.setAlignment(Qt.AlignCenter)\n #self.snap_scrollArea.setStyleSheet(\"border-color: transparent\")\n self.snap_scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n\n 
self.snap_scrollAreaWidgetContents = QtWidgets.QWidget()\n self.snap_scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, 1622* SCALE, 250* SCALE))\n self.snap_scrollAreaWidgetContents.setObjectName(\"snap_scrollAreaWidgetContents\")\n\n self.snap_gridlLayout = QtWidgets.QGridLayout(self.snap_scrollAreaWidgetContents)\n # # snap_horizontalLayout.setContentsMargins(11, 11, 11, 11)\n self.snap_gridlLayout.setSpacing(20)\n self.snap_gridlLayout.setAlignment(Qt.AlignLeft)\n\n self.snap_style_button_list = []\n self.mask_snap_style_button_list = []\n\n for i in range(15):\n snap_style_button = QtWidgets.QPushButton()\n snap_style_button.setFixedSize(100, 100)\n snap_style_button.setStyleSheet(\"background-color: transparent\")\n snap_style_button.setIcon(QIcon())\n snap_style_button.setIconSize(QSize(100, 100))\n snap_style_button.clicked.connect(partial(self.open_snapshot_dialog, i))\n # snap_style_button.snap_shot_name = None\n self.snap_style_button_list.append(snap_style_button)\n # style_button.hide()\n self.snap_gridlLayout.addWidget(snap_style_button, 1, i)\n\n\n mask_snap_style_button = QtWidgets.QPushButton()\n mask_snap_style_button.setFixedSize(100, 100)\n mask_snap_style_button.setStyleSheet(\"background-color: transparent\")\n mask_snap_style_button.setIcon(QIcon())\n mask_snap_style_button.setIconSize(QSize(100, 100))\n self.mask_snap_style_button_list.append(mask_snap_style_button)\n # mask_snap_style_button.hide()\n self.snap_gridlLayout.addWidget(mask_snap_style_button, 0, i)\n\n\n self.snap_scrollArea.setWidget(self.snap_scrollAreaWidgetContents)\n\n def add_input_img_button(self, Form): # the currently edited image at the top right\n self.input_img_button = QtWidgets.QPushButton(Form)\n self.input_img_button.setGeometry(QtCore.QRect(1770*SCALE , 15*SCALE, 100*SCALE, 100*SCALE))\n self.input_img_button.setStyleSheet(\"background-color: transparent\")\n self.input_img_button.setFixedSize(100, 100)\n self.input_img_button.setIcon(QIcon(None))\n self.input_img_button.setIconSize(QSize(100, 100))\n self.input_img_button.clicked.connect(partial(Form.set_ref_img_path, 0))\n\n def add_checkbox_widgets(self, Form): # the checkboxes at the top right\n self.checkBoxGroupBox = QtWidgets.QGroupBox(\"Replace Style of Components\", Form)\n self.checkBoxGroupBox.setGeometry(QtCore.QRect(920* SCALE, 10* SCALE, 800, 100))\n\n layout = QtWidgets.QGridLayout()\n self.checkBoxGroup = QtWidgets.QButtonGroup(Form)\n self.checkBoxGroup.setExclusive(False)\n for i, j in enumerate(my_number_object):\n cb = QtWidgets.QCheckBox(my_number_object[j])\n self.checkBoxGroup.addButton(cb, i)\n layout.addWidget(cb, i//10, i%10)\n\n cb = QtWidgets.QCheckBox('ALL')\n self.checkBoxGroup.addButton(cb, )\n layout.addWidget(cb, (i+1)//10, (i+1)%10)\n\n self.checkBoxGroupBox.setLayout(layout)\n\n for i in range(len(my_number_object)):\n self.checkBoxGroup.button(i).setChecked(False)\n\n checkbox_status = [cb.isChecked() for cb in self.checkBoxGroup.buttons()]\n checkbox_status = checkbox_status[:len(my_number_object)]\n self.checkbox_status = checkbox_status\n self.checkBoxGroup.buttonToggled.connect(self.cb_event)\n\n def add_brush_widgets(self, Form):\n # KaustLogo = QtWidgets.QLabel(self)\n # # KaustLogo.setPixmap(QPixmap('icons/kaust_logo.svg').scaled(60, 60))\n # KaustLogo.setPixmap(QPixmap('ui_run/icons/1999780_200.png').scaled(60, 60))\n # KaustLogo.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60), 25, 80* SCALE, 80* SCALE))\n\n self.add_style_imgs_buttons(Form) # load the candidate style images on the right\n self.brushsizeLabel = QtWidgets.QLabel(Form)\n 
self.brushsizeLabel.setObjectName(\"brushsizeLabel\")\n self.brushsizeLabel.setGeometry(QtCore.QRect(int(Tb_x), 25, int(150 * SCALE), int(20 * SCALE)))\n self.brushsizeLabel.setText('Brush size: 6')\n font = self.brushsizeLabel.font()\n font.setPointSize(10)\n font.setBold(True)\n self.brushsizeLabel.setFont(font)\n\n self.brushSlider = QtWidgets.QSlider(Form)\n self.brushSlider.setOrientation(QtCore.Qt.Horizontal)\n self.brushSlider.setGeometry(QtCore.QRect(int(Tb_x + 150* SCALE), 25, int(600* SCALE), int(10* SCALE)))\n self.brushSlider.setObjectName(\"brushSlider\")\n self.brushSlider.setMinimum(1)\n self.brushSlider.setMaximum(100)\n self.brushSlider.setValue(8)\n self.brushSlider.valueChanged.connect(Form.change_brush_size) # bind to slider value changes\n\n def add_top_buttons(self, Form): # add the top row of buttons\n self.pushButton = QtWidgets.QPushButton(Form)\n self.pushButton.setGeometry(QtCore.QRect(int(Tb_x), int(Tb_y), int(Tb_width), int(Tb_height)))\n self.pushButton.setObjectName(\"pushButton\")\n self.pushButton.clicked.connect(Form.open)\n\n self.pushButton_2 = QtWidgets.QPushButton(Form)\n self.pushButton_2.setGeometry(QtCore.QRect(int(Tb_x + 1 * Tb_row_shift + 1 * Tb_width), int(Tb_y), int(Tb_width), int(Tb_height)))\n self.pushButton_2.setObjectName(\"pushButton_2\")\n self.pushButton_2.clicked.connect(Form.mixing_ref_img_style)\n\n self.pushButton_3 = QtWidgets.QPushButton(Form)\n self.pushButton_3.setGeometry(QtCore.QRect(int(Tb_x + 2 * Tb_row_shift + 2 * Tb_width), int(Tb_y), int(Tb_width), int(Tb_height)))\n self.pushButton_3.setObjectName(\"pushButton_3\")\n self.pushButton_3.clicked.connect(Form.editing)\n\n self.pushButton_4 = QtWidgets.QPushButton(Form)\n self.pushButton_4.setGeometry(QtCore.QRect(int(Tb_x + 3 * Tb_row_shift + 3 * Tb_width), int(Tb_y), int(Tb_width), int(Tb_height)))\n self.pushButton_4.setObjectName(\"pushButton_4\")\n self.pushButton_4.clicked.connect(Form.recon)\n\n self.saveImg = QtWidgets.QPushButton(Form)\n self.saveImg.setGeometry(QtCore.QRect(int(Tb_x + 4 * Tb_row_shift + 4 * Tb_width), int(Tb_y), int(Tb_width), int(Tb_height)))\n self.saveImg.setObjectName(\"saveImg\")\n self.saveImg.clicked.connect(Form.save_img)\n\n self.retranslateUi(Form)\n\n def add_tool_buttons(self, Form): # the toolbar icons on the left\n self.newButton = QtWidgets.QPushButton(Form)\n self.newButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60* SCALE), 140* SCALE + 60* SCALE*1 + 10* SCALE*1, 60* SCALE, 60* SCALE))\n self.newButton.setObjectName(\"openButton\")\n self.newButton.setIcon(QIcon('ui_run/icons/reset200.png'))\n self.newButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n self.newButton.clicked.connect(Form.init_screen) # reset\n\n # self.openButton = QtWidgets.QPushButton(Form)\n # self.openButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60* SCALE), 140* SCALE, 60* SCALE, 60* SCALE))\n # self.openButton.setObjectName(\"openButton\")\n # self.openButton.setIcon(QIcon('ui_run/icons/open.png'))\n # self.openButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n # self.openButton.clicked.connect(Form.open) \n\n self.fillButton = QtWidgets.QPushButton(Form)\n self.fillButton.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60* SCALE), 140* SCALE + 60* SCALE*2 + 10* SCALE*2, 60* SCALE, 60* SCALE))\n self.fillButton.setObjectName(\"fillButton\")\n self.fillButton.setIcon(QIcon('ui_run/icons/paint_can.png'))\n self.fillButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n self.fillButton.clicked.connect(partial(Form.mode_select, 2))\n\n self.brushButton = QtWidgets.QPushButton(Form)\n 
self.brushButton.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60* SCALE), 140* SCALE + 60* SCALE*3 + 10* SCALE*3, 60* SCALE, 60* SCALE))\n self.brushButton.setObjectName(\"brushButton\")\n self.brushButton.setIcon(QIcon('ui_run/icons/paint_brush.png'))\n self.brushButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n self.brushButton.setStyleSheet(\"background-color: #85adad\")\n #self.brushButton.setStyleSheet(\"background-color:\")\n self.brushButton.clicked.connect(partial(Form.mode_select, 0))\n\n self.recButton = QtWidgets.QPushButton(Form)\n self.recButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60* SCALE), 140* SCALE + 60* SCALE * 4 + 10* SCALE * 4, 60* SCALE, 60* SCALE))\n self.recButton.setObjectName(\"undolButton\")\n self.recButton.setIcon(QIcon('ui_run/icons/brush_square.png'))\n self.recButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n self.recButton.clicked.connect(partial(Form.mode_select, 1))\n\n self.undoButton = QtWidgets.QPushButton(Form)\n self.undoButton.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60* SCALE), 140* SCALE + 60* SCALE*5 + 10* SCALE*5, 60* SCALE, 60* SCALE))\n self.undoButton.setObjectName(\"undolButton\")\n self.undoButton.setIcon(QIcon('ui_run/icons/undo.png'))\n self.undoButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n self.undoButton.clicked.connect(Form.undo)\n\n # self.saveButton = QtWidgets.QPushButton(Form)\n # self.saveButton.setGeometry(QtCore.QRect(int(Lb_x - 1 * Lb_row_shift - 60* SCALE), 140* SCALE + 60* SCALE * 6 + 10* SCALE * 6, 60* SCALE, 60* SCALE))\n # self.saveButton.setObjectName(\"saveButton\")\n # self.saveButton.setIcon(QIcon('ui_run/icons/save.png'))\n # self.saveButton.setIconSize(QSize(60* SCALE, 60* SCALE))\n # self.saveButton.clicked.connect(Form.save_img)\n\n def add_style_imgs_buttons(self, Form): # add the style image area (the scroll box on the right)\n\n self.scrollArea = QtWidgets.QScrollArea(Form)\n self.scrollArea.setGeometry(QtCore.QRect(int(1756* SCALE), int(140* SCALE), 140, 512))\n self.scrollArea.setWidgetResizable(True)\n self.scrollArea.setObjectName(\"scrollArea\")\n\n self.scrollArea.setAlignment(Qt.AlignCenter)\n # self.scrollArea.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n self.scrollArea.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)\n\n self.scrollAreaWidgetContents = QtWidgets.QWidget() # a parent widget that holds the thumbnails in the scroll area\n self.scrollAreaWidgetContents.setGeometry(QtCore.QRect(0, 0, int(140 * SCALE), int(512 * SCALE)))\n self.scrollAreaWidgetContents.setObjectName(\"scrollAreaWidgetContents\")\n\n\n verticalLayout = QtWidgets.QVBoxLayout(self.scrollAreaWidgetContents)\n verticalLayout.setContentsMargins(11, 11, 11, 11)\n verticalLayout.setSpacing(6)\n\n\n # img_path_list = glob.glob('imgs/style_imgs_test/*.jpg')\n img_path_list = glob.glob('ui_run/testset/CelebA-HQ/test/images/*.jpg')\n img_path_list.sort()\n\n # style_button = QtWidgets.QPushButton(self.scrollAreaWidgetContents)\n # style_button.setFixedSize(100, 100)\n # style_button.setIcon(QIcon('ui_run/icons/random.png'))\n # style_button.setIconSize(QSize(100, 100))\n # # style_button.clicked.connect(Form.load_partial_average_feature) # load a random feature; this is not implemented yet\n # verticalLayout.addWidget(style_button)\n\n for img_path in img_path_list:\n style_button = QtWidgets.QPushButton(self.scrollAreaWidgetContents)\n style_button.setFixedSize(100, 100)\n style_button.setIcon(QIcon(img_path))\n style_button.setIconSize(QSize(100, 100))\n style_button.clicked.connect(partial(Form.set_ref_img_path, img_path))\n verticalLayout.addWidget(style_button)\n\n\n 
verticalLayout.addWidget(style_button)\n self.scrollArea.setWidget(self.scrollAreaWidgetContents)\n\n def add_label_buttons(self, Form): # color buttons for the 12 mask classes\n\n self.color_Button = QtWidgets.QPushButton(Form) # the currently selected color\n self.color_Button.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), int(Lb_y), 60, 60))\n self.color_Button.setObjectName(\"labelButton_0\")\n self.color_Button.setStyleSheet(\"background-color: %s;\" % number_color[1]) # defaults to idx = 1\n\n\n self.labelButton_0 = QtWidgets.QPushButton(Form)\n self.labelButton_0.setGeometry(QtCore.QRect(int(Lb_x), int(Lb_y), int(Lb_width), int(Lb_height)))\n self.labelButton_0.setObjectName(\"labelButton_0\")\n self.labelButton_0.setText(_translate(\"Form\", \"background\"))\n self.labelButton_0.setStyleSheet(\"background-color: %s;\" % number_color[0]+ \" color: black\")\n self.labelButton_0.clicked.connect(partial(Form.switch_labels, 0))\n\n\n\n self.labelButton_1 = QtWidgets.QPushButton(Form)\n self.labelButton_1.setGeometry(QtCore.QRect(Lb_x + 1*Lb_row_shift + 1*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_1.setObjectName(\"labelButton_1\")\n self.labelButton_1.setText(_translate(\"Form\", \"lip\"))\n self.labelButton_1.setStyleSheet(\"background-color: %s;\" % number_color[1] + \" color: black\")\n self.labelButton_1.clicked.connect(partial(Form.switch_labels, 1))\n\n\n self.labelButton_2 = QtWidgets.QPushButton(Form)\n self.labelButton_2.setGeometry(QtCore.QRect(Lb_x + 2*Lb_row_shift + 2*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_2.setObjectName(\"labelButton_2\")\n self.labelButton_2.setText(_translate(\"Form\", \"eyebrows\"))\n self.labelButton_2.setStyleSheet(\"background-color: %s;\" % number_color[2] + \" color: black\")\n self.labelButton_2.clicked.connect(partial(Form.switch_labels, 2))\n \n\n self.labelButton_3 = QtWidgets.QPushButton(Form)\n self.labelButton_3.setGeometry(QtCore.QRect(Lb_x + 3*Lb_row_shift + 3*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_3.setObjectName(\"labelButton_3\")\n self.labelButton_3.setText(_translate(\"Form\", \"eyes\"))\n self.labelButton_3.setStyleSheet(\"background-color: %s;\" % number_color[3] + \" color: black\")\n self.labelButton_3.clicked.connect(partial(Form.switch_labels, 3))\n\n\n self.labelButton_4 = QtWidgets.QPushButton(Form)\n self.labelButton_4.setGeometry(QtCore.QRect(Lb_x + 4*Lb_row_shift + 4*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_4.setObjectName(\"labelButton_4\")\n self.labelButton_4.setText(_translate(\"Form\", \"hair\"))\n self.labelButton_4.setStyleSheet(\"background-color: %s;\" % number_color[4] + \" color: black\")\n self.labelButton_4.clicked.connect(partial(Form.switch_labels, 4))\n\n\n self.labelButton_5 = QtWidgets.QPushButton(Form)\n self.labelButton_5.setGeometry(QtCore.QRect(Lb_x + 5*Lb_row_shift + 5*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_5.setObjectName(\"labelButton_5\")\n self.labelButton_5.setText(_translate(\"Form\", \"nose\"))\n self.labelButton_5.setStyleSheet(\"background-color: %s;\" % number_color[5] + \" color: black\")\n self.labelButton_5.clicked.connect(partial(Form.switch_labels, 5))\n\n\n self.labelButton_6 = QtWidgets.QPushButton(Form)\n self.labelButton_6.setGeometry(QtCore.QRect(Lb_x + 6*Lb_row_shift + 6*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_6.setObjectName(\"labelButton_6\")\n self.labelButton_6.setText(_translate(\"Form\", \"skin\"))\n self.labelButton_6.setStyleSheet(\"background-color: %s;\" % number_color[6] + \" color: black\")\n 
self.labelButton_6.clicked.connect(partial(Form.switch_labels, 6))\n\n\n self.labelButton_7 = QtWidgets.QPushButton(Form)\n self.labelButton_7.setGeometry(QtCore.QRect(Lb_x + 7*Lb_row_shift + 7*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_7.setObjectName(\"labelButton_7\")\n self.labelButton_7.setText(_translate(\"Form\", \"ears\"))\n self.labelButton_7.setStyleSheet(\"background-color: %s;\" % number_color[7] + \" color: black\")\n self.labelButton_7.clicked.connect(partial(Form.switch_labels, 7))\n\n\n self.labelButton_8 = QtWidgets.QPushButton(Form)\n self.labelButton_8.setGeometry(QtCore.QRect(Lb_x + 8*Lb_row_shift + 8*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_8.setObjectName(\"labelButton_8\")\n self.labelButton_8.setText(_translate(\"Form\", \"belowface\"))\n self.labelButton_8.setStyleSheet(\"background-color: %s;\" % number_color[8] + \" color: black\")\n self.labelButton_8.clicked.connect(partial(Form.switch_labels, 8))\n\n self.labelButton_9 = QtWidgets.QPushButton(Form)\n self.labelButton_9.setGeometry(QtCore.QRect(Lb_x + 9 * Lb_row_shift + 9 * Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_9.setObjectName(\"labelButton_9\")\n self.labelButton_9.setText(_translate(\"Form\", \"mouth\"))\n self.labelButton_9.setStyleSheet(\"background-color: %s;\" % number_color[9] + \" color: black\")\n self.labelButton_9.clicked.connect(partial(Form.switch_labels, 9))\n\n\n # Second Row\n self.labelButton_10 = QtWidgets.QPushButton(Form)\n self.labelButton_10.setGeometry(QtCore.QRect(Lb_x,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_10.setObjectName(\"labelButton_10\")\n self.labelButton_10.setText(_translate(\"Form\", \"eye_glass\"))\n self.labelButton_10.setStyleSheet(\"background-color: %s;\" % number_color[10] + \" color: black\")\n self.labelButton_10.clicked.connect(partial(Form.switch_labels, 10))\n\n\n self.labelButton_11 = QtWidgets.QPushButton(Form)\n self.labelButton_11.setGeometry(QtCore.QRect(Lb_x + 1*Lb_row_shift + 1*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_11.setObjectName(\"labelButton_11\")\n self.labelButton_11.setText(_translate(\"Form\", \"ear_rings\"))\n self.labelButton_11.setStyleSheet(\"background-color: %s;\" % number_color[11] + \" color: black\")\n self.labelButton_11.clicked.connect(partial(Form.switch_labels, 11))\n\n def add_label_buttons_seg19(self,Form): # color buttons for the 19 mask classes\n self.color_Button = QtWidgets.QPushButton(Form) # the currently selected color\n self.color_Button.setGeometry(QtCore.QRect(int(Lb_x - 1*Lb_row_shift - 60), Lb_y, 60, 60))\n self.color_Button.setObjectName(\"labelButton_0\")\n self.color_Button.setStyleSheet(\"background-color: %s;\" % number_color[1]) # defaults to idx = 1\n\n\n self.labelButton_0 = QtWidgets.QPushButton(Form)\n self.labelButton_0.setGeometry(QtCore.QRect(Lb_x, Lb_y, Lb_width, Lb_height))\n self.labelButton_0.setObjectName(\"labelButton_0\")\n self.labelButton_0.setText(_translate(\"Form\", \"background\"))\n self.labelButton_0.setStyleSheet(\"background-color: %s;\" % number_color[0]+ \" color: black\")\n self.labelButton_0.clicked.connect(partial(Form.switch_labels, 0))\n\n\n self.labelButton_1 = QtWidgets.QPushButton(Form)\n self.labelButton_1.setGeometry(QtCore.QRect(Lb_x + 1*Lb_row_shift + 1*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_1.setObjectName(\"labelButton_1\")\n self.labelButton_1.setText(_translate(\"Form\", \"skin\"))\n self.labelButton_1.setStyleSheet(\"background-color: %s;\" % number_color[1] + \" color: 
black\")\n self.labelButton_1.clicked.connect(partial(Form.switch_labels, 1))\n\n\n self.labelButton_2 = QtWidgets.QPushButton(Form)\n self.labelButton_2.setGeometry(QtCore.QRect(Lb_x + 2*Lb_row_shift + 2*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_2.setObjectName(\"labelButton_2\")\n self.labelButton_2.setText(_translate(\"Form\", \"nose\"))\n self.labelButton_2.setStyleSheet(\"background-color: %s;\" % number_color[2] + \" color: black\")\n self.labelButton_2.clicked.connect(partial(Form.switch_labels, 2))\n \n\n self.labelButton_3 = QtWidgets.QPushButton(Form)\n self.labelButton_3.setGeometry(QtCore.QRect(Lb_x + 3*Lb_row_shift + 3*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_3.setObjectName(\"labelButton_3\")\n self.labelButton_3.setText(_translate(\"Form\", \"eye_g\"))\n self.labelButton_3.setStyleSheet(\"background-color: %s;\" % number_color[3] + \" color: black\")\n self.labelButton_3.clicked.connect(partial(Form.switch_labels, 3))\n\n\n self.labelButton_4 = QtWidgets.QPushButton(Form)\n self.labelButton_4.setGeometry(QtCore.QRect(Lb_x + 4*Lb_row_shift + 4*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_4.setObjectName(\"labelButton_4\")\n self.labelButton_4.setText(_translate(\"Form\", \"l_eye\"))\n self.labelButton_4.setStyleSheet(\"background-color: %s;\" % number_color[4] + \" color: black\")\n self.labelButton_4.clicked.connect(partial(Form.switch_labels, 4))\n\n\n self.labelButton_5 = QtWidgets.QPushButton(Form)\n self.labelButton_5.setGeometry(QtCore.QRect(Lb_x + 5*Lb_row_shift + 5*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_5.setObjectName(\"labelButton_5\")\n self.labelButton_5.setText(_translate(\"Form\", \"r_eye\"))\n self.labelButton_5.setStyleSheet(\"background-color: %s;\" % number_color[5] + \" color: black\")\n self.labelButton_5.clicked.connect(partial(Form.switch_labels, 5))\n\n\n self.labelButton_6 = QtWidgets.QPushButton(Form)\n self.labelButton_6.setGeometry(QtCore.QRect(Lb_x + 6*Lb_row_shift + 6*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_6.setObjectName(\"labelButton_6\")\n self.labelButton_6.setText(_translate(\"Form\", \"l_brow\"))\n self.labelButton_6.setStyleSheet(\"background-color: %s;\" % number_color[6] + \" color: black\")\n self.labelButton_6.clicked.connect(partial(Form.switch_labels, 6))\n\n\n self.labelButton_7 = QtWidgets.QPushButton(Form)\n self.labelButton_7.setGeometry(QtCore.QRect(Lb_x + 7*Lb_row_shift + 7*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_7.setObjectName(\"labelButton_7\")\n self.labelButton_7.setText(_translate(\"Form\", \"r_brow\"))\n self.labelButton_7.setStyleSheet(\"background-color: %s;\" % number_color[7] + \" color: black\")\n self.labelButton_7.clicked.connect(partial(Form.switch_labels, 7))\n\n\n self.labelButton_8 = QtWidgets.QPushButton(Form)\n self.labelButton_8.setGeometry(QtCore.QRect(Lb_x + 8*Lb_row_shift + 8*Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_8.setObjectName(\"labelButton_8\")\n self.labelButton_8.setText(_translate(\"Form\", \"l_ear\"))\n self.labelButton_8.setStyleSheet(\"background-color: %s;\" % number_color[8] + \" color: black\")\n self.labelButton_8.clicked.connect(partial(Form.switch_labels, 8))\n\n self.labelButton_9 = QtWidgets.QPushButton(Form)\n self.labelButton_9.setGeometry(QtCore.QRect(Lb_x + 9 * Lb_row_shift + 9 * Lb_width, Lb_y, Lb_width, Lb_height))\n self.labelButton_9.setObjectName(\"labelButton_9\")\n self.labelButton_9.setText(_translate(\"Form\", \"r_ear\"))\n 
self.labelButton_9.setStyleSheet(\"background-color: %s;\" % number_color[9] + \" color: black\")\n self.labelButton_9.clicked.connect(partial(Form.switch_labels, 9))\n\n\n # Second Row\n self.labelButton_10 = QtWidgets.QPushButton(Form)\n self.labelButton_10.setGeometry(QtCore.QRect(Lb_x,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_10.setObjectName(\"labelButton_10\")\n self.labelButton_10.setText(_translate(\"Form\", \"mouth\"))\n self.labelButton_10.setStyleSheet(\"background-color: %s;\" % number_color[10] + \" color: black\")\n self.labelButton_10.clicked.connect(partial(Form.switch_labels, 10))\n\n\n self.labelButton_11 = QtWidgets.QPushButton(Form)\n self.labelButton_11.setGeometry(QtCore.QRect(Lb_x + 1*Lb_row_shift + 1*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_11.setObjectName(\"labelButton_11\")\n self.labelButton_11.setText(_translate(\"Form\", \"u_lip\"))\n self.labelButton_11.setStyleSheet(\"background-color: %s;\" % number_color[11] + \" color: black\")\n self.labelButton_11.clicked.connect(partial(Form.switch_labels, 11))\n\n self.labelButton_12 = QtWidgets.QPushButton(Form)\n self.labelButton_12.setGeometry(QtCore.QRect(Lb_x + 2*Lb_row_shift + 2*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_12.setObjectName(\"labelButton_12\")\n self.labelButton_12.setText(_translate(\"Form\", \"l_lip\"))\n self.labelButton_12.setStyleSheet(\"background-color: %s;\" % number_color[12] + \" color: black\")\n self.labelButton_12.clicked.connect(partial(Form.switch_labels, 12))\n \n self.labelButton_13 = QtWidgets.QPushButton(Form)\n self.labelButton_13.setGeometry(QtCore.QRect(Lb_x + 3*Lb_row_shift + 3*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_13.setObjectName(\"labelButton_13\")\n self.labelButton_13.setText(_translate(\"Form\", \"hair\"))\n self.labelButton_13.setStyleSheet(\"background-color: %s;\" % number_color[13] + \" color: black\")\n self.labelButton_13.clicked.connect(partial(Form.switch_labels, 13))\n \n self.labelButton_14 = QtWidgets.QPushButton(Form)\n self.labelButton_14.setGeometry(QtCore.QRect(Lb_x + 4*Lb_row_shift + 4*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_14.setObjectName(\"labelButton_14\")\n self.labelButton_14.setText(_translate(\"Form\", \"hat\"))\n self.labelButton_14.setStyleSheet(\"background-color: %s;\" % number_color[14] + \" color: black\")\n self.labelButton_14.clicked.connect(partial(Form.switch_labels, 14))\n \n self.labelButton_15 = QtWidgets.QPushButton(Form)\n self.labelButton_15.setGeometry(QtCore.QRect(Lb_x + 5*Lb_row_shift + 5*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n \n self.labelButton_15.setObjectName(\"labelButton_15\")\n self.labelButton_15.setText(_translate(\"Form\", \"ear_r\"))\n self.labelButton_15.setStyleSheet(\"background-color: %s;\" % number_color[15] + \" color: black\")\n self.labelButton_15.clicked.connect(partial(Form.switch_labels, 15))\n\n\n self.labelButton_16 = QtWidgets.QPushButton(Form)\n self.labelButton_16.setGeometry(QtCore.QRect(Lb_x + 6*Lb_row_shift + 6*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_16.setObjectName(\"labelButton_16\")\n self.labelButton_16.setText(_translate(\"Form\", \"neck_l\"))\n self.labelButton_16.setStyleSheet(\"background-color: %s;\" % number_color[16] + \" color: black\")\n 
self.labelButton_16.clicked.connect(partial(Form.switch_labels, 16))\n\n self.labelButton_17 = QtWidgets.QPushButton(Form)\n self.labelButton_17.setGeometry(QtCore.QRect(Lb_x + 7*Lb_row_shift + 7*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_17.setObjectName(\"labelButton_17\")\n self.labelButton_17.setText(_translate(\"Form\", \"neck\"))\n self.labelButton_17.setStyleSheet(\"background-color: %s;\" % number_color[17] + \" color: black\")\n self.labelButton_17.clicked.connect(partial(Form.switch_labels, 17))\n\n self.labelButton_18 = QtWidgets.QPushButton(Form)\n self.labelButton_18.setGeometry(QtCore.QRect(Lb_x + 8*Lb_row_shift + 8*Lb_width,\n Lb_y + Lb_height + Lb_col_shift, Lb_width, Lb_height))\n self.labelButton_18.setObjectName(\"labelButton_18\")\n self.labelButton_18.setText(_translate(\"Form\", \"cloth\"))\n self.labelButton_18.setStyleSheet(\"background-color: %s;\" % number_color[18] + \" color: black\")\n self.labelButton_18.clicked.connect(partial(Form.switch_labels, 18))\n\n\n def add_ops_log_textBox(self,Form): # 操作日志框\n\n self.opsLogLabel = QtWidgets.QLabel(Form)\n self.opsLogLabel.setObjectName(\"opsLogLabel\")\n self.opsLogLabel.setGeometry(QtCore.QRect(Lb_x + 10*SCALE * Lb_row_shift + 10*SCALE * Lb_width + 40*SCALE, Lb_y + 50, 150*SCALE, 20*SCALE))\n self.opsLogLabel.setText('Logging ')\n font = self.brushsizeLabel.font()\n font.setPointSize(10)\n font.setBold(True)\n self.opsLogLabel.setFont(font)\n\n self.opsLogTextBox = QtWidgets.QPlainTextEdit(Form)\n self.opsLogTextBox.setReadOnly(True)\n self.opsLogTextBox.setObjectName(\"opsLogTextBox\")\n self.opsLogTextBox.setGeometry(QtCore.QRect(Lb_x + 10*SCALE * Lb_row_shift + 10 *SCALE* Lb_width + 150*SCALE, Lb_y+35, 225*SCALE, 40*SCALE))\n \n def add_ref_img_button(self, Form): # 右下角当前reference 的图片\n self.ref_img_button = QtWidgets.QPushButton(Form)\n self.ref_img_button.setGeometry(QtCore.QRect(1770*SCALE , 800*SCALE, 100*SCALE, 100*SCALE))\n self.ref_img_button.setStyleSheet(\"background-color: transparent\")\n self.ref_img_button.setFixedSize(100, 100)\n self.ref_img_button.setIcon(QIcon(None))\n self.ref_img_button.setIconSize(QSize(100, 100))\n \n\n def cb_event(self, id, ifchecked):\n\n if id.text() == 'ALL':\n if ifchecked:\n for cb in self.checkBoxGroup.buttons():\n cb.setChecked(True)\n else:\n for cb in self.checkBoxGroup.buttons():\n cb.setChecked(False)\n self.change_cb_state()\n\n def change_cb_state(self):\n checkbox_status = [cb.isChecked() for cb in self.checkBoxGroup.buttons()]\n checkbox_status = checkbox_status[:len(my_number_object)]\n #self.obj_dic_back = copy.deepcopy(self.obj_dic)\n self.checkbox_status = checkbox_status" }, { "identifier": "GraphicsScene", "path": "ui_run/mouse_event.py", "snippet": "class GraphicsScene(QGraphicsScene):\n def __init__(self, modes, Form):\n QGraphicsScene.__init__(self)\n self.modes = modes\n self.mouse_clicked = False\n self.prev_pt = None\n self.history_list = []\n\n # brush color\n self.color = '#cc0000'\n self.label = 1\n self.brush_size = 6\n self.Form = Form\n\n\n def reset(self):\n self.prev_pt = None\n\n self.history_list = []\n\n\n def reset_items(self):\n for i in range(len(self.items())):\n item = self.items()[0]\n self.removeItem(item)\n\n\n\n def mousePressEvent(self, event):\n self.mouse_clicked = True\n\n if self.modes == 1:\n self.rec_top_left = event.scenePos()\n self.old_recItem = None\n\n elif self.modes == 2:\n\n img_current_point = (int(event.scenePos().y()), int(event.scenePos().x())) # label map的当前位置 
(y,x)格式,即(行,列)\n scene_current_point= (int(event.scenePos().x()), int(event.scenePos().y())) # scene 的当前位置(x,y)格式\n\n current_color_label = self.Form.mat_img[img_current_point][0]\n thresh = np.uint8(self.Form.mat_img[:, :, 0] == current_color_label) * 255\n cnts = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n Contours_num = None\n\n for i in range(len(cnts[0])):\n whether_in_shape = cv2.pointPolygonTest(cnts[0][i], scene_current_point, False) # 判断一个点是否在多边形内,返回-1表示在外部,1表示在内部,0表示在多边形上\n if whether_in_shape == 1:\n Contours_num = i\n break\n\n if Contours_num != None:\n qpoints = [QPointF(pt[0][0], pt[0,1]) for pt in cnts[0][Contours_num]]\n PolygonItem = QGraphicsPolygonItem(QPolygonF(qpoints)) # 画多边形\n PolygonItem.setBrush(QBrush(QColor(self.color)))\n PolygonItem.setPen(QPen(QColor(self.color), 2, Qt.SolidLine))\n\n self.addItem(PolygonItem)\n\n\n fill = {}\n fill['contours'] = cnts[0]\n fill['contours_num'] = Contours_num\n fill['label'] = self.label\n fill['shape'] = 'Fill'\n\n\n self.history_list.append(fill)\n self.convert_fill(fill)\n # self.Form.run_deep_model()\n\n\n\n\n def mouseReleaseEvent(self, event):\n self.prev_pt = None\n self.mouse_clicked = False\n\n if self.modes == 1:\n self.old_recItem = None\n\n\n\n def mouseMoveEvent(self, event):\n if self.mouse_clicked:\n # print(event.scenePos())\n if self.modes == 0:\n if self.prev_pt:\n self.drawStroke(self.prev_pt, event.scenePos())\n self.prev_pt = event.scenePos()\n\n else:\n self.prev_pt = event.scenePos()\n\n if self.modes == 1:\n self.drawRec(self.rec_top_left, event.scenePos())\n\n\n elif self.modes == 2:\n print('do nothing')\n\n\n\n\n def drawStroke(self, prev_pt, curr_pt):\n lineItem = QGraphicsLineItem(QLineF(prev_pt, curr_pt))\n lineItem.setPen(QPen(QColor(self.color), self.brush_size, Qt.SolidLine, cap=Qt.RoundCap, join=Qt.RoundJoin)) # rect\n self.addItem(lineItem)\n\n stroke = {}\n stroke['prev'] = (int(prev_pt.x()), int(prev_pt.y()))\n stroke['curr'] = (int(curr_pt.x()), int(curr_pt.y()))\n stroke['label'] = self.label\n stroke['brush_size'] = self.brush_size\n stroke['shape'] = 'Stroke'\n self.history_list.append(stroke)\n self.convert_stroke(stroke)\n # self.Form.run_deep_model()\n\n def drawRec(self, prev_pt, curr_pt):\n\n top_left = (int(min(prev_pt.x(), curr_pt.x())), int(min(prev_pt.y(), curr_pt.y())))\n bottom_right = (int(max(prev_pt.x(), curr_pt.x())), int(max(prev_pt.y(), curr_pt.y())))\n\n recItem = QGraphicsRectItem(QRectF(QPointF(top_left[0], top_left[1]), QPointF(bottom_right[0], bottom_right[1])))\n recItem.setBrush(QBrush(QColor(self.color)))\n recItem.setPen(QPen(Qt.NoPen))\n\n self.addItem(recItem)\n\n if self.old_recItem == None:\n self.old_recItem = recItem\n self.old_rec_mat_img = self.Form.mat_img.copy()\n else:\n self.removeItem(self.old_recItem)\n self.old_recItem = recItem\n self.history_list.pop()\n\n rec = {}\n\n\n rec['prev'] = top_left\n rec['curr'] = bottom_right\n\n rec['label'] = self.label\n rec['brush_size'] = None\n rec['shape'] = 'Rec'\n\n self.history_list.append(rec)\n\n self.Form.mat_img = self.old_rec_mat_img.copy()\n self.convert_rec(rec)\n # self.Form.run_deep_model()\n\n\n\n\n def convert_stroke(self, stroke_point):\n if len(stroke_point) == 5:\n color = stroke_point['label']\n cv2.line(self.Form.mat_img, stroke_point['prev'], stroke_point['curr'], (color, color, color), stroke_point['brush_size'])\n else:\n print(\"wrong stroke\")\n\n\n def convert_rec(self, rectangle):\n if len(rectangle) == 5:\n color = rectangle['label']\n 
cv2.rectangle(self.Form.mat_img, rectangle['prev'], rectangle['curr'], (color, color, color), -1)\n else:\n print(\"wrong rectangle\")\n\n def convert_fill(self, fill):\n if len(fill) == 4:\n color = fill['label']\n cv2.drawContours(self.Form.mat_img, fill['contours'], fill['contours_num'],(color, color, color), -1) # 填充轮廓多边形\n else:\n print(\"wrong fill\")\n\n\n\n\n def undo(self):\n if len(self.items())>1:\n\n if self.history_list[-1]['shape'] == 'Rec':\n item = self.items()[0]\n self.removeItem(item)\n self.history_list.pop()\n\n elif self.history_list[-1]['shape'] == 'Stroke':\n if len(self.items())>=6:\n for i in range(6):\n item = self.items()[0]\n self.removeItem(item)\n self.history_list.pop()\n else:\n for i in range(len(self.items())-1):\n item = self.items()[0]\n self.removeItem(item)\n self.history_list.pop()\n elif self.history_list[-1]['shape'] == 'Fill':\n item = self.items()[0]\n self.removeItem(item)\n self.history_list.pop()\n\n self.Form.mat_img = self.Form.mat_img_org.copy()\n for pts in self.history_list:\n if pts['shape'] == 'Stroke':\n self.convert_stroke(pts)\n elif pts['shape'] == 'Rec':\n self.convert_rec(pts)\n elif pts['shape'] == 'Fill':\n self.convert_fill(pts)\n # self.Form.run_deep_model()" }, { "identifier": "number_color", "path": "ui_run/util.py", "snippet": "def color_pred(pred):\ndef celebAHQ_masks_to_faceParser_mask_detailed(celebA_mask):\nCOMPS = ['background', 'lip', 'eyebrows', 'eyes', 'hair', 'nose', 'skin', 'ears', 'belowface','mouth','eye_glass','ear_rings']" }, { "identifier": "Net3", "path": "models/networks.py", "snippet": "class Net3(nn.Module):\n \"\"\" FSEncoder + styleGAN2 \"\"\"\n\n def __init__(self,opts,):\n super(Net3, self).__init__()\n self.opts=opts\n assert self.opts.fsencoder_type in [\"psp\",\"sean\"]\n if self.opts.fsencoder_type==\"psp\":\n self.encoder = FSEncoder_PSP(mode='ir_se', opts=self.opts)\n dim_s_code = 256 + 512 + 512\n else:\n self.encoder = FSEncoder_SEAN(input_nc=3, output_nc=512,in_size = 256)\n dim_s_code = 512\n \n self.split_layer_idx = 5\n self.remaining_layer_idx = self.opts.remaining_layer_idx\n \n # 区分component 的 W+ space 的 MLPs\n self.MLPs = nn.ModuleList()\n for i in range(self.opts.num_seg_cls):\n self.MLPs.append(\n LocalMLP(\n dim_component=dim_s_code,\n dim_style=512,\n num_w_layers= self.remaining_layer_idx if self.remaining_layer_idx != 17 else 18\n )\n )\n \n self.G = Generator(size=self.opts.out_size, style_dim=512, n_mlp=8, split_layer_idx = self.split_layer_idx, remaining_layer_idx = self.remaining_layer_idx)\n\n # styleGAN的参数是否更新\n if not self.opts.train_G:\n for param in self.G.parameters():\n param.requires_grad = False\n # 注意,styleGAN的8层FC是永远不更新的\n else:\n for param in self.G.style.parameters():\n param.requires_grad = False\n \n # styleGAN的倒数几层不更新 (包括convs 和 ToRGBs)\n if self.remaining_layer_idx != 17:\n for param in self.G.convs[-(17-self.remaining_layer_idx):].parameters():\n param.requires_grad = False\n for param in self.G.to_rgbs[-(17-self.remaining_layer_idx)//2 - 1:].parameters():\n param.requires_grad = False\n \n \n def forward(self, img,mask, resize=False, randomize_noise=True,return_latents=False):\n \"\"\"输入一张RGB图和对应的mask,\n (1) encoder 得到对应的F/S空间的特征,\n (2) 再送到styleGAN得到一张输出的图片\n\n Args:\n img (Tensor): 一对RGB图, each with shape [bs,3,1024,1024]\n mask ([type]): 一对RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n resize (bool, optional): G生成的图片是否 resize. Defaults to True.\n randomize_noise (bool, optional): 是否加入随机噪声. 
Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 13, 512]\n \n \n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n # 1. 完全使用 style code i.e., G(w)\n images1, result_latent, structure_feats_GT = self.G([codes], structure_feats, mask, input_is_latent=True,\n randomize_noise=randomize_noise,return_latents=return_latents,\n use_structure_code=False)\n \n \n # # 2. 使用 style code 和 strcture code i.e., G(w,F)\n # images2, _ , _ = self.G([codes], structure_feats, mask, input_is_latent=True,\n # randomize_noise=randomize_noise,return_latents=return_latents,\n # use_structure_code=True)\n \n if return_latents:\n return images1, structure_feats_GT, result_latent\n else:\n return images1, structure_feats_GT\n\n def get_style(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style codes\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n structure_feats(Tensor): 图片的structure code, with shape [bs,512,32,32], 注意,这里其实是相对于StyleGAN第层输出的残差\n all_codes(Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]。\n !!! 
注意,前7层的各个compnent其实没有意义,只是为了统一接口让shape保持一致,用的时候只用第1个即可 !!!\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n codes_vector, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n codes=[]\n bs, num_comp = codes_vector.size(0), codes_vector.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](codes_vector[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](codes_vector.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return structure_feats, style_codes\n\n def get_style_vectors(self, img, mask):\n \"\"\"输入一张RGB图和对应的mask, 得到各个component 对应的style vectors\n \n Args:\n img (Tensor): RGB图, each with shape [bs,3,1024,1024]\n mask (Tensor): RGB图对应的mask图, each with shape [bs,#seg_cls,1024,1024]\n \n Returns:\n style_vectors(Tensor): with shape [bs,#seg_cls,512]\n \"\"\"\n if self.opts.fsencoder_type==\"psp\":\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n else:\n style_vectors, structure_feats = self.encoder(F.interpolate(img,(256,256),mode='bilinear'), mask) # [bs,#seg_cls, D], [bs,C,32,32]\n \n return style_vectors, structure_feats\n \n def cal_style_codes(self,style_vectors):\n \"\"\"根据每个compnent的 style vector转到styleGAN的style code\"\"\"\n \n codes=[]\n bs, num_comp = style_vectors.size(0), style_vectors.size(1)\n for i in range(num_comp):\n codes.append(self.MLPs[i](style_vectors[:,i,:])) \n codes=torch.stack(codes,dim=1) # [bs, #seg_cls, 11,512]\n\n # # 剩下的几层不用分component\n # remaining_codes=[]\n # for i in range(len(self.remain_MLPs)):\n # remaining_codes.append(self.remain_MLPs[i](style_vectors.view(bs, -1)))\n # remaining_codes = torch.stack(remaining_codes,dim=1) # [bs,5,512]\n\n # normalize with respect to the center of an average face\n if self.opts.start_from_latent_avg:\n if self.opts.learn_in_w:\n # 为了保持接口统一,将后3层的 style code 也扩展出一个 #seg_cls 维度\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1)\n remaining_codes = self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n if self.remaining_layer_idx != 17:\n codes = codes + self.latent_avg[:self.remaining_layer_idx, :].repeat(codes.shape[0],codes.shape[1],1, 1)\n remaining_codes = 
self.latent_avg[self.remaining_layer_idx:, :].repeat(bs, num_comp, 1, 1) \n style_codes = torch.cat([codes, remaining_codes],dim=2)\n else:\n style_codes = codes + self.latent_avg.repeat(codes.shape[0],codes.shape[1],1, 1)\n \n return style_codes\n\n def gen_img(self, struc_codes, style_codes, mask, randomize_noise=True, noise=None, return_latents=False):\n \"\"\"输入一张mask 和 对应各components的style codes,以及这张图片的structure code, 生成一张图片\n \n Args:\n style_codes (Tensor): 各个component 对应的style codes, with shape [bs,#comp,18,512]\n struc_codes (Tensor)\n mask (Tensor): mask图, with shape [bs,#seg_cls,1024,1024]\n \n randomize_noise (bool, optional): 是否加入随机噪声. Defaults to True.\n return_latents (bool, optional): 是否返回style codes. Defaults to False.\n\n Returns:\n [type]: [description]\n \"\"\"\n \n images, result_latent, structure_feats = self.G([style_codes], struc_codes, mask, input_is_latent=True,\n randomize_noise=randomize_noise,noise=noise,return_latents=return_latents,\n use_structure_code=False)\n\n if return_latents:\n return images, result_latent, structure_feats\n else:\n return images,-1, structure_feats" }, { "identifier": "torch_utils", "path": "utils/torch_utils.py", "snippet": "def saveTensorToFile(tensor, save_path):\ndef interpolate(img, size):\ndef readImgAsTensor(img_path, gray=False, to_tensor=True, size=1024):\ndef featMap2im(var):\ndef tensor2im(var, is_zero_center: bool = True, ):\ndef im2tensor(var, add_c_dim: bool = False, norm: bool = True, std: bool = False):\ndef tensor2map(var,shown_mask_indices=None):\ndef vis_mask_in_color(mask):\ndef get_colors():\ndef vis_faces(log_hooks1):\ndef vis_faces_no_id(hooks_dict1, fig, gs, i):\ndef aggregate_loss_dict(agg_loss_dict):\ndef labelMap2OneHot(label, num_cls):\ndef remove_module_prefix(state_dict,prefix):\ndef requires_grad(model, flag=True):\ndef accumulate(model1, model2, decay=0.999):\n C, H, W = tensor.size()" }, { "identifier": "CelebAHQDataset", "path": "datasets/dataset.py", "snippet": "class CelebAHQDataset(Dataset):\n \"\"\"\n CelebA-HQ数据集,具体数据来自于 https://github.com/ZPdesu/SEAN\n \"\"\"\n def __init__(self, dataset_root, mode=\"test\",\n img_transform=TO_TENSOR, label_transform=TO_TENSOR,\n load_vis_img=False, fraction=1.0,\n flip_p=-1, # negative means not flipping\n specific_ids: Union[list, tuple] = None,\n paired: bool = False,\n shuffle: bool = False,\n ):\n assert mode in (\"train\", \"test\", \"all\"), \"CelebAHQDataset mode type unsupported!\"\n self.mode = mode\n if mode in (\"all\",):\n self.roots = [osp.join(dataset_root, \"train\"), osp.join(dataset_root, \"test\")]\n else:\n self.roots = [osp.join(dataset_root, self.mode)]\n self.img_transform = img_transform\n self.label_transform = label_transform\n self.load_vis_img = load_vis_img\n self.fraction = fraction\n self.flip_p = flip_p\n self.paired = paired\n\n self.imgs = []\n self.labels = []\n self.labels_vis = []\n for root in self.roots:\n imgs = sorted(make_dataset(osp.join(root, \"images\")))\n imgs = imgs[:int(len(imgs)*self.fraction)]\n\n labels = sorted(make_dataset(osp.join(root, \"labels\")))\n labels = labels[:int(len(labels)*self.fraction)]\n\n labels_vis = sorted(make_dataset(osp.join(root, \"vis\"))) if self.load_vis_img else None\n labels_vis = labels_vis[:int(len(labels_vis)*self.fraction)] if self.load_vis_img else []\n\n self.imgs.extend(imgs)\n self.labels.extend(labels)\n self.labels_vis.extend(labels_vis)\n\n self.imgs, self.labels, self.labels_vis = self._filter_specific_ids(specific_ids)\n\n if self.load_vis_img:\n assert len(self.imgs) 
== len(self.labels) == len(self.labels_vis)\n else:\n assert len(self.imgs) == len(self.labels)\n\n print(f\"[CelebAHQDataset] files loaded. mode={self.mode}, #imgs={len(self.imgs)}, \"\n f\"#labels={len(self.labels)}, #vis={len(self.labels_vis)}\")\n\n # # 优化 600 个iteration 的style code保存路径\n # self.optim_codes_dir = \"/apdcephfs/share_1290939/zhianliu/py_projects/pytorch-DDP-demo/work_dirs/v0_8_stage2_entypeSEAN/optim_Results\"\n \n # image pairs indices\n self.indices = np.arange(len(self.imgs))\n\n # TODO: shuffle the indices\n if shuffle:\n np.random.shuffle(self.indices)\n\n self.pair_indices = self.indices.reshape(-1, 2)\n\n def __len__(self):\n if not self.paired:\n return len(self.indices)\n else:\n return len(self.pair_indices)\n\n def _filter_specific_ids(self, specific_ids: tuple):\n \"\"\" filter the images according to the specific_ids\n \"\"\"\n if specific_ids is None:\n return self.imgs, self.labels, self.labels_vis\n elif self.fraction < 1.0:\n raise ValueError(\"[CelebAHQDataset] specific_ids and fraction cannot be set simultaneously!\")\n\n # parse the tuple into two lists, e.g. ((\"train\",\"12\"), (\"test\",\"45\")) -> (\"train\",\"train\") and (\"12\",\"45\")\n spec_modes, spec_ids = [], []\n id_order_dict = {}\n for idx, spec_id in enumerate(specific_ids):\n one_mode, one_id = spec_id[0], spec_id[1]\n spec_modes.append(one_mode)\n spec_ids.append(one_id)\n id_order_dict[one_id] = {\n \"mode\": one_mode, \"order\": idx,\n }\n\n # filter and re-order\n ret_imgs = [\"\"] * len(specific_ids)\n ret_labels = [\"\"] * len(specific_ids)\n ret_labels_vis = [\"\"] * len(specific_ids)\n found_cnt = 0\n for k in range(len(spec_ids)): # target specific ids\n one_spec_mode = spec_modes[k]\n one_spec_id = spec_ids[k]\n for idx in range(len(self.imgs)): # full dataset\n one_img = self.imgs[idx]\n one_label = self.labels[idx]\n one_label_vis = self.labels_vis[idx] if self.load_vis_img else None\n if one_spec_mode in one_img and one_spec_id == osp.basename(one_img): # found one\n found_cnt += 1\n one_spec_order = id_order_dict[one_spec_id][\"order\"]\n ret_imgs[one_spec_order] = one_img\n ret_labels[one_spec_order] = one_label\n ret_labels_vis[one_spec_order] = one_label_vis\n break\n\n if found_cnt < len(specific_ids):\n print(f\"[[Warning]][CelebAHQDataset] not enough images found (={found_cnt}) for \"\n f\"specific ids (={len(specific_ids)})!\")\n\n ret_imgs = list(filter(None, ret_imgs))\n ret_labels = list(filter(None, ret_labels))\n ret_labels_vis = list(filter(None, ret_labels_vis))\n return ret_imgs, ret_labels, ret_labels_vis\n\n def load_single_image(self, index):\n \"\"\"把一张图片的 原图, seg mask, 以及mask对应可视化的图都加载进来\n Args:\n index (int): 图片的索引\n Return:\n img: RGB图\n label: seg mask\n label_vis: seg mask的可视化图\n \"\"\"\n img_path = self.imgs[index]\n img = Image.open(img_path).convert('RGB')\n if self.img_transform is not None:\n img = self.img_transform(img)\n\n label = self.labels[index]\n # label = osp.join(\"/apdcephfs/share_1290939/zhianliu/py_projects/our_editing/ui_results\",\"%s_mask.png\"%osp.basename(label)[:-4])\n label = Image.open(label).convert('L')\n if self.label_transform is not None:\n label = self.label_transform(label)\n\n if self.load_vis_img:\n label_vis = self.labels_vis[index]\n label_vis = Image.open(label_vis).convert('RGB')\n label_vis = TO_TENSOR(label_vis)\n else:\n label_vis = -1 # unified interface\n return img, label, label_vis, img_path\n\n def _output_item(self, idx):\n if not self.paired:\n index = self.indices[idx]\n img, label, label_vis, 
img_path = self.load_single_image(index)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img = TF.hflip(img)\n label = TF.hflip(label)\n return img, label, label_vis, img_path\n else:\n index1 = self.indices[idx * 2]\n index2 = self.indices[idx * 2 + 1]\n img1, label1, label_vis1, img_path1 = self.load_single_image(index1)\n img2, label2, label_vis2, img_path2 = self.load_single_image(index2)\n if self.flip_p > 0:\n if random.random() < self.flip_p:\n img1 = TF.hflip(img1)\n label1 = TF.hflip(label1)\n if random.random() < self.flip_p:\n img2 = TF.hflip(img2)\n label2 = TF.hflip(label2)\n return {\n \"bag1\": (img1, label1, label_vis1, img_path1),\n \"bag2\": (img2, label2, label_vis2, img_path2)\n }\n\n def __getitem__(self, idx):\n return self._output_item(idx)\n \n # # 1阶段重建的图片\n # img_name = osp.basename(self.imgs[index])[:-4]\n # recon_img = Image.open(osp.join(self.optim_codes_dir,img_name,\"%s_recon.png\"%img_name)).convert('RGB')\n # if self.img_transform is not None:\n # recon_img = self.img_transform(recon_img)\n \n # # 优化后的code\n # optim_code_path = osp.join(self.optim_codes_dir,img_name,\"%s_0600.npy\"%img_name)\n # assert osp.exists(optim_code_path), \"%s 文件不存在!\"%optim_code_path\n # optimed_style_code = np.load(optim_code_path)[0]\n \n # return img, recon_img, optimed_style_code, label, label_vis\n \n # pair_indices = self.pair_indices[idx, :]\n\n # img1, label1, label_vis1 = self.load_single_image(pair_indices[0])\n # img2, label2, label_vis2 = self.load_single_image(pair_indices[1])\n\n # return (img1, img2), (label1, label2), (label_vis1, label_vis2)" }, { "identifier": "get_transforms", "path": "datasets/dataset.py", "snippet": "def get_transforms(normalize=True, toTensor=True):\n transform_list = []\n if toTensor:\n transform_list += [transforms.ToTensor()]\n\n if normalize:\n transform_list += [transforms.Normalize((0.5, 0.5, 0.5),\n (0.5, 0.5, 0.5))]\n return transforms.Compose(transform_list)" }, { "identifier": "TO_TENSOR", "path": "datasets/dataset.py", "snippet": "TO_TENSOR = transforms.ToTensor()" }, { "identifier": "NORMALIZE", "path": "datasets/dataset.py", "snippet": "NORMALIZE = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))" } ]
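The fill tool in GraphicsScene.mousePressEvent above (modes == 2) condenses to: threshold the label map at the clicked label, extract the contours of that region, test which contour contains the click, then repaint it. A minimal standalone sketch of that pattern, assuming OpenCV 4.x (where findContours returns two values) and a synthetic label map:

import cv2
import numpy as np

label_map = np.zeros((512, 512), np.uint8)
cv2.circle(label_map, (256, 256), 80, 4, -1)  # synthetic region painted with label 4
click_xy = (250, 260)                         # click position in scene coords, (x, y)

clicked_label = int(label_map[click_xy[1], click_xy[0]])  # image indexing is (row, col)
thresh = np.uint8(label_map == clicked_label) * 255
contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

for i, cnt in enumerate(contours):
    # pointPolygonTest returns +1 inside, -1 outside, 0 on the contour itself
    if cv2.pointPolygonTest(cnt, click_xy, False) > 0:
        cv2.drawContours(label_map, contours, i, 7, -1)  # flood the region with label 7
        break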
from options.ui_options import UIOptions
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5.QtPrintSupport import QPrintDialog, QPrinter
from ui_run.ui import Ui_Form
from ui_run.mouse_event import GraphicsScene
from ui_run.util import number_color, color_pred, celebAHQ_masks_to_faceParser_mask_detailed, my_number_object, COMPS
from PIL import Image
from PyQt5 import QtGui
from models.networks import Net3
from glob import glob
from utils import torch_utils
from datasets.dataset import CelebAHQDataset, get_transforms, TO_TENSOR, NORMALIZE
import sys
import cv2
import skimage.io
import qdarkstyle
import qdarkgraystyle
import os
import numpy as np
import torch
import copy
import torchvision.transforms as transforms
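The Ui_Form code in the context above wires every label button with clicked.connect(partial(Form.switch_labels, i)). A standalone sketch (no Qt required) of why functools.partial is used there instead of a loop-local lambda:

from functools import partial

def switch_labels(idx):
    print("switched to label", idx)

# a bare lambda captures the loop variable by reference, so after the loop
# every callback sees the final value of i
callbacks_bad = [lambda: switch_labels(i) for i in range(3)]
# partial freezes the argument at creation time, one index per button
callbacks_good = [partial(switch_labels, i) for i in range(3)]

callbacks_bad[0]()   # prints "switched to label 2"
callbacks_good[0]()  # prints "switched to label 0"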
18980
    @pyqtSlot()
    def change_alpha_value(self):
        self.alpha = self.alphaSlider.value() / 20
        self.alphaLabel.setText('Alpha: %.2f' % self.alpha)

    @pyqtSlot()
    def switch_labels(self, label):  # a new label color button was selected
        self.scene.label = label
        self.scene.color = number_color[label]
        self.color_Button.setStyleSheet("background-color: %s;" % self.scene.color)

    @pyqtSlot()
    def undo(self):
        self.scene.undo()

    def __init__(self, opt):
        super().__init__()
        self.init_deep_model(opt)
        self.setupUi(self)
        self.show()

        # default values
        self.modes = 0
        self.alpha = 1  # interpolation alpha
        self.ref_style_img_path = None
        self.mouse_clicked = False

        self.scene = GraphicsScene(self.modes, self)  # the scene used for editing
        self.scene.setSceneRect(0, 0, 512, 512)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignCenter)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.GT_scene = QGraphicsScene()
        self.graphicsView_GT.setScene(self.GT_scene)
        self.graphicsView_GT.setAlignment(Qt.AlignCenter)
        self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_GT.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.init_screen()  # initialize the screen

    def init_screen(self):
        # self.image = QPixmap(self.graphicsView.size())
        self.image = QPixmap(QSize(512, 512))  # the visualized mask image being edited
        self.image.fill(QColor('#FFFFFF'))
        self.mat_img = np.zeros([512, 512, 3], np.uint8)  # mask image, values in [0-12], 3 channels
        self.mat_img_org = self.mat_img.copy()

        self.GT_img_path = None
        GT_img = np.ones([512, 512, 3], np.uint8) * 255
        self.GT_img = Image.fromarray(GT_img)
        self.GT_img = self.GT_img.convert('RGB')

        #################### add GT image
        self.update_GT_image(GT_img)
        #####################

        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()
        self.scene.addPixmap(self.image)

        ############### load average features
        # TODO: uncomment these two lines
        # self.load_average_feature()
        # self.run_deep_model()

        self.recorded_img_names = []
        self.clean_snapshots()
        self.clean_generated_result()

    def init_deep_model(self, opt):  # initialize the model
        self.opt = opt
        assert self.opt.checkpoint_path is not None, "please specify the pre-trained weights!"
        print("Loading model and weights, please wait a few seconds...")
        self.net = Net3(self.opt).eval().to(self.opt.device)
        ckpt_dict = torch.load(self.opt.checkpoint_path)
        self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opt.device) if self.opt.start_from_latent_avg else None
        self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"], prefix="module."))
        print("Loading Done!")

        # fixed noise
        channels = {
            4: 512, 8: 512, 16: 512, 32: 512,
            64: 256 * 2, 128: 128 * 2, 256: 64 * 2, 512: 32 * 2, 1024: 16 * 2,
        }
        self.noise = [torch.randn(1, 512, 4, 4).to(self.opt.device)]
        for i in [8, 16, 32, 64, 128, 256, 512, 1024]:
            self.noise.append(torch.randn(1, channels[i], i, i).to(self.opt.device))
            self.noise.append(torch.randn(1, channels[i], i, i).to(self.opt.device))

    # ===================================================

    def editing(self):  # generate the edited result
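A note on the fixed-noise block in init_deep_model above: a 1024x1024 StyleGAN2 generator has 17 noise inputs, one 4x4 map plus two per higher resolution, and freezing them once at startup keeps repeated edits of the same face deterministic. A device-free restatement of that loop, using the same channel table as the snippet:

import torch

channels = {4: 512, 8: 512, 16: 512, 32: 512,
            64: 256 * 2, 128: 128 * 2, 256: 64 * 2, 512: 32 * 2, 1024: 16 * 2}

noise = [torch.randn(1, 512, 4, 4)]  # the single 4x4 map
for res in [8, 16, 32, 64, 128, 256, 512, 1024]:
    # two injection maps per resolution above 4x4
    noise.append(torch.randn(1, channels[res], res, res))
    noise.append(torch.randn(1, channels[res], res, res))

assert len(noise) == 17  # 1 + 2 * 8 resolutions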
class ExWindow(QMainWindow):
    def __init__(self, opt):
        super().__init__()
        self.EX = Ex(opt)
        self.setWindowIcon(QtGui.QIcon('ui_run/icons/edit_icon.svg'))


class Ex(QWidget, Ui_Form):

    @pyqtSlot()
    def change_brush_size(self):  # change the brush size
        self.scene.brush_size = self.brushSlider.value()
        self.brushsizeLabel.setText('Brush size: %d' % self.scene.brush_size)

    @pyqtSlot()
    def change_alpha_value(self):
        self.alpha = self.alphaSlider.value() / 20
        self.alphaLabel.setText('Alpha: %.2f' % self.alpha)

    @pyqtSlot()
    def switch_labels(self, label):  # a new label color button was selected
        self.scene.label = label
        self.scene.color = number_color[label]
        self.color_Button.setStyleSheet("background-color: %s;" % self.scene.color)

    @pyqtSlot()
    def undo(self):
        self.scene.undo()

    def __init__(self, opt):
        super().__init__()
        self.init_deep_model(opt)
        self.setupUi(self)
        self.show()

        # default values
        self.modes = 0
        self.alpha = 1  # interpolation alpha
        self.ref_style_img_path = None
        self.mouse_clicked = False

        self.scene = GraphicsScene(self.modes, self)  # the scene used for editing
        self.scene.setSceneRect(0, 0, 512, 512)
        self.graphicsView.setScene(self.scene)
        self.graphicsView.setAlignment(Qt.AlignCenter)
        self.graphicsView.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.result_scene = QGraphicsScene()
        self.graphicsView_2.setScene(self.result_scene)
        self.graphicsView_2.setAlignment(Qt.AlignCenter)
        self.graphicsView_2.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_2.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.GT_scene = QGraphicsScene()
        self.graphicsView_GT.setScene(self.GT_scene)
        self.graphicsView_GT.setAlignment(Qt.AlignCenter)
        self.graphicsView_GT.setVerticalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.graphicsView_GT.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)

        self.dlg = QColorDialog(self.graphicsView)
        self.init_screen()  # initialize the screen

    def init_screen(self):
        # self.image = QPixmap(self.graphicsView.size())
        self.image = QPixmap(QSize(512, 512))  # the visualized mask image being edited
        self.image.fill(QColor('#FFFFFF'))
        self.mat_img = np.zeros([512, 512, 3], np.uint8)  # mask image, values in [0-12], 3 channels
        self.mat_img_org = self.mat_img.copy()

        self.GT_img_path = None
        GT_img = np.ones([512, 512, 3], np.uint8) * 255
        self.GT_img = Image.fromarray(GT_img)
        self.GT_img = self.GT_img.convert('RGB')

        #################### add GT image
        self.update_GT_image(GT_img)
        #####################

        self.scene.reset()
        if len(self.scene.items()) > 0:
            self.scene.reset_items()
        self.scene.addPixmap(self.image)

        ############### load average features
        # TODO: uncomment these two lines
        # self.load_average_feature()
        # self.run_deep_model()

        self.recorded_img_names = []
        self.clean_snapshots()
        self.clean_generated_result()

    def init_deep_model(self, opt):  # initialize the model
        self.opt = opt
        assert self.opt.checkpoint_path is not None, "please specify the pre-trained weights!"
        print("Loading model and weights, please wait a few seconds...")
        self.net = Net3(self.opt).eval().to(self.opt.device)
        ckpt_dict = torch.load(self.opt.checkpoint_path)
        self.net.latent_avg = ckpt_dict['latent_avg'].to(self.opt.device) if self.opt.start_from_latent_avg else None
        self.net.load_state_dict(torch_utils.remove_module_prefix(ckpt_dict["state_dict"], prefix="module."))
        print("Loading Done!")

        # fixed noise
        channels = {
            4: 512, 8: 512, 16: 512, 32: 512,
            64: 256 * 2, 128: 128 * 2, 256: 64 * 2, 512: 32 * 2, 1024: 16 * 2,
        }
        self.noise = [torch.randn(1, 512, 4, 4).to(self.opt.device)]
        for i in [8, 16, 32, 64, 128, 256, 512, 1024]:
            self.noise.append(torch.randn(1, channels[i], i, i).to(self.opt.device))
            self.noise.append(torch.randn(1, channels[i], i, i).to(self.opt.device))

    # ===================================================

    def editing(self):  # generate the edited result
mat_img_seg12 = celebAHQ_masks_to_faceParser_mask_detailed(self.mat_img[:,:,0])
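celebAHQ_masks_to_faceParser_mask_detailed itself is not reproduced above, but the two button layouts pin down both label orders: the 19 CelebA-HQ classes (background, skin, nose, eye_g, ..., cloth) and the 12-class COMPS order. One plausible implementation is a per-pixel lookup table like the sketch below; the exact mapping, in particular where hat, neck_l, and cloth land, is an assumption:

import numpy as np

# assumed 19-class CelebA-HQ -> 12-class detailed faceParser remap
LUT = np.zeros(19, dtype=np.uint8)  # unmapped classes (hat, neck_l, cloth) fall back to background
LUT[[11, 12]] = 1   # u_lip, l_lip   -> lip
LUT[[6, 7]] = 2     # l_brow, r_brow -> eyebrows
LUT[[4, 5]] = 3     # l_eye, r_eye   -> eyes
LUT[13] = 4         # hair
LUT[2] = 5          # nose
LUT[1] = 6          # skin
LUT[[8, 9]] = 7     # l_ear, r_ear   -> ears
LUT[17] = 8         # neck           -> belowface
LUT[10] = 9         # mouth
LUT[3] = 10         # eye_g          -> eye_glass
LUT[15] = 11        # ear_r          -> ear_rings

celebA_mask = np.random.randint(0, 19, (512, 512), dtype=np.uint8)  # dummy input
mask_seg12 = LUT[celebA_mask]                                       # vectorized remap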
3
2023-10-15 12:15:01+00:00
24k
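A hypothetical consumer of records in this schema could assemble the gold cross-file snippet, the imports, and the cropped code into a prompt, then score a model's completion against next_line. The field names below follow the schema above; the file name, prompt layout, and exact-match scoring are illustrative, and reading gold_snippet_index as an index into context is an assumption:

import json

def build_prompt(record: dict) -> str:
    # assumed layout: gold cross-file snippet, then imports, then in-file prefix
    snippet = record["context"][record["gold_snippet_index"]]["snippet"]
    return snippet + "\n\n" + record["import_statement"] + "\n" + record["cropped_code"]

def exact_match(prediction: str, record: dict) -> bool:
    return prediction.strip() == record["next_line"].strip()

with open("completion_records.jsonl") as f:  # hypothetical file name
    for line in f:
        record = json.loads(line)
        prompt = build_prompt(record)
        # feed `prompt` to a model here, then check exact_match(...)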
sotopia-lab/sotopia
sotopia/server.py
[ { "identifier": "Agents", "path": "sotopia/agents/llm_agent.py", "snippet": "class Agents(dict[str, BaseAgent[Observation, AgentAction]]):\n def reset(self) -> None:\n for agent in self.values():\n agent.reset()\n\n def act(self, obs: dict[str, Observation]) -> dict[str, AgentAction]:\n return {\n agent_name: agent.act(obs[agent_name])\n for agent_name, agent in self.items()\n }" }, { "identifier": "HumanAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class HumanAgent(BaseAgent[Observation, AgentAction]):\n \"\"\"\n A human agent that takes input from the command line.\n \"\"\"\n\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n goal = input(\"Goal: \")\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n print(\"Available actions:\")\n for i, action in enumerate(obs.available_actions):\n print(f\"{i}: {action}\")\n\n action_type = obs.available_actions[int(input(\"Action type: \"))]\n argument = input(\"Argument: \")\n\n return AgentAction(action_type=action_type, argument=argument)\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n print(\"Available actions:\")\n for i, action in enumerate(obs.available_actions):\n print(f\"{i}: {action}\")\n\n if obs.available_actions != [\"none\"]:\n action_type_number = await ainput(\n \"Action type (Please only input the number): \"\n )\n try:\n action_type_number = int(action_type_number) # type: ignore\n except:\n print(\"Please input a number.\")\n action_type_number = await ainput(\n \"Action type (Please only input the number): \"\n )\n action_type_number = int(action_type_number) # type: ignore\n assert isinstance(\n action_type_number, int\n ), \"Please input a number.\"\n action_type = obs.available_actions[action_type_number]\n else:\n action_type = \"none\"\n if action_type in [\"speak\", \"non-verbal communication\"]:\n argument = await ainput(\"Argument: \")\n else:\n argument = \"\"\n\n return AgentAction(action_type=action_type, argument=argument)" }, { "identifier": "LLMAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class LLMAgent(BaseAgent[Observation, AgentAction]):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n script_like: bool = False,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.script_like = script_like\n\n @property\n def goal(self) -> str:\n if self._goal is not None:\n return self._goal\n assert (\n len(self.inbox) > 0\n ), \"attribute goal has to be called after at least one step\"\n goal = generate_goal(\n self.model_name,\n background=self.inbox[0][\n 1\n ].to_natural_language(), # Only consider the first message for now\n )\n return goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if 
len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action = gen_func(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n )\n return action\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n action, prompt = await agenerate_action(\n self.model_name,\n history=\"\\n\".join(\n f\"{y.to_natural_language()}\" for x, y in self.inbox\n ),\n turn_number=obs.turn_number,\n action_types=obs.available_actions,\n agent=self.agent_name,\n goal=self.goal,\n script_like=self.script_like,\n )\n return action" }, { "identifier": "ScriptWritingAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class ScriptWritingAgent(LLMAgent):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n agent_names: list[str] = [],\n background: ScriptBackground | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n self.model_name = model_name\n self.agent_names = agent_names\n assert background is not None, \"background cannot be None\"\n self.background = background\n\n async def aact(self, obs: Observation) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n message_to_compose = [\n y for idx, (x, y) in enumerate(self.inbox) if idx != 0\n ]\n\n history = \"\\n\".join(\n f\"{y.to_natural_language()}\" for y in message_to_compose\n )\n print(\"Current agent: \", self.agent_name)\n print(\"Composed history: \", history)\n\n action, prompt = await agenerate_script(\n model_name=self.model_name,\n background=self.background,\n agent_names=self.agent_names,\n history=history,\n agent_name=self.agent_name,\n single_step=True,\n )\n # action: tuple[\n # list[list[tuple[str, str, Message]]], list[tuple[str, Message]]\n # ]\n returned_action = cast(AgentAction, action[1][0][1])\n print(\"Action: \", returned_action, type(returned_action))\n # print(\"Action: \", action)\n # exit(0)\n\n return returned_action" }, { "identifier": "SpeakAgent", "path": "sotopia/agents/llm_agent.py", "snippet": "class SpeakAgent(LLMAgent):\n def act(\n self,\n obs: Observation,\n gen_func: Callable[..., AgentAction] = generate_action_speak,\n ) -> AgentAction:\n return super().act(obs, gen_func=gen_func)" }, { "identifier": "RedisAgent", "path": "sotopia/agents/redis_agent.py", "snippet": "class RedisAgent(BaseAgent[Observation, AgentAction]):\n \"\"\"An agent use redis as a message broker.\"\"\"\n\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n session_id: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n super().__init__(\n agent_name=agent_name,\n uuid_str=uuid_str,\n agent_profile=agent_profile,\n )\n # super().__init__(agent_name=agent_name, uuid_str=uuid_str)\n self.session_id = session_id or str(uuid4())\n self.sender_id = str(uuid4())\n print(f\"session id: {self.session_id}\")\n print(\"step 1: connect to the server\")\n assert (\n \"FASTAPI_URL\" in os.environ\n ), \"To use redis agent, you have to launch a FastAPI 
server and set FASTAPI_URL\"\n self._URL = os.environ[\"FASTAPI_URL\"]\n response = requests.request(\n \"POST\",\n f\"{self._URL}/connect/{self.session_id}/server/{self.sender_id}\",\n )\n assert (\n response.status_code == 200 and response.text == \"[]\"\n ), \"Failed to connect to the server\"\n logging.info(f\"Session ID: {self.session_id}\")\n # logging.info(f\"Sender ID: {self.sender_id}\")\n\n def act(\n self,\n obs: Observation,\n ) -> AgentAction:\n raise NotImplementedError\n\n async def aact(\n self,\n obs: Observation,\n ) -> AgentAction:\n self.recv_message(\"Environment\", obs)\n\n if len(obs.available_actions) == 1 and \"none\" in obs.available_actions:\n if obs.turn_number == 0:\n async with aiohttp.ClientSession() as session:\n print(\"step 2: post observation to the message list\")\n response = await session.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n data=obs.to_natural_language(),\n )\n assert response.status == 200, response\n sorted_message_list: list[tuple[float, str, str]] = list(\n map(\n lambda x: MessageTransaction.parse_obj(\n x\n ).to_tuple(),\n await response.json(),\n )\n )\n last_timestamp = sorted_message_list[-1][0]\n return AgentAction(action_type=\"none\", argument=\"\")\n else:\n async with aiohttp.ClientSession() as session:\n # 1. post observation to the message list\n response = await session.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n data=obs.to_natural_language(),\n )\n assert response.status == 200, response\n sorted_message_list = list(\n map(\n lambda x: MessageTransaction.parse_obj(x).to_tuple(),\n await response.json(),\n )\n )\n last_timestamp = sorted_message_list[-1][0]\n\n print(\"step 2: unlock the server for the client\")\n # 2. unlock the server for the client\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/action\",\n )\n assert response.status == 200, response\n\n print(\"step 3: wait for the client to post their message\")\n # 3. wait for the client to post their message\n for _ in range(300):\n response = await session.request(\n \"GET\",\n f\"{self._URL}/get/{self.session_id}\",\n )\n # print(f\"get response: {response}\")\n assert response.status == 200, response\n sorted_message_list = list(\n map(\n lambda x: MessageTransaction.parse_obj(\n x\n ).to_tuple(),\n await response.json(),\n )\n )\n if (\n sorted_message_list[-1][0] > last_timestamp\n and sorted_message_list[-1][1] == \"client\"\n ):\n # 3.a if the client has posted their message, lock the server for the client\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/no%20action\",\n )\n assert response.status == 200, response\n break\n else:\n # 3.b if the client has not posted their message, wait for 0.1 second and retry\n await asyncio.sleep(1)\n else:\n response = await session.request(\n \"PUT\",\n f\"{self._URL}/lock/{self.session_id}/{self.sender_id}/no%20action\",\n )\n self.reset(\n \"Someone has left or the conversation is too long.\"\n )\n return AgentAction(action_type=\"leave\", argument=\"\")\n action_string = sorted_message_list[-1][2]\n try:\n action = AgentAction.parse_raw(action_string)\n return action\n except pydantic.error_wrappers.ValidationError:\n logging.warn(\n \"Failed to parse action string {}. 
Fall back to speak\".format(\n action_string\n )\n )\n return AgentAction(\n action_type=\"speak\", argument=sorted_message_list[-1][2]\n )\n\n def reset(\n self,\n reset_reason: str = \"\",\n ) -> None:\n super().reset()\n try:\n if reset_reason != \"\":\n response = requests.request(\n \"POST\",\n f\"{self._URL}/send/{self.session_id}/{self.sender_id}\",\n json=reset_reason,\n )\n assert response.status_code == 200\n\n except Exception as e:\n logging.error(f\"Failed to reset RedisAgent {self.sender_id}: {e}\")" }, { "identifier": "BaseAgent", "path": "sotopia/agents/base_agent.py", "snippet": "class BaseAgent(Generic[ObsType, ActType], MessengerMixin):\n def __init__(\n self,\n agent_name: str | None = None,\n uuid_str: str | None = None,\n agent_profile: AgentProfile | None = None,\n ) -> None:\n MessengerMixin.__init__(self)\n if agent_profile is not None:\n self.profile = agent_profile\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = AgentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n self.agent_name = (\n self.profile.first_name + \" \" + self.profile.last_name\n )\n else:\n assert (\n agent_name is not None\n ), \"Either agent_name or uuid_str must be provided\"\n self.agent_name = agent_name\n\n self._goal: str | None = None\n\n @property\n def goal(self) -> str:\n assert (\n self._goal is not None\n ), \"attribute goal has to be set before use\"\n return self._goal\n\n @goal.setter\n def goal(self, goal: str) -> None:\n self._goal = goal\n\n def act(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n async def aact(self, obs: ObsType) -> ActType:\n raise NotImplementedError\n\n def reset(self) -> None:\n self.reset_inbox()" }, { "identifier": "EpisodeLog", "path": "sotopia/database/logs.py", "snippet": "class EpisodeLog(JsonModel):\n # Note that we did not validate the following constraints:\n # 1. The number of turns in messages and rewards should be the same or off by 1\n # 2. 
The agents in the messages are the same as the agetns\n\n environment: str = Field(index=True)\n agents: list[str] = Field(index=True)\n tag: str | None = Field(index=True)\n models: list[str] | None = Field(index=True)\n messages: list[list[tuple[str, str, str]]] # Messages arranged by turn\n reasoning: str\n rewards: list[\n tuple[float, dict[str, float]] | float\n ] # Rewards arranged by turn\n rewards_prompt: str\n\n @root_validator\n def agent_number_message_number_reward_number_turn_number_match(\n cls, values: Any\n ) -> Any:\n agents, _, reasoning, rewards = (\n values.get(\"agents\"),\n values.get(\"messages\"),\n values.get(\"reasoning\"),\n values.get(\"rewards\"),\n )\n agent_number = len(agents)\n\n assert (\n len(rewards) == agent_number\n ), f\"Number of agents in rewards {len(rewards)} and agents {agent_number} do not match\"\n return values\n\n def render_for_humans(self) -> tuple[list[AgentProfile], list[str]]:\n \"\"\"Generate a human readable version of the episode log.\n\n Returns:\n A tuple of (a list of agent_profiles, a list of str): The agent profiles, and the messages and rewards in each turn.\n \"\"\"\n\n agent_profiles = [\n AgentProfile.get(pk=uuid_str) for uuid_str in self.agents\n ]\n messages_and_rewards = []\n for idx, turn in enumerate(self.messages):\n messages_in_this_turn = []\n if idx == 0:\n assert (\n len(turn) >= 2\n ), \"The first turn should have at least environemnt messages\"\n messages_in_this_turn.append(turn[0][2])\n messages_in_this_turn.append(turn[1][2])\n for sender, receiver, message in turn:\n if receiver == \"Environment\":\n if sender != \"Environment\":\n if \"did nothing\" in message:\n continue\n else:\n if \"said:\" in message:\n messages_in_this_turn.append(\n f\"{sender} {message}\"\n )\n else:\n messages_in_this_turn.append(\n f\"{sender}: {message}\"\n )\n else:\n messages_in_this_turn.append(message)\n messages_and_rewards.append(\"\\n\".join(messages_in_this_turn))\n messages_and_rewards.append(f\"The reasoning is:\\n{self.reasoning}\")\n messages_and_rewards.append(\n f\"The rewards are:\\nAgent 1: {self.rewards[0]}\\nAgent 2: {self.rewards[1]}\"\n )\n return agent_profiles, messages_and_rewards" }, { "identifier": "AgentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class AgentProfile(JsonModel):\n first_name: str = Field(index=True)\n last_name: str = Field(index=True)\n age: int = Field(index=True, default_factory=lambda: 0)\n occupation: str = Field(index=True, default_factory=lambda: \"\")\n gender: str = Field(index=True, default_factory=lambda: \"\")\n gender_pronoun: str = Field(index=True, default_factory=lambda: \"\")\n public_info: str = Field(index=True, default_factory=lambda: \"\")\n big_five: str = Field(index=True, default_factory=lambda: \"\")\n moral_values: list[str] = Field(index=False, default_factory=lambda: [])\n schwartz_personal_values: list[str] = Field(\n index=False, default_factory=lambda: []\n )\n personality_and_values: str = Field(index=True, default_factory=lambda: \"\")\n decision_making_style: str = Field(index=True, default_factory=lambda: \"\")\n secret: str = Field(default_factory=lambda: \"\")\n model_id: str = Field(default_factory=lambda: \"\")" }, { "identifier": "EnvironmentProfile", "path": "sotopia/database/persistent_profile.py", "snippet": "class EnvironmentProfile(JsonModel):\n codename: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"The codename of the environment\",\n )\n source: str = Field(\n index=True,\n 
default_factory=lambda: \"\",\n description=\"The source of the environment\",\n )\n scenario: str = Field(\n index=True,\n default_factory=lambda: \"\",\n description=\"A concrete scenario of where the social interaction takes place, the scenario should have two agents (agent1 and agent2), and you should illustrate the relationship between the two agents, and for what purpose agent1 is interacting with agent2. Please avoid mentioning specific names and occupations in the scenario and keep all the mentions gender-neutral. Also avoid generating scenarios that requires childrend (below 18) or elderly (above 70) to be involved.\",\n )\n agent_goals: list[str] = Field(\n default_factory=lambda: [],\n description=\"The social goals of each agent, which could include <extra_info>...</extra_info>, <clarification_hint>...</clarification_hint>, and <strategy_hint>...</strategy_hint> to help the agent achieve the goal. Avoid providing too specific strategy hint, try to be as abstract as possible. For example, use 'you can provide financial benefits to achieve your goal' instead of 'you can buy him a boba tea to achieve your goal.'\",\n )\n relationship: RelationshipType = Field(\n index=True,\n default_factory=lambda: RelationshipType.stranger,\n description=\"The relationship between the two agents, choose from: stranger, know_by_name, acquaintance, friend, romantic_relationship, family_member. Do not make up a relationship, but choose from the list, 0 means stranger, 1 means know_by_name, 2 means acquaintance, 3 means friend, 4 means romantic_relationship, 5 means family_member\",\n )\n age_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The age constraint of the environment, a list of tuples, each tuple is a range of age, e.g., '[(18, 25), (30, 40)]' means the environment is only available to agent one between 18 and 25, and agent two between 30 and 40\",\n )\n occupation_constraint: str | None = Field(\n default_factory=lambda: None,\n description=\"The occupation constraint of the environment, a list of lists, each list is a list of occupations, e.g., '[['student', 'teacher'], ['doctor', 'nurse']]' means the environment is only available to agent one if agent one is a student or a teacher, and agent two is a doctor or a nurse\",\n )\n agent_constraint: list[list[str]] | None = Field(\n default_factory=lambda: None,\n )" }, { "identifier": "ParallelSotopiaEnv", "path": "sotopia/envs/parallel.py", "snippet": "class ParallelSotopiaEnv(\n ParallelEnv[str, Observation, AgentAction], MessengerMixin\n):\n def __init__(\n self,\n available_action_types: set[ActionType] = set(\n [\"none\", \"speak\", \"non-verbal communication\", \"action\", \"leave\"]\n ),\n action_order: Literal[\n \"simutaneous\", \"round-robin\", \"random\"\n ] = \"simutaneous\",\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n evaluators: list[Evaluator] = [],\n terminal_evaluators: list[Evaluator] = [],\n uuid_str: str | None = None,\n env_profile: EnvironmentProfile | None = None,\n ) -> None:\n \"\"\"A sotopia environment for parallel agents.\n\n Args:\n available_action_types (set[ActionType], optional): The action types that are available to the agents. Defaults to set([\"none\", \"speak\", \"non-verbal communication\", \"action\"]).\n action_order (Literal[\"simutaneous\", \"round-robin\", \"random\"], optional): The order in which the agents take actions. Defaults to \"simutaneous\".\n model_name (LLM_Name, optional): The name of the language model to use. 
Defaults to \"gpt-3.5-turbo\".\n \"\"\"\n super().__init__()\n self.model_name = model_name\n self.background = ScriptBackground(\n scenario=\"\",\n p1_background=\"\",\n p2_background=\"\",\n p1_goal=\"\",\n p2_goal=\"\",\n p1_name=\"\",\n p2_name=\"\",\n )\n\n self.agents = []\n self.action_spaces = {}\n self.available_action_types = list(available_action_types)\n self.action_order = action_order\n self.action_mask: list[bool] = []\n self.evaluators = evaluators\n self.terminal_evaluators = terminal_evaluators\n\n # if an environment profile is provided, use it\n assert (\n env_profile or uuid_str\n ), \"Either env_profile or uuid_str must be provided\"\n if env_profile is not None:\n self.profile = env_profile\n # if a uuid is provided, try to load the environment profile from the database\n elif uuid_str is not None:\n # try retrieving profile from database\n try:\n self.profile = EnvironmentProfile.get(pk=uuid_str)\n except NotFoundError:\n raise ValueError(\n f\"Agent with uuid {uuid_str} not found in database\"\n )\n\n @configurable\n def reset(\n self,\n seed: int | None = None,\n options: dict[str, str] | None = None,\n agents: Agents | None = None,\n omniscient: bool = False,\n lite: bool = False,\n ) -> dict[str, Observation]:\n \"\"\"Starting a new episode. Must be called before step().\n\n Args:\n seed (int, optional): Seed for the environment. Defaults to None. Not used right now.\n options (dict, optional): Options for the environment. Defaults to None.\n \"partial_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound can be incompleted (\"unknown\" for missing parts), and the missing parts will be filled in by the environment.\n \"full_background_file\" (str): Path to a json file which need to contain a ScriptBackground object. The backgound must be completed (no \"unknown\" for missing parts).\n omniscient (bool, optional): Whether the agents know the other agent's goal. 
Defaults to False.\n \"\"\"\n super().__init__()\n MessengerMixin.reset_inbox(self)\n assert (\n not options\n or not (\"partial_background_file\" in options)\n and not (\"full_background_file\" in options)\n ), \"partial_background_file and full_background_file are not supported anymore\"\n if agents is not None:\n assert agents, \"agents must be provided\"\n assert len(agents) == 2, \"Only supporting two agents right now\"\n agent_names = list(agents.keys())\n agent_goals = self.profile.agent_goals\n assert (\n len(agent_goals) == 2\n ), \"Only supporting two agents right now\"\n\n raw_background = ScriptBackground(\n scenario=self.profile.scenario,\n p1_background=get_bio(\n self.profile.relationship,\n agents[agent_names[0]].profile,\n agent_id=0,\n ),\n p2_background=get_bio(\n self.profile.relationship,\n agents[agent_names[1]].profile,\n agent_id=1,\n ),\n p1_goal=f\"<root viewer='agent_0'>{agent_goals[0]}</root>\",\n p2_goal=f\"<root viewer='agent_1'>{agent_goals[1]}</root>\",\n p1_name=agent_names[0],\n p2_name=agent_names[1],\n )\n\n if lite:\n raw_background.p1_background = \"\"\n raw_background.p2_background = \"\"\n\n self.background = ScriptBackground(\n scenario=render_text_for_environment(raw_background.scenario),\n p1_background=render_text_for_environment(\n raw_background.p1_background\n ),\n p2_background=render_text_for_environment(\n raw_background.p2_background\n ),\n p1_goal=render_text_for_environment(raw_background.p1_goal),\n p2_goal=render_text_for_environment(raw_background.p2_goal),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n else:\n raise ValueError(\"agents must be provided\")\n\n self.agents = [self.background.p1_name, self.background.p2_name]\n agent_backgrounds: list[ScriptBackground] = []\n if omniscient:\n for i in range(self.num_agents):\n agent_backgrounds.append(copy.deepcopy(self.background))\n else:\n for i in range(self.num_agents):\n agent_backgrounds.append(\n ScriptBackground(\n scenario=render_text_for_agent(\n raw_background.scenario, i\n ),\n p1_background=render_text_for_agent(\n raw_background.p1_background, i\n ),\n p2_background=render_text_for_agent(\n raw_background.p2_background, i\n ),\n p1_goal=render_text_for_agent(\n raw_background.p1_goal, i\n ),\n p2_goal=render_text_for_agent(\n raw_background.p2_goal, i\n ),\n p1_name=raw_background.p1_name,\n p2_name=raw_background.p2_name,\n )\n )\n background_for_a = agent_backgrounds[0]\n background_for_b = agent_backgrounds[1]\n\n print(\"Is the agent omniscient?\", omniscient)\n if not omniscient:\n background_for_a.p2_goal = \"Unknown\"\n background_for_b.p1_goal = \"Unknown\"\n\n self.action_spaces = {\n agent: Dict(\n dict(\n action_type=Discrete(len(self.available_action_types)),\n argument=Text(256),\n )\n )\n for agent in self.agents\n }\n self.turn_number = 0\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[0] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n\n self.recv_message(\"Environment\", self.background)\n\n return {\n self.background.p1_name: Observation(\n last_turn=background_for_a.to_natural_language(),\n turn_number=0,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=background_for_b.to_natural_language(),\n turn_number=0,\n 
available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n }\n\n @beartype\n def step(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *(\n evaluator(\n turn_number=self.turn_number, messages=self.inbox\n )\n for evaluator in self.evaluators\n )\n )\n )\n )\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n },\n )\n\n @beartype\n async def astep(\n self, actions: dict[str, AgentAction] | dict[str, dict[str, int | str]]\n ) -> tuple[\n dict[str, Observation],\n dict[str, float],\n dict[str, bool],\n dict[str, bool],\n dict[str, dict[Any, Any]],\n ]:\n # Time step ++\n self.turn_number += 1\n\n # For action sampled from action space, it needs to be converted into AgentAction\n complied_actions: dict[str, AgentAction] = {}\n for key in actions.keys():\n action = actions[key]\n if isinstance(action, AgentAction):\n complied_actions[key] = action\n else:\n 
action[\"action_type\"] = self.available_action_types[\n int(action[\"action_type\"])\n ]\n complied_actions[key] = AgentAction.parse_obj(action)\n\n # Masking actions from agent that are in turn\n for idx, agent in enumerate(self.agents):\n if not self.action_mask[idx]:\n complied_actions[agent] = AgentAction(\n action_type=\"none\", argument=\"\"\n )\n\n self.recv_message(\n \"Environment\", SimpleMessage(message=f\"Turn #{self.turn_number}\")\n )\n for agent, action in complied_actions.items():\n self.recv_message(agent, action)\n\n response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.evaluators\n ]\n )\n )\n )\n )\n\n if response.terminated:\n terminal_response = unweighted_aggregate_evaluate(\n list(\n itertools.chain(\n *await asyncio.gather(\n *[\n evaluator.__acall__(\n turn_number=self.turn_number,\n messages=self.inbox,\n )\n for evaluator in self.terminal_evaluators\n ]\n )\n )\n )\n )\n # incorporate terminal response into response\n response.p1_rate = response.p1_rate or terminal_response.p1_rate\n response.p2_rate = response.p2_rate or terminal_response.p2_rate\n if response.comments and terminal_response.comments:\n response.comments += terminal_response.comments\n elif terminal_response.comments:\n response.comments = terminal_response.comments\n\n self.action_mask = [False for _ in self.agents]\n if self.action_order == \"round-robin\":\n self.action_mask[self.turn_number % len(self.action_mask)] = True\n elif self.action_order == \"random\":\n self.action_mask[\n random.randint(0, len(self.action_mask) - 1)\n ] = True\n else:\n self.action_mask = [True for _ in self.agents]\n obs = _actions_to_natural_language(complied_actions)\n info = {\n self.background.p1_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p1_rate or 0,\n },\n self.background.p2_name: {\n \"comments\": response.comments or \"\",\n \"complete_rating\": response.p2_rate or 0,\n },\n }\n if response.terminated:\n info[\"rewards_prompt\"] = {\"overall_prompt\": self.terminal_evaluators[0].prompt} # type: ignore\n\n return (\n {\n self.background.p1_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=0),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[0]\n else [\"none\"],\n ),\n self.background.p2_name: Observation(\n last_turn=render_text_for_agent(obs, agent_id=1),\n turn_number=self.turn_number,\n available_actions=list(self.available_action_types)\n if self.action_mask[1]\n else [\"none\"],\n ),\n },\n {\n self.background.p1_name: (\n response.p1_rate\n if isinstance(response.p1_rate, float)\n else response.p1_rate[0]\n )\n if response.p1_rate\n else 0,\n self.background.p2_name: (\n response.p2_rate\n if isinstance(response.p2_rate, float)\n else response.p2_rate[0]\n )\n if response.p2_rate\n else 0,\n },\n {\n self.background.p1_name: response.terminated,\n self.background.p2_name: response.terminated,\n },\n {\n self.background.p1_name: False,\n self.background.p2_name: False,\n },\n info,\n )\n\n def render(self, mode: str = \"human\") -> None:\n pass\n\n def close(self) -> None:\n pass" }, { "identifier": "ReachGoalLLMEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class ReachGoalLLMEvaluator(Evaluator):\n @beartype\n def __init__(\n self, model_name: LLM_Name, response_format: str = \"basic\"\n ) -> None:\n self.model_name = 
model_name\n self.prompt = \"\"\n self.response_format = response_format\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n raise NotImplementedError(\n \"ReachGoalLLMEvaluator is not implemented for synchronous evaluation\"\n )\n\n @gin.configurable\n @beartype\n async def __acall__(\n self,\n turn_number: int,\n messages: list[tuple[str, Message]] | None,\n history: str = \"\",\n temperature: float = 0.7,\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # filter did nothing\n if not history and messages:\n messages_filtered = [\n (x, y)\n for x, y in messages\n if \"did nothing\" not in y.to_natural_language()\n ]\n history = \"\\n\".join(\n [\n f\"{x} {y.to_natural_language()}\"\n if x != \"Environment\"\n else y.to_natural_language()\n for x, y in messages_filtered\n ]\n )\n response_format_class = (\n EnvResponsePlus if self.response_format == \"plus\" else EnvResponse\n )\n\n try:\n response: EnvResponsePlus | EnvResponse # fix type error from langchain 0.0.264. we don't need this line for langchain 0.0.263\n response, prompt = await agenerate(\n model_name=self.model_name,\n template=\"\"\"{history},\n Based on previous interactions, evaluate how well participants achieve their goals.\n Please following the format:\n {format_instructions}\n \"\"\",\n input_values=dict(history=history),\n output_parser=PydanticOutputParser[\n EnvResponsePlus | EnvResponse\n ](pydantic_object=response_format_class),\n temperature=temperature,\n )\n self.prompt = prompt\n response_list = []\n # TODO: multiple agents\n for dimension in response.agent_1_evaluation.dict().keys():\n response_list.append(\n (\n \"agent_1\",\n (\n (\n dimension,\n response.agent_1_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_1_evaluation.dict()[dimension][0],\n ),\n )\n )\n response_list.append(\n (\n \"agent_2\",\n (\n (\n dimension,\n response.agent_2_evaluation.dict()[dimension][\n 1\n ],\n ),\n response.agent_2_evaluation.dict()[dimension][0],\n ),\n )\n )\n return response_list\n except Exception as e:\n log.debug(f\"[red] Failed to generate environment response. 
{e}\")\n return []" }, { "identifier": "RuleBasedTerminatedEvaluator", "path": "sotopia/envs/evaluators.py", "snippet": "class RuleBasedTerminatedEvaluator(Evaluator):\n def __init__(\n self, max_turn_number: int = 20, max_stale_turn: int = 2\n ) -> None:\n self.max_turn_number = max_turn_number\n self.max_stale_turn = max_stale_turn\n\n def __call__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n # Rule 1: If the conversation is too long, terminate the conversation\n conversation_too_long = turn_number > self.max_turn_number\n # Rule 2: If one of the players leaves, terminate the conversation\n p1_leaving = (\n len(messages) > 1\n and isinstance(messages[-2][1], AgentAction)\n and messages[-2][1].action_type == \"leave\"\n )\n p2_leaving = (\n bool(len(messages))\n and isinstance(messages[-1][1], AgentAction)\n and messages[-1][1].action_type == \"leave\"\n )\n # Rule 3: If the conversation is stale for too long, terminate the conversation\n stale_count = 0\n for message in messages[::-1]:\n if message[0] == \"Environment\":\n continue\n assert isinstance(message[1], AgentAction)\n if message[1].action_type == \"none\":\n stale_count += 1\n else:\n break\n if stale_count > self.max_stale_turn:\n break\n stale_too_long = stale_count > self.max_stale_turn\n terminated = (\n conversation_too_long or p1_leaving or p2_leaving or stale_too_long\n )\n reasons_for_termination = (\n f\"{'The conversation is too long; ' if conversation_too_long else ''}\"\n f\"{'Agent 1 is leaving; ' if p1_leaving else ''}\"\n f\"{'Agent 2 is leaving; ' if p2_leaving else ''}\"\n f\"{'The conversation stales for too long; ' if stale_too_long else ''}\"\n )\n return [\n (\n \"environment\",\n ((\"terminated\", terminated), reasons_for_termination),\n )\n ]\n\n async def __acall__(\n self, turn_number: int, messages: list[tuple[str, Message]]\n ) -> list[tuple[str, tuple[tuple[str, int | float | bool], str]]]:\n return self(turn_number, messages)" }, { "identifier": "unweighted_aggregate_evaluate", "path": "sotopia/envs/evaluators.py", "snippet": "@beartype\ndef unweighted_aggregate_evaluate(\n responses: list[tuple[str, tuple[tuple[str, int | float | bool], str]]],\n) -> ScriptEnvironmentResponse:\n \"\"\"\n Aggregate the responses from the environment\n\n Args:\n responses (list[tuple[str, tuple[tuple[str, int | bool], str]]]): list of responses from the environment\n Each response is a tuple of (agent_name/environment, (response, reasoning))\n \"\"\"\n responses_dict: dict[\n str, list[tuple[tuple[str, int | float | bool], str]]\n ] = defaultdict(list)\n for response in responses:\n assert response[0] == \"environment\" or response[0].startswith(\"agent\")\n responses_dict[response[0]].append(response[1])\n\n environment_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n agent_1_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n agent_2_responses: tuple[dict[str, float | int | bool], str] = ({}, \"\")\n for k, v in responses_dict.items():\n if k == \"environment\":\n environment_responses = _reduce(v)\n else:\n if k == \"agent_1\":\n agent_1_responses = _reduce(v)\n elif k == \"agent_2\":\n agent_2_responses = _reduce(v)\n else:\n # TODO: supports more than two agents\n raise ValueError(f\"Only supports agent_1 and agent_2, got {k}\")\n\n comments = (\n (\n f\"Environment comments: {environment_responses[1]}\\n\"\n if environment_responses[1]\n else \"\"\n )\n + (\n f\"Agent 1 
comments:\\n{agent_1_responses[1]}\\n\"\n if agent_1_responses[1]\n else \"\"\n )\n + (\n f\"Agent 2 comments:\\n{agent_2_responses[1]}\\n\"\n if agent_2_responses[1]\n else \"\"\n )\n )\n if (\n \"terminated\" in environment_responses[0]\n and environment_responses[0][\"terminated\"]\n ):\n log.debug(f\"[green] The conversation is terminated. {response}\")\n return ScriptEnvironmentResponse(\n terminated=environment_responses[0][\"terminated\"]\n if \"terminated\" in environment_responses[0]\n else False,\n p1_rate=(\n agent_1_responses[0][\"overall_score\"]\n if \"overall_score\" in agent_1_responses[0]\n else 0,\n agent_1_responses[0],\n )\n if agent_1_responses != ({}, \"\")\n else None,\n p2_rate=(\n agent_2_responses[0][\"overall_score\"]\n if \"overall_score\" in agent_2_responses[0]\n else 0,\n agent_2_responses[0],\n )\n if agent_2_responses != ({}, \"\")\n else None,\n comments=comments,\n )" }, { "identifier": "LLM_Name", "path": "sotopia/generation_utils/generate.py", "snippet": "class EnvResponse(BaseModel):\nclass EnvResponsePydanticOutputParser(PydanticOutputParser[EnvResponse]):\nclass ListOfIntOutputParser(BaseOutputParser[list[int]]):\nclass ListOfStrOutputParser(BaseOutputParser[list[str]]):\nclass StrOutputParser(BaseOutputParser[str]):\nclass ScriptOutputParser(BaseOutputParser[ScriptInteractionReturnType]):\n def __init__(self, pydantic_object: Type[BaseModel] = EnvResponse) -> None:\n def parse(self, text: str) -> EnvResponse:\n def get_format_instructions(self) -> str:\n def __init__(\n self,\n number_of_int: int | None = None,\n range_of_int: tuple[int, int] | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[int]:\n def _type(self) -> str:\n def __init__(\n self,\n number_of_str: int | None = None,\n ):\n def _get_description_text(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> list[str]:\n def _type(self) -> str:\n def __init__(self) -> None:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> str:\n def _type(self) -> str:\n def get_format_instructions(self) -> str:\n def parse(self, output: str) -> ScriptInteractionReturnType:\n def _type(self) -> str:\ndef _return_fixed_model_version(\n model_name: Literal[\"gpt-3.5-turbo\", \"gpt-4\", \"gpt-4-turbo\"]\n) -> str:\ndef obtain_chain(\n model_name: LLM_Name,\n template: str,\n input_variables: list[str],\n temperature: float = 0.7,\n max_retries: int = 6,\n) -> LLMChain:\ndef format_bad_output_for_script(\n ill_formed_output: str,\n format_instructions: str,\n agents: list[str],\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef format_bad_output(\n ill_formed_output: str,\n format_instructions: str,\n model_name: LLM_Name = \"gpt-3.5-turbo\",\n) -> str:\ndef generate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> OutputType:\nasync def agenerate(\n model_name: LLM_Name,\n template: str,\n input_values: dict[str, str],\n output_parser: BaseOutputParser[OutputType],\n temperature: float = 0.7,\n) -> tuple[OutputType, str]:\ndef generate_episode(\n model_name: LLM_Name,\n participants: str = \"Jack (a greedy person), Rose\",\n topic: str = \"lawsuit\",\n extra_info: str = \"\",\n) -> EnvResponse:\nasync def agenerate_env_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n 
examples: str = \"\",\n temperature: float = 0.7,\n) -> tuple[EnvironmentProfile, str]:\nasync def agenerate_relationship_profile(\n model_name: LLM_Name,\n agents_profiles: list[str],\n) -> tuple[RelationshipProfile, str]:\nasync def agenerate_enviroment_profile(\n model_name: LLM_Name,\n inspiration_prompt: str = \"asking my boyfriend to stop being friends with his ex\",\n examples: str = \"\",\n) -> tuple[EnvironmentProfile, str]:\ndef fill_in_background(\n model_name: LLM_Name,\n partial_background: ScriptBackground,\n) -> ScriptBackground:\ndef generate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\ndef generate_action_speak(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n) -> AgentAction:\nasync def agenerate_action(\n model_name: LLM_Name,\n history: str,\n turn_number: int,\n action_types: list[ActionType],\n agent: str,\n goal: str,\n temperature: float = 0.7,\n script_like: bool = False,\n) -> tuple[AgentAction, str]:\nasync def agenerate_script(\n model_name: LLM_Name,\n background: ScriptBackground,\n temperature: float = 0.7,\n agent_names: list[str] = [],\n agent_name: str = \"\",\n history: str = \"\",\n single_step: bool = False,\n) -> tuple[ScriptInteractionReturnType, str]:\ndef process_history(\n script: ScriptBackground | EnvResponse | dict[str, AgentAction]\n) -> str:\ndef generate_init_profile(\n model_name: LLM_Name, basic_info: dict[str, str]\n) -> str:\ndef convert_narratives(model_name: LLM_Name, narrative: str, text: str) -> str:\ndef generate_goal(model_name: LLM_Name, background: str) -> str:" }, { "identifier": "AgentAction", "path": "sotopia/messages/message_classes.py", "snippet": "class AgentAction(Message):\n action_type: ActionType = Field(\n description=\"whether to speak at this turn or choose to not do anything\"\n )\n argument: str = Field(\n description=\"the utterance if choose to speak, the expression or gesture if choose non-verbal communication, or the physical action if choose action\"\n )\n\n def to_natural_language(self) -> str:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"" }, { "identifier": "Message", "path": "sotopia/messages/message_classes.py", "snippet": "class Message(BaseModel):\n \"\"\"\n An interface for messages.\n There is only one required method: to_natural_language\n \"\"\"\n\n def to_natural_language(self) -> str:\n raise NotImplementedError" }, { "identifier": "Observation", "path": "sotopia/messages/message_classes.py", "snippet": "class Observation(Message):\n last_turn: str = Field(description=\"the last turn of the conversation\")\n turn_number: int = Field(description=\"the turn number of the conversation\")\n available_actions: list[ActionType] = Field(\n description=\"the available actions\"\n )\n\n def to_natural_language(self) -> str:\n if self.turn_number == 0:\n return f\"\\n{self.last_turn}\\nConversation Starts:\\n\"\n else:\n return f\"Turn #{self.turn_number-1}: {self.last_turn}\\n\"" }, { "identifier": "ScriptBackground", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptBackground(Message):\n scenario: str = 
Field(description=\"scenario of the episode\")\n p1_name: str = Field(description=\"name of participant 1\")\n p2_name: str = Field(description=\"name of participant 2\")\n p1_background: str = Field(description=\"background of participant 1\")\n p2_background: str = Field(description=\"background of participant 2\")\n p1_goal: str = Field(description=\"goal of participant 1\")\n p2_goal: str = Field(description=\"goal of participant 2\")\n\n def to_natural_language(self) -> str:\n if self.p1_background and self.p2_background:\n return format_docstring(\n f\"\"\"Here is the context of this interaction:\n Scenario: {self.scenario}\n Participants: {self.p1_name} and {self.p2_name}\n {self.p1_name}'s background: {self.p1_background}\n {self.p2_name}'s background: {self.p2_background}\n {self.p1_name}'s goal: {self.p1_goal}\n {self.p2_name}'s goal: {self.p2_goal}\n \"\"\"\n )\n else:\n return format_docstring(\n f\"\"\"Here is the context of this interaction:\n Scenario: {self.scenario}\n Participants: {self.p1_name} and {self.p2_name}\n {self.p1_name}'s goal: {self.p1_goal}\n {self.p2_name}'s goal: {self.p2_goal}\n \"\"\"\n )" }, { "identifier": "ScriptEnvironmentResponse", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptEnvironmentResponse(Message):\n terminated: bool = Field(\n description=\"whether the conversation is terminated\",\n default_factory=lambda: False,\n )\n p1_rate: float | tuple[float, dict[str, float]] | None = Field(\n description=\"rating of participant 1, on the scale of 1 to 10\"\n )\n p2_rate: float | tuple[float, dict[str, float]] | None = Field(\n description=\"rating of participant 2, on the scale of 1 to 10\"\n )\n comments: str | None = Field(\n description=\"All of the comments supporting the termination and rating\"\n )\n\n def to_natural_language(self) -> str:\n reason_to_stop = format_docstring(\n f\"\"\"Environment response:\n {\"The conversation is terminated.\" if self.terminated else \"\"}\n {\"Rating of participant 1\" + str(self.p1_rate) if self.p1_rate is not None else \"\"}\n {\"Rating of participant 2\" + str(self.p2_rate) if self.p2_rate is not None else \"\"}\n {self.comments if self.comments is not None else \"\"}\n \"\"\"\n )\n clean_text = \"\"\n for line in reason_to_stop.split(\"\\n\"):\n if line.strip():\n clean_text += line + \"\\n\"\n return clean_text" }, { "identifier": "ScriptInteraction", "path": "sotopia/messages/message_classes.py", "snippet": "class ScriptInteraction(Message):\n interactions: str = Field(\n description=\"\"\"The interaction between the two participants in maximum 20 turns. Each turn is separated by a newline, and should only describe one agent. Following the structure:\n Turn #x\n [participant's name] [action] {argument for some actions}\n\n You can use different types of actions, but only use one in each turn. You should move other information into argument part. Below shows a python code snippet of the format for each action type:\n match self.action_type:\n case \"none\":\n return \"did nothing\"\n case \"speak\":\n return f'said: \"{self.argument}\"'\n case \"non-verbal communication\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"action\":\n return f\"[{self.action_type}] {self.argument}\"\n case \"leave\":\n return \"left the conversation\"\n\n For example, the following is acceptable:\n Turn #x\n Oliver Thompson said: \"Hey Esmeralda, what's wrong? 
You seem upset.\"\n Turn #x\n Esmeralda Solis [action] moved closer\n Turn #x\n Oliver Thompson [non-verbal communication] smiled\n Turn #x\n Esmeralda Solis did nothing\n Turn #x\n Oliver Thompson left the conversation\n Turn #x\n Esmeralda Solis [action] leaned in and lowered her voice: \"Sorry\"\n\n And the following is not acceptable:\n Turn #1\n Oliver Thompson [speak] said: \"Hey Esmeralda, what's wrong? You seem upset.\"\n Turn #1\n Esmeralda Solis non-verbal communication moved closer\n \"\"\"\n )\n\n def to_natural_language(self) -> str:\n return self.interactions\n\n def parse(\n self, agent_names: list[str], background: str\n ) -> tuple[\n list[list[tuple[str, str, Message]]], list[tuple[str, Message]]\n ]:\n interaction = self.interactions\n # print(\"Interaction: \", interaction)\n lines = self.split_by_turn(interaction)\n\n agent_results = []\n results: list[list[tuple[str, str, Message]]] = [\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=background,\n turn_number=0,\n available_actions=[\"none\"],\n ),\n )\n for name in agent_names\n ]\n ]\n\n for line_idx, line in enumerate(lines):\n try:\n res = self.parse_single_dialogue(line)\n action: AgentAction = cast(AgentAction, res[\"action\"])\n argument: str = cast(str, res[\"argument\"])\n turn: int = cast(int, res[\"turn\"])\n name: str = cast(str, res[\"name\"])\n\n parsed_action = AgentAction(\n action_type=action, argument=argument\n )\n if name not in agent_names:\n print(\n f\"The name of the agent, {name}, is not in the list of agent names, {agent_names}\"\n )\n name = agent_names[\n line_idx % 2\n ] # TODO Not sure what name to be set here\n except Exception as e:\n print(\n f\"Error when parsing the dialogue: {line}\",\n f\"The error is: {e}\",\n )\n raise e\n parsed_action = AgentAction(action_type=\"none\", argument=\"\")\n name = agent_names[line_idx % 2] # TODO same question as above\n inactive_agent_name = (\n agent_names[0] if name == agent_names[1] else agent_names[1]\n )\n results.append(\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=\"environment is the agent\",\n turn_number=line_idx + 1,\n available_actions=[\"none\"],\n ),\n )\n for name in agent_names\n ]\n + [\n (name, \"Environment\", parsed_action),\n (\n inactive_agent_name,\n \"Environment\",\n AgentAction(\n action_type=\"none\", argument=\"did nothing\"\n ),\n ),\n ]\n )\n\n agent_results.append((name, parsed_action))\n # print(\"Parsed agent results: \", agent_results)\n return (results, agent_results) # type: ignore\n\n def parse_single_dialogue(\n self, dialogue: str\n ) -> dict[str, str | int | AgentAction | None]:\n \"\"\"Parse a single dialogue string and return a dictionary with turn, name, action, and argument.\"\"\"\n\n # Match the turn number and name. 
Assume all agent name starts with a capital letter and is followed by lowercase letters\n match_turn_name = re.match(\n r\"Turn #?(\\d+):?\\s*\\n((?:[A-Z]['a-z]* ?)+)\", dialogue\n )\n\n if not match_turn_name:\n raise ValueError(\n f\"The dialogue does not match the expected format: {dialogue}\"\n )\n return (\n None # TODO Which should we use, return None or raise error?\n )\n\n turn, name = match_turn_name.groups()\n action_content = dialogue[\n len(match_turn_name.group(0)) :\n ].strip() # Extract the action content\n\n # Check for different action types\n if \"did nothing\" in action_content:\n action, argument = \"none\", \"\"\n elif match := re.match(r'said: \"(.*?)\"', action_content):\n action, argument = \"speak\", match.group(1)\n action, argument = action.strip(), argument.strip()\n elif match := re.match(r'\\[speak\\] said: \"(.*?)\"', action_content):\n action, argument = \"speak\", match.group(1)\n action, argument = action.strip(), argument.strip()\n elif match := re.match(\n r\"\\[(non-verbal communication|action)\\] (.*)\", action_content\n ):\n action, argument = match.groups()\n elif \"left the conversation\" in action_content:\n # TODO Make it more elegant to handle the situation of `left the conversation.`\n action, argument = \"leave\", \"\"\n else:\n action, argument = None, None\n\n parsed_item = {\n \"turn\": int(turn),\n \"name\": name.strip(),\n \"action\": action,\n \"argument\": argument,\n }\n return parsed_item\n\n def split_by_turn(self, input_string: str) -> list[str]:\n \"\"\"Split the input dialogue string by turn and return a list of dialogues.\"\"\"\n # Split using 'Turn #' as delimiter, but keep the delimiter in the results\n dialogues = re.split(r\"(?=Turn #?\\d+)\", input_string)\n # Remove any empty strings and strip whitespace\n dialogues = [\n dialogue.strip() for dialogue in dialogues if dialogue.strip()\n ]\n dialogues = [\n dialogue for dialogue in dialogues if dialogue.startswith(\"Turn\")\n ]\n # Change from Turn #x to Turn (#)x (# is optional)\n dialogues[-1] = \"\\n\".join(\n dialogues[-1].split(\"\\n\")[:2]\n ) # Discard further input in the last turn\n # print(\"Dialogues: \", dialogues)\n return dialogues\n\n @staticmethod\n def default_value_for_return_type() -> ScriptInteractionReturnType:\n results_1: list[list[tuple[str, str, Message]]] = [\n [\n (\n \"Environment\",\n name,\n Observation(\n last_turn=\"Environment is the agent\",\n turn_number=0,\n available_actions=[\"none\"],\n ),\n )\n for name in [\"none\", \"none\"]\n ]\n ]\n results_2: list[tuple[str, Message]] = [\n (\"\", AgentAction(action_type=\"none\", argument=\"\"))\n ]\n return (results_1, results_2)" }, { "identifier": "BaseSampler", "path": "sotopia/samplers/base_sampler.py", "snippet": "class BaseSampler(Generic[ObsType, ActType]):\n def __init__(\n self,\n env_candidates: Sequence[EnvironmentProfile | str] | None = None,\n agent_candidates: Sequence[AgentProfile | str] | None = None,\n ) -> None:\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:" }, { "identifier": "ConstraintBasedSampler", "path": "sotopia/samplers/constraint_based_sampler.py", "snippet": "class ConstraintBasedSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, 
ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 10,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and a list of agents based on the constraints of the environment.\n\n Note: Sampling without replacement is only restricted to single env candidate.\n This is due to the fact that the number of possible combinations of env and agents is huge.\n Please sample for each env separately if you want to sample without replacement.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n env_profiles: list[EnvironmentProfile] = []\n agents_which_fit_scenario: list[list[str]] = []\n\n agent_candidate_ids: set[str] | None = None\n if self.agent_candidates:\n agent_candidate_ids = set(\n str(agent.pk) if not isinstance(agent, str) else agent\n for agent in self.agent_candidates\n )\n else:\n agent_candidate_ids = None\n\n if not replacement:\n assert self.env_candidates and len(self.env_candidates) == 1, (\n \"Sampling without replacement is only restricted to single env candidate (must be provided in the constructor). \"\n \"This is due to the fact that the number of possible combinations of env and agents is huge. \"\n \"Please sample for each env separately if you want to sample without replacement.\"\n )\n\n env_profile_id = (\n self.env_candidates[0].pk\n if not isinstance(self.env_candidates[0], str)\n else self.env_candidates[0]\n )\n\n assert env_profile_id, \"Env candidate must have an id\"\n\n agents_which_fit_scenario = _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, size\n )\n env_profiles = (\n [EnvironmentProfile.get(env_profile_id)] * size\n if isinstance(self.env_candidates[0], str)\n else [self.env_candidates[0]] * size\n )\n else:\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env_profiles.append(env_profile)\n env_profile_id = env_profile.pk\n assert env_profile_id, \"Env candidate must have an id\"\n agents_which_fit_scenario.append(\n _get_fit_agents_for_one_env(\n env_profile_id, agent_candidate_ids, 1\n )[0]\n )\n\n assert (\n len(env_profiles) == size\n ), \"Number of env_profiles is not equal to size\"\n assert (\n len(agents_which_fit_scenario) == size\n ), \"Number of agents_which_fit_scenario is not equal to size\"\n\n for env_profile, agent_profile_id_list in zip(\n env_profiles, agents_which_fit_scenario\n ):\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n agent_profiles = [\n AgentProfile.get(id) for id in agent_profile_id_list\n ]\n\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" }, { 
"identifier": "UniformSampler", "path": "sotopia/samplers/uniform_sampler.py", "snippet": "class UniformSampler(BaseSampler[ObsType, ActType]):\n def sample(\n self,\n agent_classes: Type[BaseAgent[ObsType, ActType]]\n | list[Type[BaseAgent[ObsType, ActType]]],\n n_agent: int = 2,\n replacement: bool = True,\n size: int = 1,\n env_params: dict[str, Any] = {},\n agents_params: list[dict[str, Any]] = [{}, {}],\n ) -> Generator[EnvAgentCombo[ObsType, ActType], None, None]:\n \"\"\"\n Sample an environment and `n_agent` agents.\n\n Runtime checks:\n 1. If `agent_classes` is a list, it should have length `n_agent`.\n 2. `agents_params` should also be a list of length `n_agent`.\n\n Note: Currently, uniform sampling without replacement is not supported.\n This is due to the difficulty of sequentially sampling environment and agents.\n In theory, we can reject samples that have been sampled before, but this is not efficient.\n Please open an issue if you need this feature.\n \"\"\"\n assert (\n not isinstance(agent_classes, list)\n or len(agent_classes) == n_agent\n ), f\"agent_classes should be a list of length {n_agent} or a single agent class\"\n\n if not isinstance(agent_classes, list):\n agent_classes = [agent_classes] * n_agent\n assert (\n len(agents_params) == n_agent\n ), f\"agents_params should be a list of length {n_agent}\"\n\n assert (\n replacement\n ), \"Uniform sampling without replacement is not supported yet\"\n\n for _ in range(size):\n if self.env_candidates:\n env_profile = random.choice(self.env_candidates)\n if isinstance(env_profile, str):\n env_profile = EnvironmentProfile.get(env_profile)\n else:\n env_profile_id = random.choice(\n list(EnvironmentProfile.all_pks())\n )\n env_profile = EnvironmentProfile.get(env_profile_id)\n env = ParallelSotopiaEnv(env_profile=env_profile, **env_params)\n\n if self.agent_candidates:\n agent_profile_candidates = self.agent_candidates\n if len(agent_profile_candidates) < n_agent:\n raise ValueError(\n f\"Number of agent candidates ({len(agent_profile_candidates)}) is less than number of agents ({n_agent})\"\n )\n else:\n agent_profile_candidates_keys = list(AgentProfile.all_pks())\n if len(agent_profile_candidates_keys) < n_agent:\n raise ValueError(\n f\"Number of agent profile candidates ({len(agent_profile_candidates_keys)}) in database is less than number of agents ({n_agent})\"\n )\n agent_profile_candidates = [\n AgentProfile.get(pk=pk)\n for pk in agent_profile_candidates_keys\n ]\n\n if len(agent_profile_candidates) == n_agent:\n agent_profiles_maybe_id = agent_profile_candidates\n else:\n agent_profiles_maybe_id = random.sample(\n agent_profile_candidates, n_agent\n )\n agent_profiles = [\n i if isinstance(i, AgentProfile) else AgentProfile.get(i)\n for i in agent_profiles_maybe_id\n ]\n agents = [\n agent_class(agent_profile=agent_profile, **agent_params)\n for agent_class, agent_profile, agent_params in zip(\n agent_classes, agent_profiles, agents_params\n )\n ]\n # set goal for each agent\n for agent, goal in zip(agents, env.profile.agent_goals):\n agent.goal = goal\n\n yield env, agents" } ]
import asyncio import functools import itertools import logging import gin import rich from typing import Callable, Literal, Sequence, Type, cast from beartype import beartype from tqdm.asyncio import tqdm_asyncio from sotopia.agents import ( Agents, HumanAgent, LLMAgent, RedisAgent, ScriptWritingAgent, SpeakAgent, ) from sotopia.agents.base_agent import BaseAgent from sotopia.database import EpisodeLog from sotopia.database.persistent_profile import ( AgentProfile, EnvironmentProfile, ) from sotopia.envs import ParallelSotopiaEnv from sotopia.envs.evaluators import ( ReachGoalLLMEvaluator, RuleBasedTerminatedEvaluator, unweighted_aggregate_evaluate, ) from sotopia.generation_utils.generate import LLM_Name, agenerate_script from sotopia.messages import AgentAction, Message, Observation from sotopia.messages.message_classes import ( ScriptBackground, ScriptEnvironmentResponse, ScriptInteraction, ) from sotopia.samplers import ( BaseSampler, ConstraintBasedSampler, EnvAgentCombo, UniformSampler, )
18742
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else: agents[agent_name] = LLMAgent(agent_name, model_name=agent_model) agents.reset() messages: list[tuple[str, str, Message]] = [] # Main Event Loop done = False for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) while not done: # gather agent messages agent_messages: dict[str, AgentAction] = dict() for agent_name in env.agents: if agents_info is not None: agents[agent_name].goal = agents_info[agent_name]["goal"] agent_messages[agent_name] = agents[agent_name].act( environment_messages[agent_name] ) messages.append( (agent_name, "Environment", agent_messages[agent_name]) ) # send agent messages to environment environment_messages, _, terminated, ___, ____ = env.step( agent_messages ) for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) done = all(terminated.values()) return messages @gin.configurable async def arun_one_episode( env: ParallelSotopiaEnv, agent_list: Sequence[BaseAgent[Observation, AgentAction]], model_dict: dict[str, LLM_Name], omniscient: bool = False, script_like: bool = False, json_in_script: bool = False, tag: str | None = None, push_to_db: bool = False, ) -> list[tuple[str, str, Message]]: agents = Agents({agent.agent_name: agent for agent in agent_list}) environment_messages = env.reset(agents=agents, omniscient=omniscient) agents_model_names = [model_dict["agent1"], model_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif agent_model == "redis":
@beartype def run_sync_server( model_name_dict: dict[str, LLM_Name], action_order: Literal["simutaneous", "round-robin", "random"], agents_info: dict[str, dict[str, str]] | None = None, partial_background_file: str | None = None, full_background_file: str | None = None, mode: str | None = None, ) -> list[tuple[str, str, Message]]: # Create Environment and agents # This step will be moved to outside this function env = ParallelSotopiaEnv( model_name=model_name_dict["env"], action_order=action_order, evaluators=[ RuleBasedTerminatedEvaluator(), ], ) if partial_background_file: environment_messages = env.reset( options={"partial_background_file": partial_background_file} ) elif full_background_file: environment_messages = env.reset( options={"full_background_file": full_background_file} ) else: environment_messages = env.reset() agents = Agents() agents_model_names = [model_name_dict["agent1"], model_name_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif mode == "speak": agents[agent_name] = SpeakAgent(agent_name, model_name=agent_model) else: agents[agent_name] = LLMAgent(agent_name, model_name=agent_model) agents.reset() messages: list[tuple[str, str, Message]] = [] # Main Event Loop done = False for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) while not done: # gather agent messages agent_messages: dict[str, AgentAction] = dict() for agent_name in env.agents: if agents_info is not None: agents[agent_name].goal = agents_info[agent_name]["goal"] agent_messages[agent_name] = agents[agent_name].act( environment_messages[agent_name] ) messages.append( (agent_name, "Environment", agent_messages[agent_name]) ) # send agent messages to environment environment_messages, _, terminated, ___, ____ = env.step( agent_messages ) for agent_name in env.agents: messages.append( ("Environment", agent_name, environment_messages[agent_name]) ) done = all(terminated.values()) return messages @gin.configurable async def arun_one_episode( env: ParallelSotopiaEnv, agent_list: Sequence[BaseAgent[Observation, AgentAction]], model_dict: dict[str, LLM_Name], omniscient: bool = False, script_like: bool = False, json_in_script: bool = False, tag: str | None = None, push_to_db: bool = False, ) -> list[tuple[str, str, Message]]: agents = Agents({agent.agent_name: agent for agent in agent_list}) environment_messages = env.reset(agents=agents, omniscient=omniscient) agents_model_names = [model_dict["agent1"], model_dict["agent2"]] for agent_name, agent_model in zip(env.agents, agents_model_names): if agent_model == "human": agents[agent_name] = HumanAgent(agent_name) elif agent_model == "redis":
agents[agent_name] = RedisAgent(agent_name)
5
2023-10-23 19:47:26+00:00
24k
shrimo/SLAMBox
node_graph.py
[ { "identifier": "NodeGraph", "path": "NodeGraphQt/NodeGraphQt/base/graph.py", "snippet": "class NodeGraph(QtCore.QObject):\n \"\"\"\n The ``NodeGraph`` class is the main controller for managing all nodes\n and the node graph.\n\n Inherited from: :class:`PySide2.QtCore.QObject`\n\n .. image:: _images/graph.png\n :width: 60%\n \"\"\"\n\n node_created = QtCore.Signal(NodeObject)\n \"\"\"\n Signal triggered when a node is created in the node graph.\n\n :parameters: :class:`NodeGraphQt.NodeObject`\n :emits: created node\n \"\"\"\n nodes_deleted = QtCore.Signal(list)\n \"\"\"\n Signal triggered when nodes have been deleted from the node graph.\n\n :parameters: list[str]\n :emits: list of deleted node ids.\n \"\"\"\n node_selected = QtCore.Signal(NodeObject)\n \"\"\"\n Signal triggered when a node is clicked with the LMB.\n\n :parameters: :class:`NodeGraphQt.NodeObject`\n :emits: selected node\n \"\"\"\n node_selection_changed = QtCore.Signal(list, list)\n \"\"\"\n Signal triggered when the node selection has changed.\n\n :parameters: list[:class:`NodeGraphQt.NodeObject`],\n list[:class:`NodeGraphQt.NodeObject`]\n :emits: selected node, deselected nodes.\n \"\"\"\n node_double_clicked = QtCore.Signal(NodeObject)\n \"\"\"\n Signal triggered when a node is double clicked and emits the node.\n\n :parameters: :class:`NodeGraphQt.NodeObject`\n :emits: selected node\n \"\"\"\n port_connected = QtCore.Signal(Port, Port)\n \"\"\"\n Signal triggered when a node port has been connected.\n\n :parameters: :class:`NodeGraphQt.Port`, :class:`NodeGraphQt.Port`\n :emits: input port, output port\n \"\"\"\n port_disconnected = QtCore.Signal(Port, Port)\n \"\"\"\n Signal triggered when a node port has been disconnected.\n\n :parameters: :class:`NodeGraphQt.Port`, :class:`NodeGraphQt.Port`\n :emits: input port, output port\n \"\"\"\n property_changed = QtCore.Signal(NodeObject, str, object)\n \"\"\"\n Signal is triggered when a property has changed on a node.\n\n :parameters: :class:`NodeGraphQt.BaseNode`, str, object\n :emits: triggered node, property name, property value\n \"\"\"\n data_dropped = QtCore.Signal(QtCore.QMimeData, QtCore.QPoint)\n \"\"\"\n Signal is triggered when data has been dropped to the graph.\n\n :parameters: :class:`PySide2.QtCore.QMimeData`, :class:`PySide2.QtCore.QPoint`\n :emits: mime data, node graph position\n \"\"\"\n session_changed = QtCore.Signal(str)\n \"\"\"\n Signal is triggered when session has been changed.\n\n :parameters: :str\n :emits: new session path\n \"\"\"\n\n def __init__(self, parent=None, **kwargs):\n \"\"\"\n Args:\n parent (object): object parent.\n **kwargs (dict): Used for overriding internal objects at init time.\n \"\"\"\n super(NodeGraph, self).__init__(parent)\n self.setObjectName('NodeGraph')\n self._model = (\n kwargs.get('model') or NodeGraphModel())\n self._node_factory = (\n kwargs.get('node_factory') or NodeFactory())\n\n self._undo_view = None\n self._undo_stack = (\n kwargs.get('undo_stack') or QtWidgets.QUndoStack(self))\n\n self._widget = None\n\n self._sub_graphs = {}\n\n self._viewer = (\n kwargs.get('viewer') or NodeViewer(undo_stack=self._undo_stack))\n\n self._build_context_menu()\n self._register_builtin_nodes()\n self._wire_signals()\n\n def __repr__(self):\n return '<{}(\"root\") object at {}>'.format(\n self.__class__.__name__, hex(id(self)))\n\n def _build_context_menu(self):\n \"\"\"\n build the essential menus commands for the graph context menu.\n \"\"\"\n from NodeGraphQt.base.graph_actions import build_context_menu\n 
build_context_menu(self)\n\n def _register_builtin_nodes(self):\n \"\"\"\n Register the default builtin nodes to the :meth:`NodeGraph.node_factory`\n \"\"\"\n self.register_node(BackdropNode, alias='Backdrop')\n\n def _wire_signals(self):\n \"\"\"\n Connect up all the signals and slots here.\n \"\"\"\n\n # internal signals.\n self._viewer.search_triggered.connect(self._on_search_triggered)\n self._viewer.connection_sliced.connect(self._on_connection_sliced)\n self._viewer.connection_changed.connect(self._on_connection_changed)\n self._viewer.moved_nodes.connect(self._on_nodes_moved)\n self._viewer.node_double_clicked.connect(self._on_node_double_clicked)\n self._viewer.node_name_changed.connect(self._on_node_name_changed)\n self._viewer.node_backdrop_updated.connect(\n self._on_node_backdrop_updated)\n self._viewer.insert_node.connect(self._on_insert_node)\n\n # pass through translated signals.\n self._viewer.node_selected.connect(self._on_node_selected)\n self._viewer.node_selection_changed.connect(\n self._on_node_selection_changed)\n self._viewer.data_dropped.connect(self._on_node_data_dropped)\n\n def _on_insert_node(self, pipe, node_id, prev_node_pos):\n \"\"\"\n Slot function triggered when a selected node has collided with a pipe.\n\n Args:\n pipe (Pipe): collided pipe item.\n node_id (str): selected node id to insert.\n prev_node_pos (dict): previous node position. {NodeItem: [prev_x, prev_y]}\n \"\"\"\n node = self.get_node_by_id(node_id)\n\n # exclude if not a BaseNode\n if not isinstance(node, BaseNode):\n return\n\n disconnected = [(pipe.input_port, pipe.output_port)]\n connected = []\n\n if node.input_ports():\n connected.append(\n (pipe.output_port, node.input_ports()[0].view)\n )\n if node.output_ports():\n connected.append(\n (node.output_ports()[0].view, pipe.input_port)\n )\n\n self._undo_stack.beginMacro('inserted node')\n self._on_connection_changed(disconnected, connected)\n self._on_nodes_moved(prev_node_pos)\n self._undo_stack.endMacro()\n\n def _on_property_bin_changed(self, node_id, prop_name, prop_value):\n \"\"\"\n called when a property widget has changed in a properties bin.\n (emits the node object, property name, property value)\n\n Args:\n node_id (str): node id.\n prop_name (str): node property name.\n prop_value (object): python built in types.\n \"\"\"\n node = self.get_node_by_id(node_id)\n\n # prevent signals from causing a infinite loop.\n if node.get_property(prop_name) != prop_value:\n node.set_property(prop_name, prop_value)\n\n def _on_node_name_changed(self, node_id, name):\n \"\"\"\n called when a node text qgraphics item in the viewer is edited.\n (sets the name through the node object so undo commands are registered.)\n\n Args:\n node_id (str): node id emitted by the viewer.\n name (str): new node name.\n \"\"\"\n node = self.get_node_by_id(node_id)\n node.set_name(name)\n\n # TODO: not sure about redrawing the node here.\n node.view.draw_node()\n\n def _on_node_double_clicked(self, node_id):\n \"\"\"\n called when a node in the viewer is double click.\n (emits the node object when the node is clicked)\n\n Args:\n node_id (str): node id emitted by the viewer.\n \"\"\"\n node = self.get_node_by_id(node_id)\n self.node_double_clicked.emit(node)\n\n def _on_node_selected(self, node_id):\n \"\"\"\n called when a node in the viewer is selected on left click.\n (emits the node object when the node is clicked)\n\n Args:\n node_id (str): node id emitted by the viewer.\n \"\"\"\n node = self.get_node_by_id(node_id)\n self.node_selected.emit(node)\n\n 
def _on_node_selection_changed(self, sel_ids, desel_ids):\n \"\"\"\n called when the node selection changes in the viewer.\n (emits node objects <selected nodes>, <deselected nodes>)\n\n Args:\n sel_ids (list[str]): new selected node ids.\n desel_ids (list[str]): deselected node ids.\n \"\"\"\n sel_nodes = [self.get_node_by_id(nid) for nid in sel_ids]\n unsel_nodes = [self.get_node_by_id(nid) for nid in desel_ids]\n self.node_selection_changed.emit(sel_nodes, unsel_nodes)\n\n def _on_node_data_dropped(self, data, pos):\n \"\"\"\n called when data has been dropped on the viewer.\n\n Example Identifiers:\n URI = ngqt://path/to/node/session.graph\n URN = ngqt::node:com.nodes.MyNode1;node:com.nodes.MyNode2\n\n Args:\n data (QtCore.QMimeData): mime data.\n pos (QtCore.QPoint): scene position relative to the drop.\n \"\"\"\n uri_regex = re.compile(r'{}(?:/*)([\\w/]+)(\\.\\w+)'.format(URI_SCHEME))\n urn_regex = re.compile(r'{}([\\w\\.:;]+)'.format(URN_SCHEME))\n if data.hasFormat('text/uri-list'):\n for url in data.urls():\n local_file = url.toLocalFile()\n if local_file:\n try:\n self.import_session(local_file)\n continue\n except Exception as e:\n pass\n\n url_str = url.toString()\n uri_search = uri_regex.search(url_str)\n urn_search = urn_regex.search(url_str)\n if uri_search:\n path = uri_search.group(1)\n ext = uri_search.group(2)\n self.import_session('{}{}'.format(path, ext))\n elif urn_search:\n search_str = urn_search.group(1)\n node_ids = sorted(re.findall('node:([\\w\\\\.]+)', search_str))\n x, y = pos.x(), pos.y()\n for node_id in node_ids:\n self.create_node(node_id, pos=[x, y])\n x += 80\n y += 80\n\n def _on_nodes_moved(self, node_data):\n \"\"\"\n called when selected nodes in the viewer has changed position.\n\n Args:\n node_data (dict): {<node_view>: <previous_pos>}\n \"\"\"\n self._undo_stack.beginMacro('move nodes')\n for node_view, prev_pos in node_data.items():\n node = self._model.nodes[node_view.id]\n self._undo_stack.push(NodeMovedCmd(node, node.pos(), prev_pos))\n self._undo_stack.endMacro()\n\n def _on_node_backdrop_updated(self, node_id, update_property, value):\n \"\"\"\n called when a BackdropNode is updated.\n\n Args:\n node_id (str): backdrop node id.\n value (str): update type.\n \"\"\"\n backdrop = self.get_node_by_id(node_id)\n if backdrop and isinstance(backdrop, BackdropNode):\n backdrop.on_backdrop_updated(update_property, value)\n\n def _on_search_triggered(self, node_type, pos):\n \"\"\"\n called when the tab search widget is triggered in the viewer.\n\n Args:\n node_type (str): node identifier.\n pos (tuple or list): x, y position for the node.\n \"\"\"\n self.create_node(node_type, pos=pos)\n\n def _on_connection_changed(self, disconnected, connected):\n \"\"\"\n called when a pipe connection has been changed in the viewer.\n\n Args:\n disconnected (list[list[widgets.port.PortItem]):\n pair list of port view items.\n connected (list[list[widgets.port.PortItem]]):\n pair list of port view items.\n \"\"\"\n if not (disconnected or connected):\n return\n\n label = 'connect node(s)' if connected else 'disconnect node(s)'\n ptypes = {PortTypeEnum.IN.value: 'inputs',\n PortTypeEnum.OUT.value: 'outputs'}\n\n self._undo_stack.beginMacro(label)\n for p1_view, p2_view in disconnected:\n node1 = self._model.nodes[p1_view.node.id]\n node2 = self._model.nodes[p2_view.node.id]\n port1 = getattr(node1, ptypes[p1_view.port_type])()[p1_view.name]\n port2 = getattr(node2, ptypes[p2_view.port_type])()[p2_view.name]\n port1.disconnect_from(port2)\n for p1_view, p2_view in 
connected:\n node1 = self._model.nodes[p1_view.node.id]\n node2 = self._model.nodes[p2_view.node.id]\n port1 = getattr(node1, ptypes[p1_view.port_type])()[p1_view.name]\n port2 = getattr(node2, ptypes[p2_view.port_type])()[p2_view.name]\n port1.connect_to(port2)\n self._undo_stack.endMacro()\n\n def _on_connection_sliced(self, ports):\n \"\"\"\n slot when connection pipes have been sliced.\n\n Args:\n ports (list[list[widgets.port.PortItem]]):\n pair list of port connections (in port, out port)\n \"\"\"\n if not ports:\n return\n ptypes = {PortTypeEnum.IN.value: 'inputs',\n PortTypeEnum.OUT.value: 'outputs'}\n self._undo_stack.beginMacro('slice connections')\n for p1_view, p2_view in ports:\n node1 = self._model.nodes[p1_view.node.id]\n node2 = self._model.nodes[p2_view.node.id]\n port1 = getattr(node1, ptypes[p1_view.port_type])()[p1_view.name]\n port2 = getattr(node2, ptypes[p2_view.port_type])()[p2_view.name]\n port1.disconnect_from(port2)\n self._undo_stack.endMacro()\n\n @property\n def model(self):\n \"\"\"\n The model used for storing the node graph data.\n\n Returns:\n NodeGraphQt.base.model.NodeGraphModel: node graph model.\n \"\"\"\n return self._model\n\n @property\n def node_factory(self):\n \"\"\"\n Return the node factory object used by the node graph.\n\n Returns:\n NodeFactory: node factory.\n \"\"\"\n return self._node_factory\n\n @property\n def widget(self):\n \"\"\"\n The node graph widget for adding into a layout.\n\n Returns:\n NodeGraphWidget: node graph widget.\n \"\"\"\n if self._widget is None:\n self._widget = NodeGraphWidget()\n self._widget.addTab(self._viewer, 'Node Graph')\n # hide the close button on the first tab.\n tab_bar = self._widget.tabBar()\n for btn_flag in [tab_bar.RightSide, tab_bar.LeftSide]:\n tab_btn = tab_bar.tabButton(0, btn_flag)\n if tab_btn:\n tab_btn.deleteLater()\n tab_bar.setTabButton(0, btn_flag, None)\n self._widget.tabCloseRequested.connect(\n self._on_close_sub_graph_tab\n )\n return self._widget\n\n @property\n def undo_view(self):\n \"\"\"\n Returns node graph undo history list widget.\n\n Returns:\n PySide2.QtWidgets.QUndoView: node graph undo view.\n \"\"\"\n if self._undo_view is None:\n self._undo_view = QtWidgets.QUndoView(self._undo_stack)\n self._undo_view.setWindowTitle('Undo History')\n return self._undo_view\n\n def toggle_node_search(self):\n \"\"\"\n toggle the node search widget visibility.\n \"\"\"\n if self._viewer.underMouse():\n self._viewer.tab_search_set_nodes(self._node_factory.names)\n self._viewer.tab_search_toggle()\n\n def show(self):\n \"\"\"\n Show node graph widget this is just a convenience\n function to :meth:`NodeGraph.widget.show()`.\n \"\"\"\n self.widget.show()\n\n def close(self):\n \"\"\"\n Close node graph NodeViewer widget this is just a convenience\n function to :meth:`NodeGraph.widget.close()`.\n \"\"\"\n self.widget.close()\n\n def viewer(self):\n \"\"\"\n Returns the internal view interface used by the node graph.\n\n Warnings:\n Methods in the ``NodeViewer`` are used internally\n by ``NodeGraphQt`` components to get the widget use\n :attr:`NodeGraph.widget`.\n\n See Also:\n :attr:`NodeGraph.widget` to add the node graph widget into a\n :class:`PySide2.QtWidgets.QLayout`.\n\n Returns:\n NodeGraphQt.widgets.viewer.NodeViewer: viewer interface.\n \"\"\"\n return self._viewer\n\n def scene(self):\n \"\"\"\n Returns the ``QGraphicsScene`` object used in the node graph.\n\n Returns:\n NodeGraphQt.widgets.scene.NodeScene: node scene.\n \"\"\"\n return self._viewer.scene()\n\n def 
background_color(self):\n \"\"\"\n Return the node graph background color.\n\n Returns:\n tuple: r, g ,b\n \"\"\"\n return self.scene().background_color\n\n def set_background_color(self, r, g, b):\n \"\"\"\n Set node graph background color.\n\n Args:\n r (int): red value.\n g (int): green value.\n b (int): blue value.\n \"\"\"\n self.scene().background_color = (r, g, b)\n self._viewer.force_update()\n\n def grid_color(self):\n \"\"\"\n Return the node graph grid color.\n\n Returns:\n tuple: r, g ,b\n \"\"\"\n return self.scene().grid_color\n\n def set_grid_color(self, r, g, b):\n \"\"\"\n Set node graph grid color.\n\n Args:\n r (int): red value.\n g (int): green value.\n b (int): blue value.\n \"\"\"\n self.scene().grid_color = (r, g, b)\n self._viewer.force_update()\n\n def set_grid_mode(self, mode=None):\n \"\"\"\n Set node graph grid mode.\n\n Note:\n By default grid mode is set to \"VIEWER_GRID_LINES\".\n\n Node graph background types:\n\n * :attr:`NodeGraphQt.constants.ViewerEnum.GRID_DISPLAY_NONE.value`\n * :attr:`NodeGraphQt.constants.ViewerEnum.GRID_DISPLAY_DOTS.value`\n * :attr:`NodeGraphQt.constants.ViewerEnum.GRID_DISPLAY_LINES.value`\n\n Args:\n mode (int): background style.\n \"\"\"\n display_types = [\n ViewerEnum.GRID_DISPLAY_NONE.value,\n ViewerEnum.GRID_DISPLAY_DOTS.value,\n ViewerEnum.GRID_DISPLAY_LINES.value\n ]\n if mode not in display_types:\n mode = ViewerEnum.GRID_DISPLAY_LINES.value\n self.scene().grid_mode = mode\n self._viewer.force_update()\n\n def add_properties_bin(self, prop_bin):\n \"\"\"\n Wire up a properties bin widget to the node graph.\n\n Args:\n prop_bin (NodeGraphQt.PropertiesBinWidget): properties widget.\n \"\"\"\n prop_bin.property_changed.connect(self._on_property_bin_changed)\n\n def undo_stack(self):\n \"\"\"\n Returns the undo stack used in the node graph.\n\n See Also:\n :meth:`NodeGraph.begin_undo()`,\n :meth:`NodeGraph.end_undo()`\n\n Returns:\n QtWidgets.QUndoStack: undo stack.\n \"\"\"\n return self._undo_stack\n\n def clear_undo_stack(self):\n \"\"\"\n Clears the undo stack.\n\n Note:\n Convenience function to\n :meth:`NodeGraph.undo_stack().clear()`\n\n See Also:\n :meth:`NodeGraph.begin_undo()`,\n :meth:`NodeGraph.end_undo()`,\n :meth:`NodeGraph.undo_stack()`\n \"\"\"\n self._undo_stack.clear()\n\n def begin_undo(self, name):\n \"\"\"\n Start of an undo block followed by a\n :meth:`NodeGraph.end_undo()`.\n\n Args:\n name (str): name for the undo block.\n \"\"\"\n self._undo_stack.beginMacro(name)\n\n def end_undo(self):\n \"\"\"\n End of an undo block started by\n :meth:`NodeGraph.begin_undo()`.\n \"\"\"\n self._undo_stack.endMacro()\n\n def context_menu(self):\n \"\"\"\n Returns the context menu for the node graph.\n\n Note:\n This is a convenience function to\n :meth:`NodeGraph.get_context_menu`\n with the arg ``menu=\"graph\"``\n\n Returns:\n NodeGraphQt.NodeGraphMenu: context menu object.\n \"\"\"\n return self.get_context_menu('graph')\n\n def context_nodes_menu(self):\n \"\"\"\n Returns the context menu for the nodes.\n\n Note:\n This is a convenience function to\n :meth:`NodeGraph.get_context_menu`\n with the arg ``menu=\"nodes\"``\n\n Returns:\n NodeGraphQt.NodesMenu: context menu object.\n \"\"\"\n return self.get_context_menu('nodes')\n\n def get_context_menu(self, menu):\n \"\"\"\n Returns the context menu specified by the name.\n\n Menu Types:\n - ``\"graph\"`` context menu from the node graph.\n - ``\"nodes\"`` context menu for the nodes.\n\n Args:\n menu (str): menu name.\n\n Returns:\n NodeGraphQt.NodeGraphMenu or 
NodeGraphQt.NodesMenu: context menu object.\n \"\"\"\n menus = self._viewer.context_menus()\n if menus.get(menu):\n if menu == 'graph':\n return NodeGraphMenu(self, menus[menu])\n elif menu == 'nodes':\n return NodesMenu(self, menus[menu])\n\n def disable_context_menu(self, disabled=True, name='all'):\n \"\"\"\n Disable/Enable context menus from the node graph.\n\n Menu Types:\n - ``\"all\"`` all context menus from the node graph.\n - ``\"graph\"`` context menu from the node graph.\n - ``\"nodes\"`` context menu for the nodes.\n\n Args:\n disabled (bool): true to enable context menu.\n name (str): menu name. (default: ``\"all\"``)\n \"\"\"\n if name == 'all':\n for k, menu in self._viewer.context_menus().items():\n menu.setDisabled(disabled)\n menu.setVisible(not disabled)\n return\n menus = self._viewer.context_menus()\n if menus.get(name):\n menus[name].setDisabled(disabled)\n menus[name].setVisible(not disabled)\n\n def acyclic(self):\n \"\"\"\n Returns true if the current node graph is acyclic.\n\n See Also:\n :meth:`NodeGraph.set_acyclic`\n\n Returns:\n bool: true if acyclic (default: ``True``).\n \"\"\"\n return self._model.acyclic\n\n def set_acyclic(self, mode=False):\n \"\"\"\n Enable the node graph to be a acyclic graph. (default: ``False``)\n\n See Also:\n :meth:`NodeGraph.acyclic`\n\n Args:\n mode (bool): true to enable acyclic.\n \"\"\"\n self._model.acyclic = mode\n self._viewer.acyclic = mode\n\n def pipe_collision(self):\n \"\"\"\n Returns if pipe collision is enabled.\n\n See Also:\n To enable/disable pipe collision\n :meth:`NodeGraph.set_pipe_collision`\n\n Returns:\n bool: True if pipe collision is enabled.\n \"\"\"\n return self._model.pipe_collision\n\n def set_pipe_collision(self, mode=True):\n \"\"\"\n Enable/Disable pipe collision.\n\n When enabled dragging a node over a pipe will allow the node to be\n inserted as a new connection between the pipe.\n\n See Also:\n :meth:`NodeGraph.pipe_collision`\n\n Args:\n mode (bool): False to disable pipe collision.\n \"\"\"\n self._model.pipe_collision = mode\n self._viewer.pipe_collision = mode\n\n def set_pipe_style(self, style=PipeLayoutEnum.CURVED.value):\n \"\"\"\n Set node graph pipes to be drawn as straight, curved or angled.\n\n .. 
image:: _images/pipe_layout_types.gif\n :width: 80%\n\n Note:\n By default pipe layout is set to \"PIPE_LAYOUT_CURVED\".\n\n Pipe Layout Styles:\n\n * :attr:`NodeGraphQt.constants.PipeLayoutEnum.CURVED.value`\n * :attr:`NodeGraphQt.constants.PipeLayoutEnum.STRAIGHT.value`\n * :attr:`NodeGraphQt.constants.PipeLayoutEnum.ANGLE.value`\n\n Args:\n style (int): pipe layout style.\n \"\"\"\n pipe_max = max([PipeLayoutEnum.CURVED.value,\n PipeLayoutEnum.STRAIGHT.value,\n PipeLayoutEnum.ANGLE.value])\n style = style if 0 <= style <= pipe_max else PipeLayoutEnum.CURVED.value\n self._viewer.set_pipe_layout(style)\n\n def fit_to_selection(self):\n \"\"\"\n Sets the zoom level to fit selected nodes.\n If no nodes are selected then all nodes in the graph will be framed.\n \"\"\"\n nodes = self.selected_nodes() or self.all_nodes()\n if not nodes:\n return\n self._viewer.zoom_to_nodes([n.view for n in nodes])\n\n def reset_zoom(self):\n \"\"\"\n Reset the zoom level\n \"\"\"\n self._viewer.reset_zoom()\n\n def set_zoom(self, zoom=0):\n \"\"\"\n Set the zoom factor of the Node Graph the default is ``0.0``\n\n Args:\n zoom (float): zoom factor (max zoom out ``-0.9`` / max zoom in ``2.0``)\n \"\"\"\n self._viewer.set_zoom(zoom)\n\n def get_zoom(self):\n \"\"\"\n Get the current zoom level of the node graph.\n\n Returns:\n float: the current zoom level.\n \"\"\"\n return self._viewer.get_zoom()\n\n def center_on(self, nodes=None):\n \"\"\"\n Center the node graph on the given nodes or all nodes by default.\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]): a list of nodes.\n \"\"\"\n self._viewer.center_selection([n.view for n in nodes])\n\n def center_selection(self):\n \"\"\"\n Centers on the current selected nodes.\n \"\"\"\n nodes = self._viewer.selected_nodes()\n self._viewer.center_selection(nodes)\n\n def registered_nodes(self):\n \"\"\"\n Return a list of all node types that have been registered.\n\n See Also:\n To register a node :meth:`NodeGraph.register_node`\n\n Returns:\n list[str]: list of node type identifiers.\n \"\"\"\n return sorted(self._node_factory.nodes.keys())\n\n def register_node(self, node, alias=None):\n \"\"\"\n Register the node to the :meth:`NodeGraph.node_factory`\n\n Args:\n node (_NodeGraphQt.NodeObject): node object.\n alias (str): custom alias name for the node type.\n \"\"\"\n self._node_factory.register_node(node, alias)\n self._viewer.rebuild_tab_search()\n\n def register_nodes(self, nodes):\n \"\"\"\n Register the nodes to the :meth:`NodeGraph.node_factory`\n\n Args:\n nodes (list): list of nodes.\n \"\"\"\n [self._node_factory.register_node(n) for n in nodes]\n self._viewer.rebuild_tab_search()\n\n def create_node(self, node_type, name=None, selected=True, color=None,\n text_color=None, pos=None, push_undo=True):\n \"\"\"\n Create a new node in the node graph.\n\n See Also:\n To list all node types :meth:`NodeGraph.registered_nodes`\n\n Args:\n node_type (str): node instance type.\n name (str): set name of the node.\n selected (bool): set created node to be selected.\n color (tuple or str): node color ``(255, 255, 255)`` or ``\"#FFFFFF\"``.\n text_color (tuple or str): text color ``(255, 255, 255)`` or ``\"#FFFFFF\"``.\n pos (list[int, int]): initial x, y position for the node (default: ``(0, 0)``).\n push_undo (bool): register the command to the undo stack. 
(default: True)\n\n Returns:\n BaseNode: the created instance of the node.\n \"\"\"\n node = self._node_factory.create_node_instance(node_type)\n if node:\n node._graph = self\n node.model._graph_model = self.model\n\n wid_types = node.model.__dict__.pop('_TEMP_property_widget_types')\n prop_attrs = node.model.__dict__.pop('_TEMP_property_attrs')\n\n if self.model.get_node_common_properties(node.type_) is None:\n node_attrs = {node.type_: {\n n: {'widget_type': wt} for n, wt in wid_types.items()\n }}\n for pname, pattrs in prop_attrs.items():\n node_attrs[node.type_][pname].update(pattrs)\n self.model.set_node_common_properties(node_attrs)\n\n node.NODE_NAME = self.get_unique_name(name or node.NODE_NAME)\n node.model.name = node.NODE_NAME\n node.model.selected = selected\n\n def format_color(clr):\n if isinstance(clr, str):\n clr = clr.strip('#')\n return tuple(int(clr[i:i + 2], 16) for i in (0, 2, 4))\n return clr\n\n if color:\n node.model.color = format_color(color)\n if text_color:\n node.model.text_color = format_color(text_color)\n if pos:\n node.model.pos = [float(pos[0]), float(pos[1])]\n\n node.update()\n\n if push_undo:\n undo_cmd = NodeAddedCmd(self, node, node.model.pos)\n undo_cmd.setText('create node: \"{}\"'.format(node.NODE_NAME))\n self._undo_stack.push(undo_cmd)\n else:\n NodeAddedCmd(self, node, node.model.pos).redo()\n\n self.node_created.emit(node)\n return node\n raise TypeError('\\n\\n>> Cannot find node:\\t\"{}\"\\n'.format(node_type))\n\n def add_node(self, node, pos=None, selected=True, push_undo=True):\n \"\"\"\n Add a node into the node graph.\n unlike the :meth:`NodeGraph.create_node` function this will not\n trigger the :attr:`NodeGraph.node_created` signal.\n\n Args:\n node (NodeGraphQt.BaseNode): node object.\n pos (list[float]): node x,y position. (optional)\n selected (bool): node selected state. (optional)\n push_undo (bool): register the command to the undo stack. (default: True)\n \"\"\"\n assert isinstance(node, NodeObject), 'node must be a Node instance.'\n\n wid_types = node.model.__dict__.pop('_TEMP_property_widget_types')\n prop_attrs = node.model.__dict__.pop('_TEMP_property_attrs')\n\n if self.model.get_node_common_properties(node.type_) is None:\n node_attrs = {node.type_: {\n n: {'widget_type': wt} for n, wt in wid_types.items()\n }}\n for pname, pattrs in prop_attrs.items():\n node_attrs[node.type_][pname].update(pattrs)\n self.model.set_node_common_properties(node_attrs)\n\n node._graph = self\n node.NODE_NAME = self.get_unique_name(node.NODE_NAME)\n node.model._graph_model = self.model\n node.model.name = node.NODE_NAME\n node.update()\n\n if push_undo:\n self._undo_stack.beginMacro('add node: \"{}\"'.format(node.name()))\n self._undo_stack.push(NodeAddedCmd(self, node, pos))\n if selected:\n node.set_selected(True)\n self._undo_stack.endMacro()\n else:\n NodeAddedCmd(self, node, pos).redo()\n\n def delete_node(self, node, push_undo=True):\n \"\"\"\n Remove the node from the node graph.\n\n Args:\n node (NodeGraphQt.BaseNode): node object.\n push_undo (bool): register the command to the undo stack. 
(default: True)\n \"\"\"\n assert isinstance(node, NodeObject), \\\n 'node must be a instance of a NodeObject.'\n node_id = node.id\n if push_undo:\n self._undo_stack.beginMacro('delete node: \"{}\"'.format(node.name()))\n\n if isinstance(node, BaseNode):\n for p in node.input_ports():\n if p.locked():\n p.set_locked(False,\n connected_ports=False,\n push_undo=push_undo)\n p.clear_connections()\n for p in node.output_ports():\n if p.locked():\n p.set_locked(False,\n connected_ports=False,\n push_undo=push_undo)\n p.clear_connections()\n\n if push_undo:\n self._undo_stack.push(NodeRemovedCmd(self, node))\n self._undo_stack.endMacro()\n else:\n NodeRemovedCmd(self, node).redo()\n\n self.nodes_deleted.emit([node_id])\n\n def remove_node(self, node, push_undo=True):\n \"\"\"\n Remove the node from the node graph.\n\n unlike the :meth:`NodeGraph.delete_node` function this will not\n trigger the :attr:`NodeGraph.nodes_deleted` signal.\n\n Args:\n node (NodeGraphQt.BaseNode): node object.\n push_undo (bool): register the command to the undo stack. (default: True)\n\n \"\"\"\n assert isinstance(node, NodeObject), 'node must be a Node instance.'\n\n if push_undo:\n self._undo_stack.beginMacro('delete node: \"{}\"'.format(node.name()))\n\n if isinstance(node, BaseNode):\n for p in node.input_ports():\n if p.locked():\n p.set_locked(False,\n connected_ports=False,\n push_undo=push_undo)\n p.clear_connections()\n for p in node.output_ports():\n if p.locked():\n p.set_locked(False,\n connected_ports=False,\n push_undo=push_undo)\n p.clear_connections()\n\n if push_undo:\n self._undo_stack.push(NodeRemovedCmd(self, node))\n self._undo_stack.endMacro()\n else:\n NodeRemovedCmd(self, node).redo()\n\n def delete_nodes(self, nodes, push_undo=True):\n \"\"\"\n Remove a list of specified nodes from the node graph.\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]): list of node instances.\n push_undo (bool): register the command to the undo stack. 
(default: True)\n \"\"\"\n if not nodes:\n return\n if len(nodes) == 1:\n self.delete_node(nodes[0], push_undo=push_undo)\n return\n node_ids = [n.id for n in nodes]\n if push_undo:\n self._undo_stack.beginMacro('deleted \"{}\" nodes'.format(len(nodes)))\n for node in nodes:\n if isinstance(node, BaseNode):\n for p in node.input_ports():\n if p.locked():\n p.set_locked(False,\n connected_ports=False,\n push_undo=push_undo)\n p.clear_connections(push_undo=push_undo)\n for p in node.output_ports():\n if p.locked():\n p.set_locked(False,\n connected_ports=False,\n push_undo=push_undo)\n p.clear_connections(push_undo=push_undo)\n if push_undo:\n self._undo_stack.push(NodeRemovedCmd(self, node))\n else:\n NodeRemovedCmd(self, node).redo()\n if push_undo:\n self._undo_stack.endMacro()\n self.nodes_deleted.emit(node_ids)\n\n def all_nodes(self):\n \"\"\"\n Return all nodes in the node graph.\n\n Returns:\n list[NodeGraphQt.BaseNode]: list of nodes.\n \"\"\"\n return list(self._model.nodes.values())\n\n def selected_nodes(self):\n \"\"\"\n Return all selected nodes that are in the node graph.\n\n Returns:\n list[NodeGraphQt.BaseNode]: list of nodes.\n \"\"\"\n nodes = []\n for item in self._viewer.selected_nodes():\n node = self._model.nodes[item.id]\n nodes.append(node)\n return nodes\n\n def select_all(self):\n \"\"\"\n Select all nodes in the node graph.\n \"\"\"\n self._undo_stack.beginMacro('select all')\n [node.set_selected(True) for node in self.all_nodes()]\n self._undo_stack.endMacro()\n\n def clear_selection(self):\n \"\"\"\n Clears the selection in the node graph.\n \"\"\"\n self._undo_stack.beginMacro('clear selection')\n [node.set_selected(False) for node in self.all_nodes()]\n self._undo_stack.endMacro()\n\n def get_node_by_id(self, node_id=None):\n \"\"\"\n Returns the node from the node id string.\n\n Args:\n node_id (str): node id (:attr:`NodeObject.id`)\n\n Returns:\n NodeGraphQt.NodeObject: node object.\n \"\"\"\n return self._model.nodes.get(node_id, None)\n\n def get_node_by_name(self, name):\n \"\"\"\n Returns node that matches the name.\n\n Args:\n name (str): name of the node.\n Returns:\n NodeGraphQt.NodeObject: node object.\n \"\"\"\n for node_id, node in self._model.nodes.items():\n if node.name() == name:\n return node\n\n def get_nodes_by_type(self, node_type):\n \"\"\"\n Return all nodes by their node type identifier.\n (see: :attr:`NodeGraphQt.NodeObject.type_`)\n\n Args:\n node_type (str): node type identifier.\n\n Returns:\n list[NodeGraphQt.NodeObject]: list of nodes.\n \"\"\"\n return [n for n in self._model.nodes.values() if n.type_ == node_type]\n\n def get_unique_name(self, name):\n \"\"\"\n Creates a unique node name to avoid having nodes with the same name.\n\n Args:\n name (str): node name.\n\n Returns:\n str: unique node name.\n \"\"\"\n name = ' '.join(name.split())\n node_names = [n.name() for n in self.all_nodes()]\n if name not in node_names:\n return name\n\n regex = re.compile(r'[\\w ]+(?: )*(\\d+)')\n search = regex.search(name)\n if not search:\n for x in range(1, len(node_names) + 2):\n new_name = '{} {}'.format(name, x)\n if new_name not in node_names:\n return new_name\n\n version = search.group(1)\n name = name[:len(version) * -1].strip()\n for x in range(1, len(node_names) + 2):\n new_name = '{} {}'.format(name, x)\n if new_name not in node_names:\n return new_name\n\n def current_session(self):\n \"\"\"\n Returns the file path to the currently loaded session.\n\n Returns:\n str: path to the currently loaded session\n \"\"\"\n return 
self._model.session\n\n def clear_session(self):\n \"\"\"\n Clears the current node graph session.\n \"\"\"\n for n in self.all_nodes():\n if isinstance(n, BaseNode):\n for p in n.input_ports():\n if p.locked():\n p.set_locked(False, connected_ports=False)\n p.clear_connections()\n for p in n.output_ports():\n if p.locked():\n p.set_locked(False, connected_ports=False)\n p.clear_connections()\n self._undo_stack.push(NodeRemovedCmd(self, n))\n self._undo_stack.clear()\n self._model.session = ''\n\n def _serialize(self, nodes):\n \"\"\"\n serialize nodes to a dict.\n (used internally by the node graph)\n\n Args:\n nodes (list[NodeGraphQt.Nodes]): list of node instances.\n\n Returns:\n dict: serialized data.\n \"\"\"\n serial_data = {'graph': {}, 'nodes': {}, 'connections': []}\n nodes_data = {}\n\n # serialize graph session.\n serial_data['graph']['acyclic'] = self.acyclic()\n serial_data['graph']['pipe_collision'] = self.pipe_collision()\n\n # serialize nodes.\n for n in nodes:\n # update the node model.\n n.update_model()\n\n node_dict = n.model.to_dict\n nodes_data.update(node_dict)\n\n for n_id, n_data in nodes_data.items():\n serial_data['nodes'][n_id] = n_data\n\n # serialize connections\n inputs = n_data.pop('inputs') if n_data.get('inputs') else {}\n outputs = n_data.pop('outputs') if n_data.get('outputs') else {}\n\n for pname, conn_data in inputs.items():\n for conn_id, prt_names in conn_data.items():\n for conn_prt in prt_names:\n pipe = {\n PortTypeEnum.IN.value: [n_id, pname],\n PortTypeEnum.OUT.value: [conn_id, conn_prt]\n }\n if pipe not in serial_data['connections']:\n serial_data['connections'].append(pipe)\n\n for pname, conn_data in outputs.items():\n for conn_id, prt_names in conn_data.items():\n for conn_prt in prt_names:\n pipe = {\n PortTypeEnum.OUT.value: [n_id, pname],\n PortTypeEnum.IN.value: [conn_id, conn_prt]\n }\n if pipe not in serial_data['connections']:\n serial_data['connections'].append(pipe)\n\n if not serial_data['connections']:\n serial_data.pop('connections')\n\n return serial_data\n\n def _deserialize(self, data, relative_pos=False, pos=None):\n \"\"\"\n deserialize node data.\n (used internally by the node graph)\n\n Args:\n data (dict): node data.\n relative_pos (bool): position node relative to the cursor.\n pos (tuple or list): custom x, y position.\n\n Returns:\n list[NodeGraphQt.Nodes]: list of node instances.\n \"\"\"\n # update node graph properties.\n for attr_name, attr_value in data.get('graph', {}).items():\n if attr_name == 'acyclic':\n self.set_acyclic(attr_value)\n elif attr_name == 'pipe_collision':\n self.set_pipe_collision(attr_value)\n\n # build the nodes.\n nodes = {}\n for n_id, n_data in data.get('nodes', {}).items():\n identifier = n_data['type_']\n node = self._node_factory.create_node_instance(identifier)\n if node:\n node.NODE_NAME = n_data.get('name', node.NODE_NAME)\n # set properties.\n for prop in node.model.properties.keys():\n if prop in n_data.keys():\n node.model.set_property(prop, n_data[prop])\n # set custom properties.\n for prop, val in n_data.get('custom', {}).items():\n node.model.set_property(prop, val)\n if prop in node.view.widgets:\n node.view.widgets[prop].set_value(val)\n\n nodes[n_id] = node\n self.add_node(node, n_data.get('pos'))\n\n if n_data.get('port_deletion_allowed', None):\n node.set_ports({\n 'input_ports': n_data['input_ports'],\n 'output_ports': n_data['output_ports']\n })\n\n # build the connections.\n for connection in data.get('connections', []):\n nid, pname = connection.get('in', ('', 
''))\n in_node = nodes.get(nid) or self.get_node_by_id(nid)\n if not in_node:\n continue\n in_port = in_node.inputs().get(pname) if in_node else None\n\n nid, pname = connection.get('out', ('', ''))\n out_node = nodes.get(nid) or self.get_node_by_id(nid)\n if not out_node:\n continue\n out_port = out_node.outputs().get(pname) if out_node else None\n\n if in_port and out_port:\n # only connect if input port is not connected yet or input port\n # can have multiple connections.\n # important when duplicating nodes.\n allow_connection = any([not in_port.model.connected_ports,\n in_port.model.multi_connection])\n if allow_connection:\n self._undo_stack.push(PortConnectedCmd(in_port, out_port))\n\n node_objs = nodes.values()\n if relative_pos:\n self._viewer.move_nodes([n.view for n in node_objs])\n [setattr(n.model, 'pos', n.view.xy_pos) for n in node_objs]\n elif pos:\n self._viewer.move_nodes([n.view for n in node_objs], pos=pos)\n [setattr(n.model, 'pos', n.view.xy_pos) for n in node_objs]\n\n return node_objs\n\n def serialize_session(self):\n \"\"\"\n Serializes the current node graph layout to a dictionary.\n\n See Also:\n :meth:`NodeGraph.deserialize_session`,\n :meth:`NodeGraph.save_session`,\n :meth:`NodeGraph.load_session`\n\n Returns:\n dict: serialized session of the current node layout.\n \"\"\"\n return self._serialize(self.all_nodes())\n\n def deserialize_session(self, layout_data):\n \"\"\"\n Load node graph session from a dictionary object.\n\n See Also:\n :meth:`NodeGraph.serialize_session`,\n :meth:`NodeGraph.load_session`,\n :meth:`NodeGraph.save_session`\n\n Args:\n layout_data (dict): dictionary object containing a node session.\n \"\"\"\n self.clear_session()\n self._deserialize(layout_data)\n self.clear_selection()\n self._undo_stack.clear()\n\n def save_session(self, file_path):\n \"\"\"\n Saves the current node graph session layout to a `JSON` formatted file.\n\n See Also:\n :meth:`NodeGraph.serialize_session`,\n :meth:`NodeGraph.deserialize_session`,\n :meth:`NodeGraph.load_session`,\n\n Args:\n file_path (str): path to the saved node layout.\n \"\"\"\n serialized_data = self._serialize(self.all_nodes())\n file_path = file_path.strip()\n with open(file_path, 'w') as file_out:\n json.dump(\n serialized_data,\n file_out,\n indent=2,\n separators=(',', ':')\n )\n\n def load_session(self, file_path):\n \"\"\"\n Load node graph session layout file.\n\n See Also:\n :meth:`NodeGraph.deserialize_session`,\n :meth:`NodeGraph.serialize_session`,\n :meth:`NodeGraph.save_session`\n\n Args:\n file_path (str): path to the serialized layout file.\n \"\"\"\n file_path = file_path.strip()\n if not os.path.isfile(file_path):\n raise IOError('file does not exist: {}'.format(file_path))\n\n self.clear_session()\n self.import_session(file_path)\n\n def import_session(self, file_path):\n \"\"\"\n Import node graph session layout file.\n\n Args:\n file_path (str): path to the serialized layout file.\n \"\"\"\n file_path = file_path.strip()\n if not os.path.isfile(file_path):\n raise IOError('file does not exist: {}'.format(file_path))\n\n try:\n with open(file_path) as data_file:\n layout_data = json.load(data_file)\n except Exception as e:\n layout_data = None\n print('Cannot read data from file.\\n{}'.format(e))\n\n if not layout_data:\n return\n\n self._deserialize(layout_data)\n self._undo_stack.clear()\n self._model.session = file_path\n\n self.session_changed.emit(file_path)\n\n def copy_nodes(self, nodes=None):\n \"\"\"\n Copy nodes to the clipboard.\n\n See Also:\n 
:meth:`NodeGraph.cut_nodes`\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]):\n list of nodes (default: selected nodes).\n \"\"\"\n nodes = nodes or self.selected_nodes()\n if not nodes:\n return False\n clipboard = QtWidgets.QApplication.clipboard()\n serial_data = self._serialize(nodes)\n serial_str = json.dumps(serial_data)\n if serial_str:\n clipboard.setText(serial_str)\n return True\n return False\n\n def cut_nodes(self, nodes=None):\n \"\"\"\n Cut nodes to the clipboard.\n\n See Also:\n :meth:`NodeGraph.copy_nodes`\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]):\n list of nodes (default: selected nodes).\n \"\"\"\n nodes = nodes or self.selected_nodes()\n self.copy_nodes(nodes)\n self._undo_stack.beginMacro('cut nodes')\n [self._undo_stack.push(NodeRemovedCmd(self, n)) for n in nodes]\n self._undo_stack.endMacro()\n\n def paste_nodes(self):\n \"\"\"\n Pastes nodes copied from the clipboard.\n \"\"\"\n clipboard = QtWidgets.QApplication.clipboard()\n cb_text = clipboard.text()\n if not cb_text:\n return\n\n try:\n serial_data = json.loads(cb_text)\n except json.decoder.JSONDecodeError as e:\n print('ERROR: Can\\'t Decode Clipboard Data:\\n'\n '\"{}\"'.format(cb_text))\n return\n\n self._undo_stack.beginMacro('pasted nodes')\n self.clear_selection()\n nodes = self._deserialize(serial_data, relative_pos=True)\n [n.set_selected(True) for n in nodes]\n self._undo_stack.endMacro()\n\n def duplicate_nodes(self, nodes):\n \"\"\"\n Create duplicate copy from the list of nodes.\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]): list of nodes.\n Returns:\n list[NodeGraphQt.BaseNode]: list of duplicated node instances.\n \"\"\"\n if not nodes:\n return\n\n self._undo_stack.beginMacro('duplicate nodes')\n\n self.clear_selection()\n serial = self._serialize(nodes)\n new_nodes = self._deserialize(serial)\n offset = 50\n for n in new_nodes:\n x, y = n.pos()\n n.set_pos(x + offset, y + offset)\n n.set_property('selected', True)\n\n self._undo_stack.endMacro()\n return new_nodes\n\n def disable_nodes(self, nodes, mode=None):\n \"\"\"\n Set weather to Disable or Enable specified nodes.\n\n See Also:\n :meth:`NodeObject.set_disabled`\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]): list of node instances.\n mode (bool): (optional) disable state of the nodes.\n \"\"\"\n if not nodes:\n return\n if mode is None:\n mode = not nodes[0].disabled()\n if len(nodes) > 1:\n text = {False: 'enable', True: 'disable'}[mode]\n text = '{} ({}) nodes'.format(text, len(nodes))\n self._undo_stack.beginMacro(text)\n [n.set_disabled(mode) for n in nodes]\n self._undo_stack.endMacro()\n return\n nodes[0].set_disabled(mode)\n\n def use_OpenGL(self):\n \"\"\"\n Set the viewport to use QOpenGLWidget widget to draw the graph.\n \"\"\"\n self._viewer.use_OpenGL()\n\n # auto layout node functions.\n # --------------------------------------------------------------------------\n\n @staticmethod\n def _update_node_rank(node, nodes_rank, down_stream=True):\n \"\"\"\n Recursive function for updating the node ranking.\n\n Args:\n node (NodeGraphQt.BaseNode): node to start from.\n nodes_rank (dict): node ranking object to be updated.\n down_stream (bool): true to rank down stram.\n \"\"\"\n if down_stream:\n node_values = node.connected_output_nodes().values()\n else:\n node_values = node.connected_input_nodes().values()\n\n connected_nodes = set()\n for nodes in node_values:\n connected_nodes.update(nodes)\n\n rank = nodes_rank[node] + 1\n for n in connected_nodes:\n if n in nodes_rank:\n nodes_rank[n] = max(nodes_rank[n], rank)\n else:\n 
nodes_rank[n] = rank\n NodeGraph._update_node_rank(n, nodes_rank, down_stream)\n\n @staticmethod\n def _compute_node_rank(nodes, down_stream=True):\n \"\"\"\n Compute the ranking of nodes.\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]): nodes to start ranking from.\n down_stream (bool): true to compute down stream.\n\n Returns:\n dict: {NodeGraphQt.BaseNode: node_rank, ...}\n \"\"\"\n nodes_rank = {}\n for node in nodes:\n nodes_rank[node] = 0\n NodeGraph._update_node_rank(node, nodes_rank, down_stream)\n return nodes_rank\n\n def auto_layout_nodes(self, nodes=None, down_stream=True, start_nodes=None):\n \"\"\"\n Auto layout the nodes in the node graph.\n\n Note:\n If the node graph is acyclic then the ``start_nodes`` will need\n to be specified.\n\n Args:\n nodes (list[NodeGraphQt.BaseNode]): list of nodes to auto layout\n if nodes is None then all nodes is layed out.\n down_stream (bool): false to layout up stream.\n start_nodes (list[NodeGraphQt.BaseNode]):\n list of nodes to start the auto layout from (Optional).\n \"\"\"\n self.begin_undo('Auto Layout Nodes')\n\n nodes = nodes or self.all_nodes()\n\n # filter out the backdrops.\n backdrops = {\n n: n.nodes() for n in nodes if isinstance(n, BackdropNode)\n }\n filtered_nodes = [n for n in nodes if not isinstance(n, BackdropNode)]\n\n start_nodes = start_nodes or []\n if down_stream:\n start_nodes += [\n n for n in filtered_nodes\n if not any(n.connected_input_nodes().values())\n ]\n else:\n start_nodes += [\n n for n in filtered_nodes\n if not any(n.connected_output_nodes().values())\n ]\n\n if not start_nodes:\n return\n\n node_views = [n.view for n in nodes]\n nodes_center_0 = self.viewer().nodes_rect_center(node_views)\n\n nodes_rank = NodeGraph._compute_node_rank(start_nodes, down_stream)\n\n rank_map = {}\n for node, rank in nodes_rank.items():\n if rank in rank_map:\n rank_map[rank].append(node)\n else:\n rank_map[rank] = [node]\n\n if NODE_LAYOUT_DIRECTION is NODE_LAYOUT_HORIZONTAL:\n current_x = 0\n node_height = 120\n for rank in sorted(range(len(rank_map)), reverse=not down_stream):\n ranked_nodes = rank_map[rank]\n max_width = max([node.view.width for node in ranked_nodes])\n current_x += max_width\n current_y = 0\n for idx, node in enumerate(ranked_nodes):\n dy = max(node_height, node.view.height)\n current_y += 0 if idx == 0 else dy\n node.set_pos(current_x, current_y)\n current_y += dy * 0.5 + 10\n\n current_x += max_width * 0.5 + 100\n elif NODE_LAYOUT_DIRECTION is NODE_LAYOUT_VERTICAL:\n current_y = 0\n node_width = 250\n for rank in sorted(range(len(rank_map)), reverse=not down_stream):\n ranked_nodes = rank_map[rank]\n max_height = max([node.view.height for node in ranked_nodes])\n current_y += max_height\n current_x = 0\n for idx, node in enumerate(ranked_nodes):\n dx = max(node_width, node.view.width)\n current_x += 0 if idx == 0 else dx\n node.set_pos(current_x, current_y)\n current_x += dx * 0.5 + 10\n\n current_y += max_height * 0.5 + 40\n\n nodes_center_1 = self.viewer().nodes_rect_center(node_views)\n dx = nodes_center_0[0] - nodes_center_1[0]\n dy = nodes_center_0[1] - nodes_center_1[1]\n [n.set_pos(n.x_pos() + dx, n.y_pos() + dy) for n in nodes]\n\n # wrap the backdrop nodes.\n for backdrop, contained_nodes in backdrops.items():\n backdrop.wrap_nodes(contained_nodes)\n\n self.end_undo()\n\n # convenience dialog functions.\n # --------------------------------------------------------------------------\n\n def question_dialog(self, text, title='Node Graph'):\n \"\"\"\n Prompts a question open dialog with 
``\"Yes\"`` and ``\"No\"`` buttons in\n the node graph.\n\n Note:\n Convenience function to\n :meth:`NodeGraph.viewer().question_dialog`\n\n Args:\n text (str): question text.\n title (str): dialog window title.\n\n Returns:\n bool: true if user clicked yes.\n \"\"\"\n return self._viewer.question_dialog(text, title)\n\n def message_dialog(self, text, title='Node Graph'):\n \"\"\"\n Prompts a file open dialog in the node graph.\n\n Note:\n Convenience function to\n :meth:`NodeGraph.viewer().message_dialog`\n\n Args:\n text (str): message text.\n title (str): dialog window title.\n \"\"\"\n self._viewer.message_dialog(text, title)\n\n def load_dialog(self, current_dir=None, ext=None):\n \"\"\"\n Prompts a file open dialog in the node graph.\n\n Note:\n Convenience function to\n :meth:`NodeGraph.viewer().load_dialog`\n\n Args:\n current_dir (str): path to a directory.\n ext (str): custom file type extension (default: ``\"json\"``)\n\n Returns:\n str: selected file path.\n \"\"\"\n return self._viewer.load_dialog(current_dir, ext)\n\n def save_dialog(self, current_dir=None, ext=None):\n \"\"\"\n Prompts a file save dialog in the node graph.\n\n Note:\n Convenience function to\n :meth:`NodeGraph.viewer().save_dialog`\n\n Args:\n current_dir (str): path to a directory.\n ext (str): custom file type extension (default: ``\"json\"``)\n\n Returns:\n str: selected file path.\n \"\"\"\n return self._viewer.save_dialog(current_dir, ext)\n\n # group node / sub graph.\n # --------------------------------------------------------------------------\n\n def _on_close_sub_graph_tab(self, index):\n \"\"\"\n Called when the close button is clicked on a expanded sub graph tab.\n\n Args:\n index (int): tab index.\n \"\"\"\n node_id = self.widget.tabToolTip(index)\n group_node = self.get_node_by_id(node_id)\n self.collapse_group_node(group_node)\n\n @property\n def is_root(self):\n \"\"\"\n Returns if the node graph controller is the root graph.\n\n Returns:\n bool: true is the node graph is root.\n \"\"\"\n return True\n\n @property\n def sub_graphs(self):\n \"\"\"\n Returns expanded group node sub graphs.\n\n Returns:\n dict: {<node_id>: <sub_graph>}\n \"\"\"\n return self._sub_graphs\n\n # def graph_rect(self):\n # \"\"\"\n # Get the graph viewer range (scene size).\n #\n # Returns:\n # list[float]: [x, y, width, height].\n # \"\"\"\n # return self._viewer.scene_rect()\n #\n # def set_graph_rect(self, rect):\n # \"\"\"\n # Set the graph viewer range (scene size).\n #\n # Args:\n # rect (list[float]): [x, y, width, height].\n # \"\"\"\n # self._viewer.set_scene_rect(rect)\n\n def expand_group_node(self, node):\n \"\"\"\n Expands a group node session in a new tab.\n\n Args:\n node (NodeGraphQt.GroupNode): group node.\n\n Returns:\n SubGraph: sub node graph used to manage the group node session.\n \"\"\"\n if not isinstance(node, GroupNode):\n return\n if self._widget is None:\n raise RuntimeError('NodeGraph.widget not initialized!')\n\n self.viewer().clear_key_state()\n self.viewer().clearFocus()\n\n if node.id in self._sub_graphs:\n sub_graph = self._sub_graphs[node.id]\n tab_index = self._widget.indexOf(sub_graph.widget)\n self._widget.setCurrentIndex(tab_index)\n return sub_graph\n\n # build new sub graph.\n node_factory = copy.deepcopy(self.node_factory)\n sub_graph = SubGraph(self, node=node, node_factory=node_factory)\n\n # populate the sub graph.\n session = node.get_sub_graph_session()\n sub_graph.deserialize_session(session)\n\n # store reference to expanded.\n self._sub_graphs[node.id] = sub_graph\n\n 
# open new tab at root level.\n self.widget.add_viewer(sub_graph.widget, node.name(), node.id)\n\n return sub_graph\n\n def collapse_group_node(self, node):\n \"\"\"\n Collapse a group node session tab and it's expanded child sub graphs.\n\n Args:\n node (NodeGraphQt.GroupNode): group node.\n \"\"\"\n assert isinstance(node, GroupNode), 'node must be a GroupNode instance.'\n if self._widget is None:\n return\n\n if node.id not in self._sub_graphs:\n err = '{} sub graph not initialized!'.format(node.name())\n raise RuntimeError(err)\n\n sub_graph = self._sub_graphs.pop(node.id)\n sub_graph.collapse_group_node(node)\n\n # remove the sub graph tab.\n self.widget.remove_viewer(sub_graph.widget)\n\n # TODO: delete sub graph hmm... not sure if I need this here.\n del sub_graph" }, { "identifier": "PropertiesBinWidget", "path": "NodeGraphQt/NodeGraphQt/custom_widgets/properties_bin.py", "snippet": "class PropertiesBinWidget(QtWidgets.QWidget):\n \"\"\"\n The :class:`NodeGraphQt.PropertiesBinWidget` is a list widget for displaying\n and editing a nodes properties.\n\n .. image:: _images/prop_bin.png\n :width: 950px\n\n .. code-block:: python\n :linenos:\n\n from NodeGraphQt import NodeGraph, PropertiesBinWidget\n\n # create node graph.\n graph = NodeGraph()\n\n # create properties bin widget.\n properties_bin = PropertiesBinWidget(parent=None, node_graph=graph)\n properties_bin.show()\n\n Args:\n parent (QtWidgets.QWidget): parent of the new widget.\n node_graph (NodeGraphQt.NodeGraph): node graph.\n \"\"\"\n\n #: Signal emitted (node_id, prop_name, prop_value)\n property_changed = QtCore.Signal(str, str, object)\n\n def __init__(self, parent=None, node_graph=None):\n super(PropertiesBinWidget, self).__init__(parent)\n self.setWindowTitle('Properties Bin')\n self._prop_list = PropertiesList()\n self._limit = QtWidgets.QSpinBox()\n self._limit.setToolTip('Set display nodes limit.')\n self._limit.setMaximum(10)\n self._limit.setMinimum(0)\n self._limit.setValue(2)\n self._limit.valueChanged.connect(self.__on_limit_changed)\n self.resize(450, 400)\n\n self._block_signal = False\n\n self._lock = False\n self.btn_lock = QtWidgets.QPushButton('lock')\n self.btn_lock.setToolTip(\n 'Lock the properties bin prevent nodes from being loaded.')\n self.btn_lock.clicked.connect(self.lock_bin)\n\n btn_clr = QtWidgets.QPushButton('clear')\n btn_clr.setToolTip('Clear the properties bin.')\n btn_clr.clicked.connect(self.clear_bin)\n\n top_layout = QtWidgets.QHBoxLayout()\n top_layout.setSpacing(2)\n top_layout.addWidget(self._limit)\n top_layout.addStretch(1)\n top_layout.addWidget(self.btn_lock)\n top_layout.addWidget(btn_clr)\n\n layout = QtWidgets.QVBoxLayout(self)\n layout.addLayout(top_layout)\n layout.addWidget(self._prop_list, 1)\n\n # wire up node graph.\n node_graph.add_properties_bin(self)\n node_graph.node_double_clicked.connect(self.add_node)\n node_graph.nodes_deleted.connect(self.__on_nodes_deleted)\n node_graph.property_changed.connect(self.__on_graph_property_changed)\n\n def __repr__(self):\n return '<{} object at {}>'.format(self.__class__.__name__, hex(id(self)))\n\n def __on_prop_close(self, node_id):\n items = self._prop_list.findItems(node_id, QtCore.Qt.MatchExactly)\n [self._prop_list.removeRow(i.row()) for i in items]\n\n def __on_limit_changed(self, value):\n rows = self._prop_list.rowCount()\n if rows > value:\n self._prop_list.removeRow(rows - 1)\n\n def __on_nodes_deleted(self, nodes):\n \"\"\"\n Slot function when a node has been deleted.\n\n Args:\n nodes (list[str]): list of node 
ids.\n \"\"\"\n [self.__on_prop_close(n) for n in nodes]\n\n def __on_graph_property_changed(self, node, prop_name, prop_value):\n \"\"\"\n Slot function that updates the property bin from the node graph signal.\n\n Args:\n node (NodeGraphQt.NodeObject):\n prop_name (str): node property name.\n prop_value (object): node property value.\n \"\"\"\n properties_widget = self.prop_widget(node)\n if not properties_widget:\n return\n\n property_window = properties_widget.get_widget(prop_name)\n\n if property_window and prop_value != property_window.get_value():\n self._block_signal = True\n property_window.set_value(prop_value)\n self._block_signal = False\n\n def __on_property_widget_changed(self, node_id, prop_name, prop_value):\n \"\"\"\n Slot function triggered when a property widget value has changed.\n\n Args:\n node_id (str): node id.\n prop_name (str): node property name.\n prop_value (object): node property value.\n \"\"\"\n if not self._block_signal:\n self.property_changed.emit(node_id, prop_name, prop_value)\n\n def limit(self):\n \"\"\"\n Returns the limit for how many nodes can be loaded into the bin.\n\n Returns:\n int: node limit.\n \"\"\"\n return int(self._limit.value())\n\n def set_limit(self, limit):\n \"\"\"\n Set limit of nodes to display.\n\n Args:\n limit (int): node limit.\n \"\"\"\n self._limit.setValue(limit)\n\n def add_node(self, node):\n \"\"\"\n Add node to the properties bin.\n\n Args:\n node (NodeGraphQt.NodeObject): node object.\n \"\"\"\n if self.limit() == 0 or self._lock:\n return\n\n rows = self._prop_list.rowCount()\n if rows >= self.limit():\n self._prop_list.removeRow(rows - 1)\n\n itm_find = self._prop_list.findItems(node.id, QtCore.Qt.MatchExactly)\n if itm_find:\n self._prop_list.removeRow(itm_find[0].row())\n\n self._prop_list.insertRow(0)\n prop_widget = NodePropWidget(node=node)\n prop_widget.property_changed.connect(self.__on_property_widget_changed)\n prop_widget.property_closed.connect(self.__on_prop_close)\n self._prop_list.setCellWidget(0, 0, prop_widget)\n\n item = QtWidgets.QTableWidgetItem(node.id)\n self._prop_list.setItem(0, 0, item)\n self._prop_list.selectRow(0)\n\n def remove_node(self, node):\n \"\"\"\n Remove node from the properties bin.\n\n Args:\n node (str or NodeGraphQt.BaseNode): node id or node object.\n \"\"\"\n node_id = node if isinstance(node, str) else node.id\n self.__on_prop_close(node_id)\n\n def lock_bin(self):\n \"\"\"\n Lock/UnLock the properties bin.\n \"\"\"\n self._lock = not self._lock\n if self._lock:\n self.btn_lock.setText('UnLock')\n else:\n self.btn_lock.setText('Lock')\n\n def clear_bin(self):\n \"\"\"\n Clear the properties bin.\n \"\"\"\n self._prop_list.setRowCount(0)\n\n def prop_widget(self, node):\n \"\"\"\n Returns the node property widget.\n\n Args:\n node (str or NodeGraphQt.NodeObject): node id or node object.\n\n Returns:\n NodePropWidget: node property widget.\n \"\"\"\n node_id = node if isinstance(node, str) else node.id\n itm_find = self._prop_list.findItems(node_id, QtCore.Qt.MatchExactly)\n if itm_find:\n item = itm_find[0]\n return self._prop_list.cellWidget(item.row(), 0)" } ]
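Taken together, the two context snippets above (NodeGraph and PropertiesBinWidget) document enough API to stand up a minimal UI. The following is a hedged sketch assembled only from those docstrings, not code taken from the record itself; the QApplication bootstrap is an assumption, and the rest mirrors the usage example embedded in the PropertiesBinWidget docstring:

import sys

from Qt import QtWidgets  # Qt binding wrapper, matching the record's import_statement
from NodeGraphQt import NodeGraph, PropertiesBinWidget

app = QtWidgets.QApplication(sys.argv)

# NodeGraph() registers BackdropNode under the alias 'Backdrop' at init time
# (see _register_builtin_nodes in the graph.py snippet above).
graph = NodeGraph()

# The properties bin wires itself to the graph in its constructor:
# node_double_clicked -> add_node, plus the nodes_deleted and
# property_changed slots documented above.
properties_bin = PropertiesBinWidget(parent=None, node_graph=graph)

graph.widget.show()   # lazily builds the NodeGraphWidget tab container
properties_bin.show()

sys.exit(app.exec_())

Double-clicking a node in the viewer then loads it into the bin, exactly as the node_double_clicked signal documentation describes.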
import sys
import socket
import pickle
import json
import requests
import config as cfg
import plugins_ui as plugins
from Qt import QtCore, QtWidgets
from PySide2.QtGui import QPixmap
from NodeGraphQt import NodeGraph, PropertiesBinWidget
16,903
#!/usr/bin/python3.10
"""
GUI for video or image analysis and processing

Based on the NodeGraphQt a node graph UI framework written in python
that can be implemented and re-purposed into applications supporting PySide2.
"""

# Loading configuration data
if cfg.nodegraphqt not in sys.path:
    sys.path.append(cfg.nodegraphqt)

PLUGINS = plugins.PluginRegistration()
#!/usr/bin/python3.10
"""
GUI for video or image analysis and processing

Based on the NodeGraphQt a node graph UI framework written in python
that can be implemented and re-purposed into applications supporting PySide2.
"""

# Loading configuration data
if cfg.nodegraphqt not in sys.path:
    sys.path.append(cfg.nodegraphqt)

PLUGINS = plugins.PluginRegistration()
class NodeBased(NodeGraph):
0
2023-10-18 14:11:43+00:00
24k
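That closes the first record: given the import_statement and cropped_code above, the expected completion is the next_line field (class NodeBased(NodeGraph):), and gold_snippet_index 0 points at the graph.py snippet as the most relevant context entry. As a sketch of how such a record might be consumed, assuming only the field names visible in the schema (the prompt template itself is an illustrative choice, not part of the dataset):

def build_completion_example(record: dict) -> tuple[str, str]:
    """Turn one record into a (prompt, target) pair for next-line prediction."""
    # The gold context snippet is the one retrieval is expected to surface.
    gold = record["context"][record["gold_snippet_index"]]
    retrieved = "# Path: {}\n{}\n".format(gold["path"], gold["snippet"])
    # Prepend retrieved context, then the in-file prefix the model must extend.
    prompt = retrieved + record["import_statement"] + "\n" + record["cropped_code"]
    return prompt, record["next_line"]

For this record, the returned target would be the single line class NodeBased(NodeGraph):.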
f0uriest/interpax
tests/test_interpolate.py
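The context entries that follow document interpax's FFT and spline interpolation APIs (fft_interp1d, fft_interp2d, Interpolator1D/2D/3D, interp1d). As a forward reference, here is a minimal usage sketch derived only from the signatures and docstrings quoted below; the sample data is an illustrative assumption, as is the package-level import (the test file path suggests these names are re-exported from interpax):

import jax.numpy as jnp

from interpax import Interpolator1D, fft_interp1d

# One full period of samples, endpoint excluded -- the input convention
# that fft_interp1d's docstring states for periodic source data.
x = jnp.linspace(0, 2 * jnp.pi, 16, endpoint=False)
f = jnp.sin(x)

# FFT-based resampling of the periodic signal onto 64 points.
f_fine = fft_interp1d(f, 64, dx=float(x[1] - x[0]))

# Spline interpolation via the convenience class; method="cubic" gives the
# C1 local cubic splines described in the Interpolator1D docstring.
interp = Interpolator1D(x, f, method="cubic", period=2 * jnp.pi)
xq = jnp.linspace(0, 2 * jnp.pi, 100)
fq = interp(xq)          # interpolated values
dfq = interp(xq, dx=1)   # first derivative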
[ { "identifier": "fft_interp1d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=\"n\")\ndef fft_interp1d(f: jax.Array, n: int, sx: jax.Array = None, dx: float = 1.0):\n \"\"\"Interpolation of a 1d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n : int\n Number of desired interpolation points.\n sx : ndarray or None\n Shift in x to evaluate at. If original data is f(x), interpolates to f(x + sx)\n dx : float\n Spacing of source points\n\n Returns\n -------\n fi : ndarray, shape(n, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft(f, axis=0)\n nx = c.shape[0]\n if sx is not None:\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n c = (c[None].T * sx).T\n c = jnp.moveaxis(c, 0, -1)\n pad = ((n - nx) // 2, n - nx - (n - nx) // 2)\n if nx % 2 != 0:\n pad = pad[::-1]\n c = jnp.fft.ifftshift(_pad_along_axis(jnp.fft.fftshift(c, axes=0), pad, axis=0))\n return jnp.fft.fft(c, axis=0).real" }, { "identifier": "fft_interp2d", "path": "interpax/_fourier.py", "snippet": "@partial(jit, static_argnames=(\"n1\", \"n2\"))\ndef fft_interp2d(\n f: jax.Array,\n n1: int,\n n2: int,\n sx: jax.Array = None,\n sy: jax.Array = None,\n dx: float = 1.0,\n dy: float = 1.0,\n):\n \"\"\"Interpolation of a 2d periodic function via FFT.\n\n Parameters\n ----------\n f : ndarray, shape(nx, ny, ...)\n Source data. Assumed to cover 1 full period, excluding the endpoint.\n n1, n2 : int\n Number of desired interpolation points in x and y directions\n sx, sy : ndarray or None\n Shift in x and y to evaluate at. If original data is f(x,y), interpolates to\n f(x + sx, y + sy). Both must be provided or None\n dx, dy : float\n Spacing of source points in x and y\n\n Returns\n -------\n fi : ndarray, shape(n1, n2, ..., len(sx))\n Interpolated (and possibly shifted) data points\n \"\"\"\n c = jnp.fft.ifft2(f, axes=(0, 1))\n nx, ny = c.shape[:2]\n if (sx is not None) and (sy is not None):\n sx = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(nx)[:, None] * sx / dx)\n sy = jnp.exp(-1j * 2 * jnp.pi * jnp.fft.fftfreq(ny)[:, None] * sy / dy)\n c = (c[None].T * sx[None, :, :] * sy[:, None, :]).T\n c = jnp.moveaxis(c, 0, -1)\n padx = ((n1 - nx) // 2, n1 - nx - (n1 - nx) // 2)\n pady = ((n2 - ny) // 2, n2 - ny - (n2 - ny) // 2)\n if nx % 2 != 0:\n padx = padx[::-1]\n if ny % 2 != 0:\n pady = pady[::-1]\n\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=0), padx, axis=0), axes=0\n )\n c = jnp.fft.ifftshift(\n _pad_along_axis(jnp.fft.fftshift(c, axes=1), pady, axis=1), axes=1\n )\n\n return jnp.fft.fft2(c, axes=(0, 1)).real" }, { "identifier": "Interpolator1D", "path": "interpax/_spline.py", "snippet": "class Interpolator1D(eqx.Module):\n \"\"\"Convenience class for representing a 1D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. 
If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n ):\n x, f = map(jnp.asarray, (x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n\n self.derivs = {\"fx\": fx}\n\n def __call__(self, xq: jax.Array, dx: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n Query points where interpolation is desired\n dx : int >= 0\n Derivative to take.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp1d(\n xq,\n self.x,\n self.f,\n self.method,\n dx,\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator2D", "path": "interpax/_spline.py", "snippet": "class Interpolator2D(eqx.Module):\n \"\"\"Convenience class for representing a 2D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, f = map(jnp.asarray, (x, y, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n self.x = x\n self.y = y\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n\n self.derivs = {\"fx\": fx, \"fy\": fy, \"fxy\": fxy}\n\n def __call__(self, xq: jax.Array, yq: jax.Array, dx: int = 0, dy: int = 0):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq : ndarray, shape(Nq,)\n x, y query points where interpolation is desired\n dx, dy : int >= 0\n Derivative to take in x, y directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp2d(\n xq,\n yq,\n self.x,\n self.y,\n self.f,\n self.method,\n (dx, dy),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "Interpolator3D", "path": "interpax/_spline.py", "snippet": "class Interpolator3D(eqx.Module):\n \"\"\"Convenience class for representing a 3D interpolated function.\n\n Parameters\n ----------\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y, z directions. 
None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. Use a\n single value for the same in both directions.\n\n Notes\n -----\n This class is registered as a PyTree in JAX (it is actually an equinox.Module)\n so should be compatible with standard JAX transformations (jit, grad, vmap, etc.)\n\n \"\"\"\n\n x: jax.Array\n y: jax.Array\n z: jax.Array\n f: jax.Array\n derivs: dict\n method: str\n extrap: Union[bool, float, tuple]\n period: Union[None, float, tuple]\n axis: int\n\n def __init__(\n self,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n ):\n x, y, z, f = map(jnp.asarray, (x, y, z, f))\n axis = kwargs.get(\"axis\", 0)\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n self.x = x\n self.y = y\n self.z = z\n self.f = f\n self.axis = axis\n self.method = method\n self.extrap = extrap\n self.period = period\n\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n\n self.derivs = {\n \"fx\": fx,\n \"fy\": fy,\n \"fz\": fz,\n \"fxy\": fxy,\n \"fxz\": fxz,\n \"fyz\": fyz,\n \"fxyz\": fxyz,\n }\n\n def __call__(\n self,\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n dx: int = 0,\n dy: int = 0,\n dz: int = 0,\n ):\n \"\"\"Evaluate the interpolated function or its derivatives.\n\n Parameters\n ----------\n xq, yq, zq : ndarray, shape(Nq,)\n x, y, z query points where interpolation is desired\n dx, dy, dz : int >= 0\n Derivative to take in x, y, z directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq, ...)\n Interpolated values.\n \"\"\"\n return interp3d(\n xq,\n yq,\n zq,\n self.x,\n self.y,\n self.z,\n self.f,\n self.method,\n (dx, dy, dz),\n self.extrap,\n self.period,\n **self.derivs,\n )" }, { "identifier": "interp1d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp1d(\n xq: jax.Array,\n x: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 1d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n query points where interpolation is desired\n x : ndarray, shape(Nx,)\n coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: 
linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n - ``'monotonic'``: C1 cubic splines that attempt to preserve monotonicity in the\n data, and will not introduce new extrema in the interpolated points\n - ``'monotonic-0'``: same as ``'monotonic'`` but with 0 first derivatives at\n both endpoints\n\n derivative : int >= 0\n derivative order to calculate\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as a 2 element array or tuple to specify different conditions\n for xq<x[0] and x[-1]<xq\n period : float > 0, None\n periodicity of the function. If given, function is assumed to be periodic\n on the interval [0,period]. None denotes no periodicity\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, f data, recommend using Interpolator1D\n which caches the calculation of the derivatives and spline coefficients.\n\n \"\"\"\n xq, x, f = map(jnp.asarray, (xq, x, f))\n axis = kwargs.get(\"axis\", 0)\n fx = kwargs.pop(\"fx\", None)\n outshape = xq.shape + f.shape[1:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq = jnp.atleast_1d(xq)\n\n errorif(\n (len(x) != f.shape[axis]) or (jnp.ndim(x) != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_1D, ValueError, f\"unknown method {method}\")\n\n lowx, highx = _parse_extrap(extrap, 1)\n\n if period is not None:\n xq, x, f, fx = _make_periodic(xq, x, period, axis, f, fx)\n lowx = highx = True\n\n if method == \"nearest\":\n\n def derivative0():\n i = jnp.argmin(jnp.abs(xq[:, np.newaxis] - x[np.newaxis]), axis=1)\n return f[i]\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1])\n\n elif method == \"linear\":\n\n def derivative0():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n delta = xq - x[i - 1]\n fq = jnp.where(\n (dx == 0),\n jnp.take(f, i, axis).T,\n jnp.take(f, i - 1, axis).T + (delta * dxi * df.T),\n ).T\n return fq\n\n def derivative1():\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n df = jnp.take(f, i, axis) - jnp.take(f, i - 1, axis)\n dx = x[i] - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n return (df.T * dxi).T\n\n def derivative2():\n return jnp.zeros((xq.size, *f.shape[1:]))\n\n fq = jax.lax.switch(derivative, [derivative0, derivative1, derivative2])\n\n elif method in (CUBIC_METHODS + (\"monotonic\", \"monotonic-0\")):\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n if fx is None:\n fx = approx_df(x, f, method, axis, **kwargs)\n assert fx.shape == f.shape\n\n dx = x[i] - x[i - 1]\n delta = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n t = delta * dxi\n\n f0 = jnp.take(f, i - 1, axis)\n f1 = jnp.take(f, i, axis)\n fx0 = (jnp.take(fx, i - 1, axis).T * dx).T\n fx1 = (jnp.take(fx, i, 
axis).T * dx).T\n\n F = jnp.stack([f0, f1, fx0, fx1], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_CUBIC, F).T\n ttx = _get_t_der(t, derivative, dxi)\n fq = jnp.einsum(\"ji...,ij->i...\", coef, ttx)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n return fq.reshape(outshape)" }, { "identifier": "interp2d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp2d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 2d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0 or array-like, shape(2,)\n derivative order to calculate in x, y. Use a single value for the same in both\n directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions\n [[xlow, xhigh],[ylow,yhigh]]\n period : float > 0, None, array-like, shape(2,)\n periodicity of the function in x, y directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in both directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, f data, recommend using\n Interpolator2D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, x, y, f = map(jnp.asarray, (xq, yq, x, y, f))\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fxy = kwargs.pop(\"fxy\", None)\n xq, yq = jnp.broadcast_arrays(xq, yq)\n outshape = xq.shape + f.shape[2:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq = map(jnp.atleast_1d, (xq, yq))\n\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_2D, ValueError, f\"unknown method {method}\")\n\n periodx, periody = _parse_ndarg(period, 2)\n derivative_x, derivative_y = _parse_ndarg(derivative, 2)\n lowx, highx, lowy, highy = _parse_extrap(extrap, 2)\n\n if periodx is not None:\n xq, x, f, fx, fy, fxy = _make_periodic(xq, x, periodx, 0, f, fx, fy, fxy)\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fxy = _make_periodic(yq, y, periody, 1, f, fx, fy, fxy)\n lowy = highy = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 4 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n neighbors_x = jnp.array(\n [[x[i], x[i - 1], x[i], x[i - 1]], [y[j], y[j], y[j - 1], y[j - 1]]]\n )\n neighbors_f = jnp.array(\n [f[i, j].T, f[i - 1, j].T, f[i, j - 1].T, f[i - 1, j - 1].T]\n )\n xyq = jnp.array([xq, yq])\n dist = jnp.linalg.norm(neighbors_x - xyq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[2:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0), derivative0, derivative1\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n f00 = f[i - 1, j - 1]\n f01 = f[i - 1, j]\n f10 = f[i, j - 1]\n f11 = f[i, j]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n F = jnp.array([[f00, f01], [f10, f11]])\n fq = (dxi * dyi * jnp.einsum(\"ijk...,ik,jk->k...\", F, tx, ty).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fxy is None:\n fxy = 
approx_df(y, fx, method, 1, **kwargs)\n assert fx.shape == fy.shape == fxy.shape == f.shape\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fxy\"] = fxy\n fsq = OrderedDict()\n for ff in fs.keys():\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_BICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, *coef.shape[1:]), order=\"F\"), 2, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n fq = jnp.einsum(\"ijk...,ij,ik->i...\", coef, ttx, tty)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n\n return fq.reshape(outshape)" }, { "identifier": "interp3d", "path": "interpax/_spline.py", "snippet": "@partial(jit, static_argnames=\"method\")\ndef interp3d( # noqa: C901 - FIXME: break this up into simpler pieces\n xq: jax.Array,\n yq: jax.Array,\n zq: jax.Array,\n x: jax.Array,\n y: jax.Array,\n z: jax.Array,\n f: jax.Array,\n method: str = \"cubic\",\n derivative: int = 0,\n extrap: Union[bool, float, tuple] = False,\n period: Union[None, float, tuple] = None,\n **kwargs,\n):\n \"\"\"Interpolate a 3d function.\n\n Parameters\n ----------\n xq : ndarray, shape(Nq,)\n x query points where interpolation is desired\n yq : ndarray, shape(Nq,)\n y query points where interpolation is desired\n zq : ndarray, shape(Nq,)\n z query points where interpolation is desired\n x : ndarray, shape(Nx,)\n x coordinates of known function values (\"knots\")\n y : ndarray, shape(Ny,)\n y coordinates of known function values (\"knots\")\n z : ndarray, shape(Nz,)\n z coordinates of known function values (\"knots\")\n f : ndarray, shape(Nx,Ny,Nz,...)\n function values to interpolate\n method : str\n method of interpolation\n\n - ``'nearest'``: nearest neighbor interpolation\n - ``'linear'``: linear interpolation\n - ``'cubic'``: C1 cubic splines (aka local splines)\n - ``'cubic2'``: C2 cubic splines (aka natural splines)\n - ``'catmull-rom'``: C1 cubic centripetal \"tension\" splines\n - ``'cardinal'``: C1 cubic general tension splines. If used, can also pass\n keyword parameter ``c`` in float[0,1] to specify tension\n\n derivative : int >= 0, array-like, shape(3,)\n derivative order to calculate in x,y,z directions. Use a single value for the\n same in all directions.\n extrap : bool, float, array-like\n whether to extrapolate values beyond knots (True) or return nan (False),\n or a specified value to return for query points outside the bounds. Can\n also be passed as an array or tuple to specify different conditions for\n [[xlow, xhigh],[ylow,yhigh],[zlow,zhigh]]\n period : float > 0, None, array-like, shape(3,)\n periodicity of the function in x, y, z directions. None denotes no periodicity,\n otherwise function is assumed to be periodic on the interval [0,period]. 
Use a\n single value for the same in all directions.\n\n Returns\n -------\n fq : ndarray, shape(Nq,...)\n function value at query points\n\n Notes\n -----\n For repeated interpolation given the same x, y, z, f data, recommend using\n Interpolator3D which caches the calculation of the derivatives and spline\n coefficients.\n\n \"\"\"\n xq, yq, zq, x, y, z, f = map(jnp.asarray, (xq, yq, zq, x, y, z, f))\n errorif(\n (len(x) != f.shape[0]) or (x.ndim != 1),\n ValueError,\n \"x and f must be arrays of equal length\",\n )\n errorif(\n (len(y) != f.shape[1]) or (y.ndim != 1),\n ValueError,\n \"y and f must be arrays of equal length\",\n )\n errorif(\n (len(z) != f.shape[2]) or (z.ndim != 1),\n ValueError,\n \"z and f must be arrays of equal length\",\n )\n errorif(method not in METHODS_3D, ValueError, f\"unknown method {method}\")\n\n xq, yq, zq = jnp.broadcast_arrays(xq, yq, zq)\n outshape = xq.shape + f.shape[3:]\n\n # Promote scalar query points to 1D array.\n # Note this is done after the computation of outshape\n # to make jax.grad work in the scalar case.\n xq, yq, zq = map(jnp.atleast_1d, (xq, yq, zq))\n\n fx = kwargs.pop(\"fx\", None)\n fy = kwargs.pop(\"fy\", None)\n fz = kwargs.pop(\"fz\", None)\n fxy = kwargs.pop(\"fxy\", None)\n fxz = kwargs.pop(\"fxz\", None)\n fyz = kwargs.pop(\"fyz\", None)\n fxyz = kwargs.pop(\"fxyz\", None)\n\n periodx, periody, periodz = _parse_ndarg(period, 3)\n derivative_x, derivative_y, derivative_z = _parse_ndarg(derivative, 3)\n lowx, highx, lowy, highy, lowz, highz = _parse_extrap(extrap, 3)\n\n if periodx is not None:\n xq, x, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n xq, x, periodx, 0, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowx = highx = True\n if periody is not None:\n yq, y, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n yq, y, periody, 1, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowy = highy = True\n if periodz is not None:\n zq, z, f, fx, fy, fz, fxy, fxz, fyz, fxyz = _make_periodic(\n zq, z, periodz, 2, f, fx, fy, fz, fxy, fxz, fyz, fxyz\n )\n lowz = highz = True\n\n if method == \"nearest\":\n\n def derivative0():\n # because of the regular spaced grid we know that the nearest point\n # will be one of the 8 neighbors on the grid, so we first find those\n # and then take the nearest one among them.\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n neighbors_x = jnp.array(\n [\n [x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1], x[i], x[i - 1]],\n [y[j], y[j], y[j - 1], y[j - 1], y[j], y[j], y[j - 1], y[j - 1]],\n [z[k], z[k], z[k], z[k], z[k - 1], z[k - 1], z[k - 1], z[k - 1]],\n ]\n )\n neighbors_f = jnp.array(\n [\n f[i, j, k].T,\n f[i - 1, j, k].T,\n f[i, j - 1, k].T,\n f[i - 1, j - 1, k].T,\n f[i, j, k - 1].T,\n f[i - 1, j, k - 1].T,\n f[i, j - 1, k - 1].T,\n f[i - 1, j - 1, k - 1].T,\n ]\n )\n xyzq = jnp.array([xq, yq, zq])\n dist = jnp.linalg.norm(neighbors_x - xyzq[:, None, :], axis=0)\n idx = jnp.argmin(dist, axis=0)\n return jax.vmap(lambda a, b: jnp.take(a, b, axis=-1))(neighbors_f.T, idx)\n\n def derivative1():\n return jnp.zeros((xq.size, *f.shape[3:]))\n\n fq = jax.lax.cond(\n (derivative_x == 0) & (derivative_y == 0) & (derivative_z == 0),\n derivative0,\n derivative1,\n )\n\n elif method == \"linear\":\n\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k 
= jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n f000 = f[i - 1, j - 1, k - 1]\n f001 = f[i - 1, j - 1, k]\n f010 = f[i - 1, j, k - 1]\n f100 = f[i, j - 1, k - 1]\n f110 = f[i, j, k - 1]\n f011 = f[i - 1, j, k]\n f101 = f[i, j - 1, k]\n f111 = f[i, j, k]\n x0 = x[i - 1]\n x1 = x[i]\n y0 = y[j - 1]\n y1 = y[j]\n z0 = z[k - 1]\n z1 = z[k]\n dx = x1 - x0\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n dy = y1 - y0\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n dz = z1 - z0\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n\n dx0 = lambda: jnp.array([x1 - xq, xq - x0])\n dx1 = lambda: jnp.array([-jnp.ones_like(xq), jnp.ones_like(xq)])\n dx2 = lambda: jnp.zeros((2, xq.size))\n dy0 = lambda: jnp.array([y1 - yq, yq - y0])\n dy1 = lambda: jnp.array([-jnp.ones_like(yq), jnp.ones_like(yq)])\n dy2 = lambda: jnp.zeros((2, yq.size))\n dz0 = lambda: jnp.array([z1 - zq, zq - z0])\n dz1 = lambda: jnp.array([-jnp.ones_like(zq), jnp.ones_like(zq)])\n dz2 = lambda: jnp.zeros((2, zq.size))\n\n tx = jax.lax.switch(derivative_x, [dx0, dx1, dx2])\n ty = jax.lax.switch(derivative_y, [dy0, dy1, dy2])\n tz = jax.lax.switch(derivative_z, [dz0, dz1, dz2])\n\n F = jnp.array([[[f000, f001], [f010, f011]], [[f100, f101], [f110, f111]]])\n fq = (dxi * dyi * dzi * jnp.einsum(\"lijk...,lk,ik,jk->k...\", F, tx, ty, tz).T).T\n\n elif method in CUBIC_METHODS:\n if fx is None:\n fx = approx_df(x, f, method, 0, **kwargs)\n if fy is None:\n fy = approx_df(y, f, method, 1, **kwargs)\n if fz is None:\n fz = approx_df(z, f, method, 2, **kwargs)\n if fxy is None:\n fxy = approx_df(y, fx, method, 1, **kwargs)\n if fxz is None:\n fxz = approx_df(z, fx, method, 2, **kwargs)\n if fyz is None:\n fyz = approx_df(z, fy, method, 2, **kwargs)\n if fxyz is None:\n fxyz = approx_df(z, fxy, method, 2, **kwargs)\n assert (\n fx.shape\n == fy.shape\n == fz.shape\n == fxy.shape\n == fxz.shape\n == fyz.shape\n == fxyz.shape\n == f.shape\n )\n i = jnp.clip(jnp.searchsorted(x, xq, side=\"right\"), 1, len(x) - 1)\n j = jnp.clip(jnp.searchsorted(y, yq, side=\"right\"), 1, len(y) - 1)\n k = jnp.clip(jnp.searchsorted(z, zq, side=\"right\"), 1, len(z) - 1)\n\n dx = x[i] - x[i - 1]\n deltax = xq - x[i - 1]\n dxi = jnp.where(dx == 0, 0, 1 / dx)\n tx = deltax * dxi\n\n dy = y[j] - y[j - 1]\n deltay = yq - y[j - 1]\n dyi = jnp.where(dy == 0, 0, 1 / dy)\n ty = deltay * dyi\n\n dz = z[k] - z[k - 1]\n deltaz = zq - z[k - 1]\n dzi = jnp.where(dz == 0, 0, 1 / dz)\n tz = deltaz * dzi\n\n fs = OrderedDict()\n fs[\"f\"] = f\n fs[\"fx\"] = fx\n fs[\"fy\"] = fy\n fs[\"fz\"] = fz\n fs[\"fxy\"] = fxy\n fs[\"fxz\"] = fxz\n fs[\"fyz\"] = fyz\n fs[\"fxyz\"] = fxyz\n fsq = OrderedDict()\n for ff in fs.keys():\n for kk in [0, 1]:\n for jj in [0, 1]:\n for ii in [0, 1]:\n s = ff + str(ii) + str(jj) + str(kk)\n fsq[s] = fs[ff][i - 1 + ii, j - 1 + jj, k - 1 + kk]\n if \"x\" in ff:\n fsq[s] = (dx * fsq[s].T).T\n if \"y\" in ff:\n fsq[s] = (dy * fsq[s].T).T\n if \"z\" in ff:\n fsq[s] = (dz * fsq[s].T).T\n\n F = jnp.stack([foo for foo in fsq.values()], axis=0).T\n coef = jnp.vectorize(jnp.matmul, signature=\"(n,n),(n)->(n)\")(A_TRICUBIC, F).T\n coef = jnp.moveaxis(coef.reshape((4, 4, 4, *coef.shape[1:]), order=\"F\"), 3, 0)\n ttx = _get_t_der(tx, derivative_x, dxi)\n tty = _get_t_der(ty, derivative_y, dyi)\n ttz = _get_t_der(tz, derivative_z, dzi)\n fq = jnp.einsum(\"lijk...,li,lj,lk->l...\", coef, ttx, tty, ttz)\n\n fq = _extrap(xq, fq, x, lowx, highx)\n fq = _extrap(yq, fq, y, lowy, highy)\n fq = _extrap(zq, fq, z, lowz, highz)\n\n return fq.reshape(outshape)" } ]
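A minimal usage sketch implied by the fft_interp1d docstring above (assumptions: interpax is importable, the samples cover one full period with the endpoint excluded, and the signal is band-limited so frequency-domain zero-padding reproduces it exactly):

import jax.numpy as jnp
from interpax import fft_interp1d

# 16 samples of a periodic signal on [0, 2*pi), endpoint excluded
x = jnp.linspace(0, 2 * jnp.pi, 16, endpoint=False)
f = jnp.sin(x) + 0.5 * jnp.cos(2 * x)

# Upsample to 64 equispaced points by zero-padding in frequency space
fi = fft_interp1d(f, 64)

# For band-limited input the result matches the true function
xi = jnp.linspace(0, 2 * jnp.pi, 64, endpoint=False)
assert jnp.allclose(fi, jnp.sin(xi) + 0.5 * jnp.cos(2 * xi), atol=1e-5)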
import jax import jax.numpy as jnp import numpy as np import pytest from jax import config as jax_config from interpax import ( Interpolator1D, Interpolator2D, Interpolator3D, fft_interp1d, fft_interp2d, interp1d, interp2d, interp3d, )
15,785
"""Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: 
interp3d( xq, yq, zq, *args, **kwargs )
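The interp1d docstring above recommends Interpolator1D for repeated evaluation on fixed data, since the class caches the derivative and spline setup at construction while the functional form recomputes it per call. A sketch of the equivalence (assuming interpax is importable):

import jax.numpy as jnp
from interpax import Interpolator1D, interp1d

xp = jnp.linspace(0, 2 * jnp.pi, 100)
fp = jnp.sin(xp)
xq = jnp.linspace(0, 2 * jnp.pi, 37)

# Functional form: derivative estimates are recomputed on every call
fq_direct = interp1d(xq, xp, fp, method="cubic")

# Class form: derivatives are computed once and cached in self.derivs
spline = Interpolator1D(xp, fp, method="cubic")
fq_cached = spline(xq)

assert jnp.allclose(fq_direct, fq_cached)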
"""Tests for interpolation functions.""" jax_config.update("jax_enable_x64", True) class TestInterp1D: """Tests for interp1d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x", [ np.linspace(0, 2 * np.pi, 10000), 0.0, ], ) def test_interp1d(self, x): """Test accuracy of different 1d interpolation methods.""" xp = np.linspace(0, 2 * np.pi, 100) f = lambda x: np.sin(x) fp = f(xp) interp1 = lambda xq, *args, **kwargs: interp1d(xq, *args, **kwargs) interp2 = lambda xq, *args, **kwargs: Interpolator1D(*args, **kwargs)(xq) for interp in [interp1, interp2]: fq = interp(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x), rtol=1e-2, atol=1e-1) fq = interp(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-5) fq = interp(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-3) fq = interp(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x), rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 2 * np.pi, 100) x = np.linspace(0, 2 * np.pi, 300)[10:-10] f = lambda x: np.array([np.sin(x), np.cos(x)]) fp = f(xp).T fq = interp1d(x, xp, fp, method="nearest") np.testing.assert_allclose(fq, f(x).T, rtol=1e-2, atol=1e-1) fq = interp1d(x, xp, fp, method="linear") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="cubic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cubic2") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="cardinal") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="catmull-rom") np.testing.assert_allclose(fq, f(x).T, rtol=1e-6, atol=1e-5) fq = interp1d(x, xp, fp, method="monotonic") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-3) fq = interp1d(x, xp, fp, method="monotonic-0") np.testing.assert_allclose(fq, f(x).T, rtol=1e-4, atol=1e-2) @pytest.mark.unit def test_interp1d_extrap_periodic(self): """Test extrapolation and periodic BC of 1d interpolation.""" xp = np.linspace(0, 2 * np.pi, 200) x = np.linspace(-1, 2 * np.pi + 1, 10000) f = lambda x: np.sin(x) fp = f(xp) fq = interp1d(x, xp, fp, method="cubic", extrap=False) assert np.isnan(fq[0]) assert np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", extrap=True) assert not np.isnan(fq[0]) assert not np.isnan(fq[-1]) fq = interp1d(x, xp, fp, method="cubic", period=2 * np.pi) np.testing.assert_allclose(fq, f(x), rtol=1e-6, atol=1e-2) @pytest.mark.unit def test_interp1d_monotonic(self): """Ensure monotonic interpolation is actually monotonic.""" # true function is just linear with a jump discontinuity at x=1.5 x = np.linspace(-4, 5, 10) f = np.heaviside(x - 1.5, 0) + 0.1 * x xq = np.linspace(-4, 5, 1000) dfc = interp1d(xq, x, f, derivative=1, method="cubic") dfm = interp1d(xq, x, f, derivative=1, method="monotonic") dfm0 = interp1d(xq, x, f, derivative=1, method="monotonic-0") assert dfc.min() < 0 # cubic interpolation undershoots, giving negative slope 
assert dfm.min() > 0 # monotonic interpolation doesn't assert dfm0.min() >= 0 # monotonic-0 doesn't overshoot either # ensure monotonic-0 has 0 slope at end points np.testing.assert_allclose(dfm0[np.array([0, -1])], 0, atol=1e-12) class TestInterp2D: """Tests for interp2d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y", [ (np.linspace(0, 3 * np.pi, 1000), np.linspace(0, 2 * np.pi, 1000)), (0.0, 0.0), ], ) def test_interp2d(self, x, y): """Test accuracy of different 2d interpolation methods.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.sin(x) * np.cos(y) fp = f(xxp, yyp) interp1 = lambda xq, yq, *args, **kwargs: interp2d(xq, yq, *args, **kwargs) interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)( xq, yq ) for interp in [interp1, interp2]: fq = interp( x, y, xp, yp, fp, method="nearest", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-2, atol=1) fq = interp( x, y, xp, yp, fp, method="linear", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=1e-4, atol=1e-2) atol = 2e-3 rtol = 1e-5 fq = interp(x, y, xp, yp, fp, method="cubic", period=(2 * np.pi, 2 * np.pi)) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cubic2", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="catmull-rom", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) fq = interp( x, y, xp, yp, fp, method="cardinal", period=(2 * np.pi, 2 * np.pi) ) np.testing.assert_allclose(fq, f(x, y), rtol=rtol, atol=atol) @pytest.mark.unit def test_interp2d_vector_valued(self): """Test for interpolating vector valued function.""" xp = np.linspace(0, 3 * np.pi, 99) yp = np.linspace(0, 2 * np.pi, 40) x = np.linspace(0, 3 * np.pi, 200) y = np.linspace(0, 2 * np.pi, 200) xxp, yyp = np.meshgrid(xp, yp, indexing="ij") f = lambda x, y: np.array([np.sin(x) * np.cos(y), np.sin(x) + np.cos(y)]) fp = f(xxp.T, yyp.T).T fq = interp2d(x, y, xp, yp, fp, method="nearest") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-2, atol=1.2e-1) fq = interp2d(x, y, xp, yp, fp, method="linear") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-3, atol=1e-2) fq = interp2d(x, y, xp, yp, fp, method="cubic") np.testing.assert_allclose(fq, f(x, y).T, rtol=1e-5, atol=2e-3) class TestInterp3D: """Tests for interp3d function.""" @pytest.mark.unit @pytest.mark.parametrize( "x, y, z", [ ( np.linspace(0, np.pi, 1000), np.linspace(0, 2 * np.pi, 1000), np.linspace(0, 3, 1000), ), (0.0, 0.0, 0.0), ], ) def test_interp3d(self, x, y, z): """Test accuracy of different 3d interpolation methods.""" xp = np.linspace(0, np.pi, 20) yp = np.linspace(0, 2 * np.pi, 30) zp = np.linspace(0, 3, 25) xxp, yyp, zzp = np.meshgrid(xp, yp, zp, indexing="ij") f = lambda x, y, z: np.sin(x) * np.cos(y) * z**2 fp = f(xxp, yyp, zzp) interp1 = lambda xq, yq, zq, *args, **kwargs: interp3d( xq, yq, zq, *args, **kwargs )
interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)(
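By symmetry with the 2D wrapper in the cropped code above (interp2 = lambda xq, yq, *args, **kwargs: Interpolator2D(*args, **kwargs)(xq, yq)), this next line presumably completes as follows; the continuation is an inference, not part of the stored next_line:

from interpax import Interpolator3D

# Hypothetical completion, inferred from the analogous 2D test wrapper
interp2 = lambda xq, yq, zq, *args, **kwargs: Interpolator3D(*args, **kwargs)(
    xq, yq, zq
)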
4
2023-10-18 13:12:20+00:00
24k
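The 2D FFT interpolator documented in the context above follows the same pattern per axis; a minimal sketch under the same assumptions (importable interpax, band-limited periodic samples with endpoints excluded):

import jax.numpy as jnp
from interpax import fft_interp2d

x = jnp.linspace(0, 2 * jnp.pi, 8, endpoint=False)
y = jnp.linspace(0, 2 * jnp.pi, 12, endpoint=False)
f = jnp.sin(x)[:, None] * jnp.cos(y)[None, :]

# Upsample the 8 x 12 periodic grid to 32 x 48
fi = fft_interp2d(f, 32, 48)

xi = jnp.linspace(0, 2 * jnp.pi, 32, endpoint=False)
yi = jnp.linspace(0, 2 * jnp.pi, 48, endpoint=False)
assert jnp.allclose(fi, jnp.sin(xi)[:, None] * jnp.cos(yi)[None, :], atol=1e-5)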
city96/ComfyUI_ExtraModels
PixArt/sampler.py
[ { "identifier": "gaussian_diffusion", "path": "PixArt/sampling/gaussian_diffusion.py", "snippet": "def mean_flat(tensor):\n def is_vb(self):\ndef _warmup_beta(beta_start, beta_end, num_diffusion_timesteps, warmup_frac):\ndef get_beta_schedule(beta_schedule, *, beta_start, beta_end, num_diffusion_timesteps):\ndef get_named_beta_schedule(schedule_name, num_diffusion_timesteps):\ndef betas_for_alpha_bar(num_diffusion_timesteps, alpha_bar, max_beta=0.999):\n def __init__(\n self,\n *,\n betas,\n model_mean_type,\n model_var_type,\n loss_type,\n snr=False\n ):\n def q_mean_variance(self, x_start, t):\n def q_sample(self, x_start, t, noise=None):\n def q_posterior_mean_variance(self, x_start, x_t, t):\n def p_mean_variance(self, model, x, t, clip_denoised=True, denoised_fn=None, model_kwargs=None):\n def process_xstart(x):\n def _predict_xstart_from_eps(self, x_t, t, eps):\n def _predict_eps_from_xstart(self, x_t, t, pred_xstart):\n def condition_mean(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def condition_score(self, cond_fn, p_mean_var, x, t, model_kwargs=None):\n def p_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n ):\n def p_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n def p_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n ):\n def ddim_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_reverse_sample(\n self,\n model,\n x,\n t,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n eta=0.0,\n ):\n def ddim_sample_loop(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n def ddim_sample_loop_progressive(\n self,\n model,\n shape,\n noise=None,\n clip_denoised=True,\n denoised_fn=None,\n cond_fn=None,\n model_kwargs=None,\n device=None,\n progress=False,\n eta=0.0,\n ):\n def _vb_terms_bpd(\n self, model, x_start, x_t, t, clip_denoised=True, model_kwargs=None\n ):\n def training_losses(self, model, x_start, t, model_kwargs=None, noise=None):\n def _prior_bpd(self, x_start):\n def calc_bpd_loop(self, model, x_start, clip_denoised=True, model_kwargs=None):\ndef _extract_into_tensor(arr, timesteps, broadcast_shape):\nclass ModelMeanType(enum.Enum):\nclass ModelVarType(enum.Enum):\nclass LossType(enum.Enum):\nclass GaussianDiffusion:\n PREVIOUS_X = enum.auto() # the model predicts x_{t-1}\n START_X = enum.auto() # the model predicts x_0\n EPSILON = enum.auto() # the model predicts epsilon\n LEARNED = enum.auto()\n FIXED_SMALL = enum.auto()\n FIXED_LARGE = enum.auto()\n LEARNED_RANGE = enum.auto()\n MSE = enum.auto() # use raw MSE loss (and KL when learning variances)\n RESCALED_MSE = (\n enum.auto()\n ) # use raw MSE loss (with RESCALED_KL when learning variances)\n KL = enum.auto() # use the variational lower-bound\n RESCALED_KL = enum.auto() # like KL, but rescale to estimate the full VLB\n B, C = x.shape[:2]\n B, C = x_t.shape[:2]" }, { "identifier": "model_wrapper", "path": "PixArt/sampling/dpm_solver.py", "snippet": "def model_wrapper(\n model,\n noise_schedule,\n model_type=\"noise\",\n model_kwargs={},\n 
guidance_type=\"uncond\",\n condition=None,\n unconditional_condition=None,\n guidance_scale=1.,\n classifier_fn=None,\n classifier_kwargs={},\n):\n \"\"\"Create a wrapper function for the noise prediction model.\n\n DPM-Solver needs to solve the continuous-time diffusion ODEs. For DPMs trained on discrete-time labels, we need to\n firstly wrap the model function to a noise prediction model that accepts the continuous time as the input.\n\n We support four types of the diffusion model by setting `model_type`:\n\n 1. \"noise\": noise prediction model. (Trained by predicting noise).\n\n 2. \"x_start\": data prediction model. (Trained by predicting the data x_0 at time 0).\n\n 3. \"v\": velocity prediction model. (Trained by predicting the velocity).\n The \"v\" prediction is derivation detailed in Appendix D of [1], and is used in Imagen-Video [2].\n\n [1] Salimans, Tim, and Jonathan Ho. \"Progressive distillation for fast sampling of diffusion models.\"\n arXiv preprint arXiv:2202.00512 (2022).\n [2] Ho, Jonathan, et al. \"Imagen Video: High Definition Video Generation with Diffusion Models.\"\n arXiv preprint arXiv:2210.02303 (2022).\n\n 4. \"score\": marginal score function. (Trained by denoising score matching).\n Note that the score function and the noise prediction model follows a simple relationship:\n ```\n noise(x_t, t) = -sigma_t * score(x_t, t)\n ```\n\n We support three types of guided sampling by DPMs by setting `guidance_type`:\n 1. \"uncond\": unconditional sampling by DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n 2. \"classifier\": classifier guidance sampling [3] by DPMs and another classifier.\n The input `model` has the following format:\n ``\n model(x, t_input, **model_kwargs) -> noise | x_start | v | score\n ``\n\n The input `classifier_fn` has the following format:\n ``\n classifier_fn(x, t_input, cond, **classifier_kwargs) -> logits(x, t_input, cond)\n ``\n\n [3] P. Dhariwal and A. Q. Nichol, \"Diffusion models beat GANs on image synthesis,\"\n in Advances in Neural Information Processing Systems, vol. 34, 2021, pp. 8780-8794.\n\n 3. \"classifier-free\": classifier-free guidance sampling by conditional DPMs.\n The input `model` has the following format:\n ``\n model(x, t_input, cond, **model_kwargs) -> noise | x_start | v | score\n ``\n And if cond == `unconditional_condition`, the model output is the unconditional DPM output.\n\n [4] Ho, Jonathan, and Tim Salimans. \"Classifier-free diffusion guidance.\"\n arXiv preprint arXiv:2207.12598 (2022).\n\n\n The `t_input` is the time label of the model, which may be discrete-time labels (i.e. 0 to 999)\n or continuous-time labels (i.e. epsilon to T).\n\n We wrap the model function to accept only `x` and `t_continuous` as inputs, and outputs the predicted noise:\n ``\n def model_fn(x, t_continuous) -> noise:\n t_input = get_model_input_time(t_continuous)\n return noise_pred(model, x, t_input, **model_kwargs)\n ``\n where `t_continuous` is the continuous time labels (i.e. epsilon to T). And we use `model_fn` for DPM-Solver.\n\n ===============================================================\n\n Args:\n model: A diffusion model with the corresponding format described above.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n model_type: A `str`. The parameterization type of the diffusion model.\n \"noise\" or \"x_start\" or \"v\" or \"score\".\n model_kwargs: A `dict`. 
A dict for the other inputs of the model function.\n guidance_type: A `str`. The type of the guidance for sampling.\n \"uncond\" or \"classifier\" or \"classifier-free\".\n condition: A pytorch tensor. The condition for the guided sampling.\n Only used for \"classifier\" or \"classifier-free\" guidance type.\n unconditional_condition: A pytorch tensor. The condition for the unconditional sampling.\n Only used for \"classifier-free\" guidance type.\n guidance_scale: A `float`. The scale for the guided sampling.\n classifier_fn: A classifier function. Only used for the classifier guidance.\n classifier_kwargs: A `dict`. A dict for the other inputs of the classifier function.\n Returns:\n A noise prediction model that accepts the noised data and the continuous time as the inputs.\n \"\"\"\n\n def get_model_input_time(t_continuous):\n \"\"\"\n Convert the continuous-time `t_continuous` (in [epsilon, T]) to the model input time.\n For discrete-time DPMs, we convert `t_continuous` in [1 / N, 1] to `t_input` in [0, 1000 * (N - 1) / N].\n For continuous-time DPMs, we just use `t_continuous`.\n \"\"\"\n if noise_schedule.schedule == 'discrete':\n return (t_continuous - 1. / noise_schedule.total_N) * 1000.\n else:\n return t_continuous\n\n def noise_pred_fn(x, t_continuous, cond=None):\n t_input = get_model_input_time(t_continuous)\n if cond is None:\n output = model(\n x = x,\n timesteps = t_input,\n context = None,\n y = None,\n **model_kwargs\n )\n else:\n output = model(\n x = x,\n timesteps = t_input,\n context = cond,\n y = None,\n **model_kwargs\n )\n if model_type == \"noise\":\n return output\n elif model_type == \"x_start\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n return (x - expand_dims(alpha_t, x.dim()) * output) / expand_dims(sigma_t, x.dim())\n elif model_type == \"v\":\n alpha_t, sigma_t = noise_schedule.marginal_alpha(t_continuous), noise_schedule.marginal_std(t_continuous)\n return expand_dims(alpha_t, x.dim()) * output + expand_dims(sigma_t, x.dim()) * x\n elif model_type == \"score\":\n sigma_t = noise_schedule.marginal_std(t_continuous)\n return -expand_dims(sigma_t, x.dim()) * output\n\n def cond_grad_fn(x, t_input):\n \"\"\"\n Compute the gradient of the classifier, i.e. nabla_{x} log p_t(cond | x_t).\n \"\"\"\n with torch.enable_grad():\n x_in = x.detach().requires_grad_(True)\n log_prob = classifier_fn(x_in, t_input, condition, **classifier_kwargs)\n return torch.autograd.grad(log_prob.sum(), x_in)[0]\n\n def model_fn(x, t_continuous):\n \"\"\"\n The noise predicition model function that is used for DPM-Solver.\n \"\"\"\n if guidance_type == \"uncond\":\n return noise_pred_fn(x, t_continuous)\n elif guidance_type == \"classifier\":\n assert classifier_fn is not None\n t_input = get_model_input_time(t_continuous)\n cond_grad = cond_grad_fn(x, t_input)\n sigma_t = noise_schedule.marginal_std(t_continuous)\n noise = noise_pred_fn(x, t_continuous)\n return noise - guidance_scale * expand_dims(sigma_t, x.dim()) * cond_grad\n elif guidance_type == \"classifier-free\":\n if guidance_scale == 1. 
or unconditional_condition is None:\n return noise_pred_fn(x, t_continuous, cond=condition)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t_continuous] * 2)\n c_in = torch.cat([unconditional_condition, condition])\n noise_uncond, noise = noise_pred_fn(x_in, t_in, cond=c_in).chunk(2)\n return noise_uncond + guidance_scale * (noise - noise_uncond)\n\n assert model_type in [\"noise\", \"x_start\", \"v\", \"score\"]\n assert guidance_type in [\"uncond\", \"classifier\", \"classifier-free\"]\n return model_fn" }, { "identifier": "DPM_Solver", "path": "PixArt/sampling/dpm_solver.py", "snippet": "class DPM_Solver:\n def __init__(\n self,\n model_fn,\n noise_schedule,\n algorithm_type=\"dpmsolver++\",\n correcting_x0_fn=None,\n correcting_xt_fn=None,\n thresholding_max_val=1.,\n dynamic_thresholding_ratio=0.995,\n ):\n \"\"\"Construct a DPM-Solver.\n\n We support both DPM-Solver (`algorithm_type=\"dpmsolver\"`) and DPM-Solver++ (`algorithm_type=\"dpmsolver++\"`).\n\n We also support the \"dynamic thresholding\" method in Imagen[1]. For pixel-space diffusion models, you\n can set both `algorithm_type=\"dpmsolver++\"` and `correcting_x0_fn=\"dynamic_thresholding\"` to use the\n dynamic thresholding. The \"dynamic thresholding\" can greatly improve the sample quality for pixel-space\n DPMs with large guidance scales. Note that the thresholding method is **unsuitable** for latent-space\n DPMs (such as stable-diffusion).\n\n To support advanced algorithms in image-to-image applications, we also support corrector functions for\n both x0 and xt.\n\n Args:\n model_fn: A noise prediction model function which accepts the continuous-time input (t in [epsilon, T]):\n ``\n def model_fn(x, t_continuous):\n return noise\n ``\n The shape of `x` is `(batch_size, **shape)`, and the shape of `t_continuous` is `(batch_size,)`.\n noise_schedule: A noise schedule object, such as NoiseScheduleVP.\n algorithm_type: A `str`. Either \"dpmsolver\" or \"dpmsolver++\".\n correcting_x0_fn: A `str` or a function with the following format:\n ```\n def correcting_x0_fn(x0, t):\n x0_new = ...\n return x0_new\n ```\n This function is to correct the outputs of the data prediction model at each sampling step. e.g.,\n ```\n x0_pred = data_pred_model(xt, t)\n if correcting_x0_fn is not None:\n x0_pred = correcting_x0_fn(x0_pred, t)\n xt_1 = update(x0_pred, xt, t)\n ```\n If `correcting_x0_fn=\"dynamic_thresholding\"`, we use the dynamic thresholding proposed in Imagen[1].\n correcting_xt_fn: A function with the following format:\n ```\n def correcting_xt_fn(xt, t, step):\n x_new = ...\n return x_new\n ```\n This function is to correct the intermediate samples xt at each sampling step. e.g.,\n ```\n xt = ...\n xt = correcting_xt_fn(xt, t, step)\n ```\n thresholding_max_val: A `float`. The max value for thresholding.\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n dynamic_thresholding_ratio: A `float`. The ratio for dynamic thresholding (see Imagen[1] for details).\n Valid only when use `dpmsolver++` and `correcting_x0_fn=\"dynamic_thresholding\"`.\n\n [1] Chitwan Saharia, William Chan, Saurabh Saxena, Lala Li, Jay Whang, Emily Denton, Seyed Kamyar Seyed Ghasemipour,\n Burcu Karagol Ayan, S Sara Mahdavi, Rapha Gontijo Lopes, et al. Photorealistic text-to-image diffusion models\n with deep language understanding. 
arXiv preprint arXiv:2205.11487, 2022b.\n \"\"\"\n self.model = lambda x, t: model_fn(x, t.expand((x.shape[0])))\n self.noise_schedule = noise_schedule\n assert algorithm_type in [\"dpmsolver\", \"dpmsolver++\"]\n self.algorithm_type = algorithm_type\n if correcting_x0_fn == \"dynamic_thresholding\":\n self.correcting_x0_fn = self.dynamic_thresholding_fn\n else:\n self.correcting_x0_fn = correcting_x0_fn\n self.correcting_xt_fn = correcting_xt_fn\n self.dynamic_thresholding_ratio = dynamic_thresholding_ratio\n self.thresholding_max_val = thresholding_max_val\n\n def dynamic_thresholding_fn(self, x0, t):\n \"\"\"\n The dynamic thresholding method.\n \"\"\"\n dims = x0.dim()\n p = self.dynamic_thresholding_ratio\n s = torch.quantile(torch.abs(x0).reshape((x0.shape[0], -1)), p, dim=1)\n s = expand_dims(torch.maximum(s, self.thresholding_max_val * torch.ones_like(s).to(s.device)), dims)\n x0 = torch.clamp(x0, -s, s) / s\n return x0\n\n def noise_prediction_fn(self, x, t):\n \"\"\"\n Return the noise prediction model.\n \"\"\"\n return self.model(x, t)\n\n def data_prediction_fn(self, x, t):\n \"\"\"\n Return the data prediction model (with corrector).\n \"\"\"\n noise = self.noise_prediction_fn(x, t)\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n x0 = (x - sigma_t * noise) / alpha_t\n if self.correcting_x0_fn is not None:\n x0 = self.correcting_x0_fn(x0, t)\n return x0\n\n def model_fn(self, x, t):\n \"\"\"\n Convert the model to the noise prediction model or the data prediction model.\n \"\"\"\n if self.algorithm_type == \"dpmsolver++\":\n return self.data_prediction_fn(x, t)\n else:\n return self.noise_prediction_fn(x, t)\n\n def get_time_steps(self, skip_type, t_T, t_0, N, device):\n \"\"\"Compute the intermediate time steps for sampling.\n\n Args:\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n N: A `int`. The total number of the spacing of the time steps.\n device: A torch device.\n Returns:\n A pytorch tensor of the time steps, with the shape (N + 1,).\n \"\"\"\n if skip_type == 'logSNR':\n lambda_T = self.noise_schedule.marginal_lambda(torch.tensor(t_T).to(device))\n lambda_0 = self.noise_schedule.marginal_lambda(torch.tensor(t_0).to(device))\n logSNR_steps = torch.linspace(lambda_T.cpu().item(), lambda_0.cpu().item(), N + 1).to(device)\n return self.noise_schedule.inverse_lambda(logSNR_steps)\n elif skip_type == 'time_uniform':\n return torch.linspace(t_T, t_0, N + 1).to(device)\n elif skip_type == 'time_quadratic':\n t_order = 2\n t = torch.linspace(t_T ** (1. / t_order), t_0 ** (1. 
/ t_order), N + 1).pow(t_order).to(device)\n return t\n else:\n raise ValueError(\n \"Unsupported skip_type {}, need to be 'logSNR' or 'time_uniform' or 'time_quadratic'\".format(skip_type))\n\n def get_orders_and_timesteps_for_singlestep_solver(self, steps, order, skip_type, t_T, t_0, device):\n \"\"\"\n Get the order of each step for sampling by the singlestep DPM-Solver.\n\n We combine both DPM-Solver-1,2,3 to use all the function evaluations, which is named as \"DPM-Solver-fast\".\n Given a fixed number of function evaluations by `steps`, the sampling procedure by DPM-Solver-fast is:\n - If order == 1:\n We take `steps` of DPM-Solver-1 (i.e. DDIM).\n - If order == 2:\n - Denote K = (steps // 2). We take K or (K + 1) intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of DPM-Solver-2.\n - If steps % 2 == 1, we use K steps of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If order == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of DPM-Solver-3, and 1 step of DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of DPM-Solver-3 and 1 step of DPM-Solver-2.\n\n ============================================\n Args:\n order: A `int`. The max order for the solver (2 or 3).\n steps: A `int`. The total number of function evaluations (NFE).\n skip_type: A `str`. The type for the spacing of the time steps. We support three types:\n - 'logSNR': uniform logSNR for the time steps.\n - 'time_uniform': uniform time for the time steps. (**Recommended for high-resolutional data**.)\n - 'time_quadratic': quadratic time for the time steps. (Used in DDIM for low-resolutional data.)\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n device: A torch device.\n Returns:\n orders: A list of the solver order of each step.\n \"\"\"\n if order == 3:\n K = steps // 3 + 1\n if steps % 3 == 0:\n orders = [3, ] * (K - 2) + [2, 1]\n elif steps % 3 == 1:\n orders = [3, ] * (K - 1) + [1]\n else:\n orders = [3, ] * (K - 1) + [2]\n elif order == 2:\n if steps % 2 == 0:\n K = steps // 2\n orders = [2, ] * K\n else:\n K = steps // 2 + 1\n orders = [2, ] * (K - 1) + [1]\n elif order == 1:\n K = 1\n orders = [1, ] * steps\n else:\n raise ValueError(\"'order' must be '1' or '2' or '3'.\")\n if skip_type == 'logSNR':\n # To reproduce the results in DPM-Solver paper\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, K, device)\n else:\n timesteps_outer = self.get_time_steps(skip_type, t_T, t_0, steps, device)[\n torch.cumsum(torch.tensor([0, ] + orders), 0).to(device)]\n return timesteps_outer, orders\n\n def denoise_to_zero_fn(self, x, s):\n \"\"\"\n Denoise at the final step, which is equivalent to solve the ODE from lambda_s to infty by first-order discretization.\n \"\"\"\n return self.data_prediction_fn(x, s)\n\n def dpm_solver_first_update(self, x, s, t, model_s=None, return_intermediate=False):\n \"\"\"\n DPM-Solver-1 (equivalent to DDIM) from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n model_s: A pytorch tensor. 
The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n dims = x.dim()\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n log_alpha_s, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_t = ns.marginal_std(s), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n sigma_t / sigma_s * x\n - alpha_t * phi_1 * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n else:\n phi_1 = torch.expm1(h)\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n )\n if return_intermediate:\n return x_t, {'model_s': model_s}\n else:\n return x_t\n\n def singlestep_dpm_solver_second_update(self, x, s, t, r1=0.5, model_s=None, return_intermediate=False,\n solver_type='dpmsolver'):\n \"\"\"\n Singlestep solver DPM-Solver-2 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the second-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s` and `s1` (the intermediate time).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 0.5\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n s1 = ns.inverse_lambda(lambda_s1)\n log_alpha_s, log_alpha_s1, log_alpha_t = ns.marginal_log_mean_coeff(s), ns.marginal_log_mean_coeff(\n s1), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(t)\n alpha_s1, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_1 = torch.expm1(-h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n (sigma_s1 / sigma_s) * x\n - (alpha_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n - (0.5 / r1) * (alpha_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1. 
/ r1) * (alpha_t * (phi_1 / h + 1.)) * (model_s1 - model_s)\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_1 = torch.expm1(h)\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n x_s1 = (\n torch.exp(log_alpha_s1 - log_alpha_s) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n if solver_type == 'dpmsolver':\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (0.5 / r1) * (sigma_t * phi_1) * (model_s1 - model_s)\n )\n elif solver_type == 'taylor':\n x_t = (\n torch.exp(log_alpha_t - log_alpha_s) * x\n - (sigma_t * phi_1) * model_s\n - (1. / r1) * (sigma_t * (phi_1 / h - 1.)) * (model_s1 - model_s)\n )\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1}\n else:\n return x_t\n\n def singlestep_dpm_solver_third_update(self, x, s, t, r1=1. / 3., r2=2. / 3., model_s=None, model_s1=None,\n return_intermediate=False, solver_type='dpmsolver'):\n \"\"\"\n Singlestep solver DPM-Solver-3 from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n r1: A `float`. The hyperparameter of the third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n model_s: A pytorch tensor. The model function evaluated at time `s`.\n If `model_s` is None, we evaluate the model by `x` and `s`; otherwise we directly use it.\n model_s1: A pytorch tensor. The model function evaluated at time `s1` (the intermediate time given by `r1`).\n If `model_s1` is None, we evaluate the model at `s1`; otherwise we directly use it.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n if r1 is None:\n r1 = 1. / 3.\n if r2 is None:\n r2 = 2. 
/ 3.\n ns = self.noise_schedule\n lambda_s, lambda_t = ns.marginal_lambda(s), ns.marginal_lambda(t)\n h = lambda_t - lambda_s\n lambda_s1 = lambda_s + r1 * h\n lambda_s2 = lambda_s + r2 * h\n s1 = ns.inverse_lambda(lambda_s1)\n s2 = ns.inverse_lambda(lambda_s2)\n log_alpha_s, log_alpha_s1, log_alpha_s2, log_alpha_t = ns.marginal_log_mean_coeff(\n s), ns.marginal_log_mean_coeff(s1), ns.marginal_log_mean_coeff(s2), ns.marginal_log_mean_coeff(t)\n sigma_s, sigma_s1, sigma_s2, sigma_t = ns.marginal_std(s), ns.marginal_std(s1), ns.marginal_std(\n s2), ns.marginal_std(t)\n alpha_s1, alpha_s2, alpha_t = torch.exp(log_alpha_s1), torch.exp(log_alpha_s2), torch.exp(log_alpha_t)\n\n if self.algorithm_type == \"dpmsolver++\":\n phi_11 = torch.expm1(-r1 * h)\n phi_12 = torch.expm1(-r2 * h)\n phi_1 = torch.expm1(-h)\n phi_22 = torch.expm1(-r2 * h) / (r2 * h) + 1.\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n (sigma_s1 / sigma_s) * x\n - (alpha_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (sigma_s2 / sigma_s) * x\n - (alpha_s2 * phi_12) * model_s\n + r2 / r1 * (alpha_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (1. / r2) * (alpha_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (sigma_t / sigma_s) * x\n - (alpha_t * phi_1) * model_s\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_11 = torch.expm1(r1 * h)\n phi_12 = torch.expm1(r2 * h)\n phi_1 = torch.expm1(h)\n phi_22 = torch.expm1(r2 * h) / (r2 * h) - 1.\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n\n if model_s is None:\n model_s = self.model_fn(x, s)\n if model_s1 is None:\n x_s1 = (\n (torch.exp(log_alpha_s1 - log_alpha_s)) * x\n - (sigma_s1 * phi_11) * model_s\n )\n model_s1 = self.model_fn(x_s1, s1)\n x_s2 = (\n (torch.exp(log_alpha_s2 - log_alpha_s)) * x\n - (sigma_s2 * phi_12) * model_s\n - r2 / r1 * (sigma_s2 * phi_22) * (model_s1 - model_s)\n )\n model_s2 = self.model_fn(x_s2, s2)\n if solver_type == 'dpmsolver':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (1. / r2) * (sigma_t * phi_2) * (model_s2 - model_s)\n )\n elif solver_type == 'taylor':\n D1_0 = (1. / r1) * (model_s1 - model_s)\n D1_1 = (1. / r2) * (model_s2 - model_s)\n D1 = (r2 * D1_0 - r1 * D1_1) / (r2 - r1)\n D2 = 2. * (D1_1 - D1_0) / (r2 - r1)\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_s)) * x\n - (sigma_t * phi_1) * model_s\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n\n if return_intermediate:\n return x_t, {'model_s': model_s, 'model_s1': model_s1, 'model_s2': model_s2}\n else:\n return x_t\n\n def multistep_dpm_solver_second_update(self, x, model_prev_list, t_prev_list, t, solver_type=\"dpmsolver\"):\n \"\"\"\n Multistep solver DPM-Solver-2 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. 
The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if solver_type not in ['dpmsolver', 'taylor']:\n raise ValueError(\"'solver_type' must be either 'dpmsolver' or 'taylor', got {}\".format(solver_type))\n ns = self.noise_schedule\n model_prev_1, model_prev_0 = model_prev_list[-2], model_prev_list[-1]\n t_prev_1, t_prev_0 = t_prev_list[-2], t_prev_list[-1]\n lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_1), ns.marginal_lambda(\n t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0 = h_0 / h\n D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n if solver_type == 'dpmsolver':\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n - 0.5 * (alpha_t * phi_1) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * (phi_1 / h + 1.)) * D1_0\n )\n else:\n phi_1 = torch.expm1(h)\n if solver_type == 'dpmsolver':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - 0.5 * (sigma_t * phi_1) * D1_0\n )\n elif solver_type == 'taylor':\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * (phi_1 / h - 1.)) * D1_0\n )\n return x_t\n\n def multistep_dpm_solver_third_update(self, x, model_prev_list, t_prev_list, t, solver_type='dpmsolver'):\n \"\"\"\n Multistep solver DPM-Solver-3 from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n ns = self.noise_schedule\n model_prev_2, model_prev_1, model_prev_0 = model_prev_list\n t_prev_2, t_prev_1, t_prev_0 = t_prev_list\n lambda_prev_2, lambda_prev_1, lambda_prev_0, lambda_t = ns.marginal_lambda(t_prev_2), ns.marginal_lambda(\n t_prev_1), ns.marginal_lambda(t_prev_0), ns.marginal_lambda(t)\n log_alpha_prev_0, log_alpha_t = ns.marginal_log_mean_coeff(t_prev_0), ns.marginal_log_mean_coeff(t)\n sigma_prev_0, sigma_t = ns.marginal_std(t_prev_0), ns.marginal_std(t)\n alpha_t = torch.exp(log_alpha_t)\n\n h_1 = lambda_prev_1 - lambda_prev_2\n h_0 = lambda_prev_0 - lambda_prev_1\n h = lambda_t - lambda_prev_0\n r0, r1 = h_0 / h, h_1 / h\n D1_0 = (1. / r0) * (model_prev_0 - model_prev_1)\n D1_1 = (1. / r1) * (model_prev_1 - model_prev_2)\n D1 = D1_0 + (r0 / (r0 + r1)) * (D1_0 - D1_1)\n D2 = (1. 
/ (r0 + r1)) * (D1_0 - D1_1)\n if self.algorithm_type == \"dpmsolver++\":\n phi_1 = torch.expm1(-h)\n phi_2 = phi_1 / h + 1.\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (sigma_t / sigma_prev_0) * x\n - (alpha_t * phi_1) * model_prev_0\n + (alpha_t * phi_2) * D1\n - (alpha_t * phi_3) * D2\n )\n else:\n phi_1 = torch.expm1(h)\n phi_2 = phi_1 / h - 1.\n phi_3 = phi_2 / h - 0.5\n x_t = (\n (torch.exp(log_alpha_t - log_alpha_prev_0)) * x\n - (sigma_t * phi_1) * model_prev_0\n - (sigma_t * phi_2) * D1\n - (sigma_t * phi_3) * D2\n )\n return x_t\n\n def singlestep_dpm_solver_update(self, x, s, t, order, return_intermediate=False, solver_type='dpmsolver', r1=None,\n r2=None):\n \"\"\"\n Singlestep DPM-Solver with the order `order` from time `s` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n s: A pytorch tensor. The starting time, with the shape (1,).\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n return_intermediate: A `bool`. If true, also return the model value at time `s`, `s1` and `s2` (the intermediate times).\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n r1: A `float`. The hyperparameter of the second-order or third-order solver.\n r2: A `float`. The hyperparameter of the third-order solver.\n Returns:\n x_t: A pytorch tensor. The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, s, t, return_intermediate=return_intermediate)\n elif order == 2:\n return self.singlestep_dpm_solver_second_update(x, s, t, return_intermediate=return_intermediate,\n solver_type=solver_type, r1=r1)\n elif order == 3:\n return self.singlestep_dpm_solver_third_update(x, s, t, return_intermediate=return_intermediate,\n solver_type=solver_type, r1=r1, r2=r2)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def multistep_dpm_solver_update(self, x, model_prev_list, t_prev_list, t, order, solver_type='dpmsolver'):\n \"\"\"\n Multistep DPM-Solver with the order `order` from time `t_prev_list[-1]` to time `t`.\n\n Args:\n x: A pytorch tensor. The initial value at time `s`.\n model_prev_list: A list of pytorch tensor. The previous computed model values.\n t_prev_list: A list of pytorch tensor. The previous times, each time has the shape (1,)\n t: A pytorch tensor. The ending time, with the shape (1,).\n order: A `int`. The order of DPM-Solver. We only support order == 1 or 2 or 3.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_t: A pytorch tensor. 
The approximated solution at time `t`.\n \"\"\"\n if order == 1:\n return self.dpm_solver_first_update(x, t_prev_list[-1], t, model_s=model_prev_list[-1])\n elif order == 2:\n return self.multistep_dpm_solver_second_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n elif order == 3:\n return self.multistep_dpm_solver_third_update(x, model_prev_list, t_prev_list, t, solver_type=solver_type)\n else:\n raise ValueError(\"Solver order must be 1 or 2 or 3, got {}\".format(order))\n\n def dpm_solver_adaptive(self, x, order, t_T, t_0, h_init=0.05, atol=0.0078, rtol=0.05, theta=0.9, t_err=1e-5,\n solver_type='dpmsolver'):\n \"\"\"\n The adaptive step size solver based on singlestep DPM-Solver.\n\n Args:\n x: A pytorch tensor. The initial value at time `t_T`.\n order: A `int`. The (higher) order of the solver. We only support order == 2 or 3.\n t_T: A `float`. The starting time of the sampling (default is T).\n t_0: A `float`. The ending time of the sampling (default is epsilon).\n h_init: A `float`. The initial step size (for logSNR).\n atol: A `float`. The absolute tolerance of the solver. For image data, the default setting is 0.0078, followed [1].\n rtol: A `float`. The relative tolerance of the solver. The default setting is 0.05.\n theta: A `float`. The safety hyperparameter for adapting the step size. The default setting is 0.9, followed [1].\n t_err: A `float`. The tolerance for the time. We solve the diffusion ODE until the absolute error between the\n current time and `t_0` is less than `t_err`. The default setting is 1e-5.\n solver_type: either 'dpmsolver' or 'taylor'. The type for the high-order solvers.\n The type slightly impacts the performance. We recommend to use 'dpmsolver' type.\n Returns:\n x_0: A pytorch tensor. The approximated solution at time `t_0`.\n\n [1] A. Jolicoeur-Martineau, K. Li, R. Piché-Taillefer, T. Kachman, and I. Mitliagkas, \"Gotta go fast when generating data with score-based models,\" arXiv preprint arXiv:2105.14080, 2021.\n \"\"\"\n ns = self.noise_schedule\n s = t_T * torch.ones((1,)).to(x)\n lambda_s = ns.marginal_lambda(s)\n lambda_0 = ns.marginal_lambda(t_0 * torch.ones_like(s).to(x))\n h = h_init * torch.ones_like(s).to(x)\n x_prev = x\n nfe = 0\n if order == 2:\n r1 = 0.5\n lower_update = lambda x, s, t: self.dpm_solver_first_update(x, s, t, return_intermediate=True)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,\n solver_type=solver_type,\n **kwargs)\n elif order == 3:\n r1, r2 = 1. / 3., 2. 
/ 3.\n lower_update = lambda x, s, t: self.singlestep_dpm_solver_second_update(x, s, t, r1=r1,\n return_intermediate=True,\n solver_type=solver_type)\n higher_update = lambda x, s, t, **kwargs: self.singlestep_dpm_solver_third_update(x, s, t, r1=r1, r2=r2,\n solver_type=solver_type,\n **kwargs)\n else:\n raise ValueError(\"For adaptive step size solver, order must be 2 or 3, got {}\".format(order))\n while torch.abs((s - t_0)).mean() > t_err:\n t = ns.inverse_lambda(lambda_s + h)\n x_lower, lower_noise_kwargs = lower_update(x, s, t)\n x_higher = higher_update(x, s, t, **lower_noise_kwargs)\n delta = torch.max(torch.ones_like(x).to(x) * atol, rtol * torch.max(torch.abs(x_lower), torch.abs(x_prev)))\n norm_fn = lambda v: torch.sqrt(torch.square(v.reshape((v.shape[0], -1))).mean(dim=-1, keepdim=True))\n E = norm_fn((x_higher - x_lower) / delta).max()\n if torch.all(E <= 1.):\n x = x_higher\n s = t\n x_prev = x_lower\n lambda_s = ns.marginal_lambda(s)\n h = torch.min(theta * h * torch.float_power(E, -1. / order).float(), lambda_0 - lambda_s)\n nfe += order\n print('adaptive solver nfe', nfe)\n return x\n\n def add_noise(self, x, t, noise=None):\n \"\"\"\n Compute the noised input xt = alpha_t * x + sigma_t * noise.\n\n Args:\n x: A `torch.Tensor` with shape `(batch_size, *shape)`.\n t: A `torch.Tensor` with shape `(t_size,)`.\n Returns:\n xt with shape `(t_size, batch_size, *shape)`.\n \"\"\"\n alpha_t, sigma_t = self.noise_schedule.marginal_alpha(t), self.noise_schedule.marginal_std(t)\n if noise is None:\n noise = torch.randn((t.shape[0], *x.shape), device=x.device)\n x = x.reshape((-1, *x.shape))\n xt = expand_dims(alpha_t, x.dim()) * x + expand_dims(sigma_t, x.dim()) * noise\n if t.shape[0] == 1:\n return xt.squeeze(0)\n else:\n return xt\n\n def inverse(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',\n method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',\n atol=0.0078, rtol=0.05, return_intermediate=False,\n ):\n \"\"\"\n Inverse the sample `x` from time `t_start` to `t_end` by DPM-Solver.\n For discrete-time DPMs, we use `t_start=1/N`, where `N` is the total time steps during training.\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_start is None else t_start\n t_T = self.noise_schedule.T if t_end is None else t_end\n assert t_0 > 0 and t_T > 0, \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n return self.sample(x, steps=steps, t_start=t_0, t_end=t_T, order=order, skip_type=skip_type,\n method=method, lower_order_final=lower_order_final, denoise_to_zero=denoise_to_zero,\n solver_type=solver_type,\n atol=atol, rtol=rtol, return_intermediate=return_intermediate)\n\n def sample(self, x, steps=20, t_start=None, t_end=None, order=2, skip_type='time_uniform',\n method='multistep', lower_order_final=True, denoise_to_zero=False, solver_type='dpmsolver',\n atol=0.0078, rtol=0.05, return_intermediate=False, latent_scale_factor=1.0, pbar=None, previewer=None,\n ):\n \"\"\"\n Compute the sample at time `t_end` by DPM-Solver, given the initial `x` at time `t_start`.\n\n =====================================================\n\n We support the following algorithms for both noise prediction model and data prediction model:\n - 'singlestep':\n Singlestep DPM-Solver (i.e. 
\"DPM-Solver-fast\" in the paper), which combines different orders of singlestep DPM-Solver.\n We combine all the singlestep solvers with order <= `order` to use up all the function evaluations (steps).\n The total number of function evaluations (NFE) == `steps`.\n Given a fixed NFE == `steps`, the sampling procedure is:\n - If `order` == 1:\n - Denote K = steps. We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - Denote K = (steps // 2) + (steps % 2). We take K intermediate time steps for sampling.\n - If steps % 2 == 0, we use K steps of singlestep DPM-Solver-2.\n - If steps % 2 == 1, we use (K - 1) steps of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If `order` == 3:\n - Denote K = (steps // 3 + 1). We take K intermediate time steps for sampling.\n - If steps % 3 == 0, we use (K - 2) steps of singlestep DPM-Solver-3, and 1 step of singlestep DPM-Solver-2 and 1 step of DPM-Solver-1.\n - If steps % 3 == 1, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of DPM-Solver-1.\n - If steps % 3 == 2, we use (K - 1) steps of singlestep DPM-Solver-3 and 1 step of singlestep DPM-Solver-2.\n - 'multistep':\n Multistep DPM-Solver with the order of `order`. The total number of function evaluations (NFE) == `steps`.\n We initialize the first `order` values by lower order multistep solvers.\n Given a fixed NFE == `steps`, the sampling procedure is:\n Denote K = steps.\n - If `order` == 1:\n - We use K steps of DPM-Solver-1 (i.e. DDIM).\n - If `order` == 2:\n - We firstly use 1 step of DPM-Solver-1, then use (K - 1) step of multistep DPM-Solver-2.\n - If `order` == 3:\n - We firstly use 1 step of DPM-Solver-1, then 1 step of multistep DPM-Solver-2, then (K - 2) step of multistep DPM-Solver-3.\n - 'singlestep_fixed':\n Fixed order singlestep DPM-Solver (i.e. DPM-Solver-1 or singlestep DPM-Solver-2 or singlestep DPM-Solver-3).\n We use singlestep DPM-Solver-`order` for `order`=1 or 2 or 3, with total [`steps` // `order`] * `order` NFE.\n - 'adaptive':\n Adaptive step size DPM-Solver (i.e. 
\"DPM-Solver-12\" and \"DPM-Solver-23\" in the paper).\n We ignore `steps` and use adaptive step size DPM-Solver with a higher order of `order`.\n You can adjust the absolute tolerance `atol` and the relative tolerance `rtol` to balance the computatation costs\n (NFE) and the sample quality.\n - If `order` == 2, we use DPM-Solver-12 which combines DPM-Solver-1 and singlestep DPM-Solver-2.\n - If `order` == 3, we use DPM-Solver-23 which combines singlestep DPM-Solver-2 and singlestep DPM-Solver-3.\n\n =====================================================\n\n Some advices for choosing the algorithm:\n - For **unconditional sampling** or **guided sampling with small guidance scale** by DPMs:\n Use singlestep DPM-Solver or DPM-Solver++ (\"DPM-Solver-fast\" in the paper) with `order = 3`.\n e.g., DPM-Solver:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n e.g., DPM-Solver++:\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=3,\n skip_type='time_uniform', method='singlestep')\n - For **guided sampling with large guidance scale** by DPMs:\n Use multistep DPM-Solver with `algorithm_type=\"dpmsolver++\"` and `order = 2`.\n e.g.\n >>> dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type=\"dpmsolver++\")\n >>> x_sample = dpm_solver.sample(x, steps=steps, t_start=t_start, t_end=t_end, order=2,\n skip_type='time_uniform', method='multistep')\n\n We support three types of `skip_type`:\n - 'logSNR': uniform logSNR for the time steps. **Recommended for low-resolutional images**\n - 'time_uniform': uniform time for the time steps. **Recommended for high-resolutional images**.\n - 'time_quadratic': quadratic time for the time steps.\n\n =====================================================\n Args:\n x: A pytorch tensor. The initial value at time `t_start`\n e.g. if `t_start` == T, then `x` is a sample from the standard normal distribution.\n steps: A `int`. The total number of function evaluations (NFE).\n t_start: A `float`. The starting time of the sampling.\n If `T` is None, we use self.noise_schedule.T (default is 1.0).\n t_end: A `float`. The ending time of the sampling.\n If `t_end` is None, we use 1. / self.noise_schedule.total_N.\n e.g. if total_N == 1000, we have `t_end` == 1e-3.\n For discrete-time DPMs:\n - We recommend `t_end` == 1. / self.noise_schedule.total_N.\n For continuous-time DPMs:\n - We recommend `t_end` == 1e-3 when `steps` <= 15; and `t_end` == 1e-4 when `steps` > 15.\n order: A `int`. The order of DPM-Solver.\n skip_type: A `str`. The type for the spacing of the time steps. 'time_uniform' or 'logSNR' or 'time_quadratic'.\n method: A `str`. The method for sampling. 'singlestep' or 'multistep' or 'singlestep_fixed' or 'adaptive'.\n denoise_to_zero: A `bool`. Whether to denoise to time 0 at the final step.\n Default is `False`. If `denoise_to_zero` is `True`, the total NFE is (`steps` + 1).\n\n This trick is firstly proposed by DDPM (https://arxiv.org/abs/2006.11239) and\n score_sde (https://arxiv.org/abs/2011.13456). Such trick can improve the FID\n for diffusion models sampling by diffusion SDEs for low-resolutional images\n (such as CIFAR-10). However, we observed that such trick does not matter for\n high-resolutional images. 
As it needs an additional NFE, we do not recommend\n it for high-resolutional images.\n lower_order_final: A `bool`. Whether to use lower order solvers at the final steps.\n Only valid for `method=multistep` and `steps < 15`. We empirically find that\n this trick is a key to stabilizing the sampling by DPM-Solver with very few steps\n (especially for steps <= 10). So we recommend to set it to be `True`.\n solver_type: A `str`. The taylor expansion type for the solver. `dpmsolver` or `taylor`. We recommend `dpmsolver`.\n atol: A `float`. The absolute tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n rtol: A `float`. The relative tolerance of the adaptive step size solver. Valid when `method` == 'adaptive'.\n return_intermediate: A `bool`. Whether to save the xt at each step.\n When set to `True`, method returns a tuple (x0, intermediates); when set to False, method returns only x0.\n Returns:\n x_end: A pytorch tensor. The approximated solution at time `t_end`.\n\n \"\"\"\n t_0 = 1. / self.noise_schedule.total_N if t_end is None else t_end\n t_T = self.noise_schedule.T if t_start is None else t_start\n assert t_0 > 0 and t_T > 0, \"Time range needs to be greater than 0. For discrete-time DPMs, it needs to be in [1 / N, 1], where N is the length of betas array\"\n if return_intermediate:\n assert method in ['multistep', 'singlestep',\n 'singlestep_fixed'], \"Cannot use adaptive solver when saving intermediate values\"\n if self.correcting_xt_fn is not None:\n assert method in ['multistep', 'singlestep',\n 'singlestep_fixed'], \"Cannot use adaptive solver when correcting_xt_fn is not None\"\n device = x.device\n intermediates = []\n with torch.no_grad():\n if method == 'adaptive':\n x = self.dpm_solver_adaptive(x, order=order, t_T=t_T, t_0=t_0, atol=atol, rtol=rtol,\n solver_type=solver_type)\n elif method == 'multistep':\n assert steps >= order\n timesteps = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=steps, device=device)\n assert timesteps.shape[0] - 1 == steps\n # Init the initial values.\n step = 0\n t = timesteps[step]\n t_prev_list = [t]\n model_prev_list = [self.model_fn(x, t)]\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n # Init the first `order` values by lower order multistep DPM-Solver.\n for step in range(1, order):\n t = timesteps[step]\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step,\n solver_type=solver_type)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n t_prev_list.append(t)\n model_prev_list.append(self.model_fn(x, t))\n # Compute the remaining values by `order`-th order multistep DPM-Solver.\n for step in tqdm(range(order, steps + 1)):\n t = timesteps[step]\n # We only use lower order for steps < 10\n if lower_order_final and steps < 10:\n step_order = min(order, steps + 1 - step)\n else:\n step_order = order\n x = self.multistep_dpm_solver_update(x, model_prev_list, t_prev_list, t, step_order,\n solver_type=solver_type)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n for i in range(order - 1):\n t_prev_list[i] = t_prev_list[i + 1]\n model_prev_list[i] = model_prev_list[i + 1]\n t_prev_list[-1] = t\n # We do not need to evaluate the final model value.\n if step < steps:\n model_prev_list[-1] = self.model_fn(x, t)\n # comfyui preview\n if 
pbar:\n preview_bytes = None\n if previewer:\n preview_bytes = previewer.decode_latent_to_preview_image(\"JPEG\", x)\n pbar.update_absolute(step, steps, preview_bytes)\n\n elif method in ['singlestep', 'singlestep_fixed']:\n if method == 'singlestep':\n timesteps_outer, orders = self.get_orders_and_timesteps_for_singlestep_solver(steps=steps,\n order=order,\n skip_type=skip_type,\n t_T=t_T, t_0=t_0,\n device=device)\n elif method == 'singlestep_fixed':\n K = steps // order\n orders = [order, ] * K\n timesteps_outer = self.get_time_steps(skip_type=skip_type, t_T=t_T, t_0=t_0, N=K, device=device)\n for step, order in enumerate(orders):\n s, t = timesteps_outer[step], timesteps_outer[step + 1]\n timesteps_inner = self.get_time_steps(skip_type=skip_type, t_T=s.item(), t_0=t.item(), N=order,\n device=device)\n lambda_inner = self.noise_schedule.marginal_lambda(timesteps_inner)\n h = lambda_inner[-1] - lambda_inner[0]\n r1 = None if order <= 1 else (lambda_inner[1] - lambda_inner[0]) / h\n r2 = None if order <= 2 else (lambda_inner[2] - lambda_inner[0]) / h\n x = self.singlestep_dpm_solver_update(x, s, t, order, solver_type=solver_type, r1=r1, r2=r2)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step)\n if return_intermediate:\n intermediates.append(x)\n else:\n raise ValueError(\"Got wrong method {}\".format(method))\n if denoise_to_zero:\n t = torch.ones((1,)).to(device) * t_0\n x = self.denoise_to_zero_fn(x, t)\n if self.correcting_xt_fn is not None:\n x = self.correcting_xt_fn(x, t, step + 1)\n if return_intermediate:\n intermediates.append(x)\n\n if return_intermediate:\n return x, intermediates\n else:\n return x" }, { "identifier": "NoiseScheduleVP", "path": "PixArt/sampling/dpm_solver.py", "snippet": "class NoiseScheduleVP:\n def __init__(\n self,\n schedule='discrete',\n betas=None,\n alphas_cumprod=None,\n continuous_beta_0=0.1,\n continuous_beta_1=20.,\n dtype=torch.float32,\n ):\n \"\"\"Create a wrapper class for the forward SDE (VP type).\n\n ***\n Update: We support discrete-time diffusion models by implementing a picewise linear interpolation for log_alpha_t.\n We recommend to use schedule='discrete' for the discrete-time diffusion models, especially for high-resolution images.\n ***\n\n The forward SDE ensures that the condition distribution q_{t|0}(x_t | x_0) = N ( alpha_t * x_0, sigma_t^2 * I ).\n We further define lambda_t = log(alpha_t) - log(sigma_t), which is the half-logSNR (described in the DPM-Solver paper).\n Therefore, we implement the functions for computing alpha_t, sigma_t and lambda_t. For t in [0, T], we have:\n\n log_alpha_t = self.marginal_log_mean_coeff(t)\n sigma_t = self.marginal_std(t)\n lambda_t = self.marginal_lambda(t)\n\n Moreover, as lambda(t) is an invertible function, we also support its inverse function:\n\n t = self.inverse_lambda(lambda_t)\n\n ===============================================================\n\n We support both discrete-time DPMs (trained on n = 0, 1, ..., N-1) and continuous-time DPMs (trained on t in [t_0, T]).\n\n 1. For discrete-time DPMs:\n\n For discrete-time DPMs trained on n = 0, 1, ..., N-1, we convert the discrete steps to continuous time steps by:\n t_i = (i + 1) / N\n e.g. for N = 1000, we have t_0 = 1e-3 and T = t_{N-1} = 1.\n We solve the corresponding diffusion ODE from time T = 1 to time t_0 = 1e-3.\n\n Args:\n betas: A `torch.Tensor`. The beta array for the discrete-time DPM. (See the original DDPM paper for details)\n alphas_cumprod: A `torch.Tensor`. 
The cumprod alphas for the discrete-time DPM. (See the original DDPM paper for details)\n\n Note that we always have alphas_cumprod = cumprod(1 - betas). Therefore, we only need to set one of `betas` and `alphas_cumprod`.\n\n **Important**: Please pay special attention for the args for `alphas_cumprod`:\n The `alphas_cumprod` is the \\hat{alpha_n} arrays in the notations of DDPM. Specifically, DDPMs assume that\n q_{t_n | 0}(x_{t_n} | x_0) = N ( \\sqrt{\\hat{alpha_n}} * x_0, (1 - \\hat{alpha_n}) * I ).\n Therefore, the notation \\hat{alpha_n} is different from the notation alpha_t in DPM-Solver. In fact, we have\n alpha_{t_n} = \\sqrt{\\hat{alpha_n}},\n and\n log(alpha_{t_n}) = 0.5 * log(\\hat{alpha_n}).\n\n\n 2. For continuous-time DPMs:\n\n We support the linear VPSDE for the continuous time setting. The hyperparameters for the noise\n schedule are the default settings in Yang Song's ScoreSDE:\n\n Args:\n beta_min: A `float` number. The smallest beta for the linear schedule.\n beta_max: A `float` number. The largest beta for the linear schedule.\n T: A `float` number. The ending time of the forward process.\n\n ===============================================================\n\n Args:\n schedule: A `str`. The noise schedule of the forward SDE. 'discrete' for discrete-time DPMs,\n 'linear' for continuous-time DPMs.\n Returns:\n A wrapper object of the forward SDE (VP type).\n\n ===============================================================\n\n Example:\n\n # For discrete-time DPMs, given betas (the beta array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', betas=betas)\n\n # For discrete-time DPMs, given alphas_cumprod (the \\hat{alpha_n} array for n = 0, 1, ..., N - 1):\n >>> ns = NoiseScheduleVP('discrete', alphas_cumprod=alphas_cumprod)\n\n # For continuous-time DPMs (VPSDE), linear schedule:\n >>> ns = NoiseScheduleVP('linear', continuous_beta_0=0.1, continuous_beta_1=20.)\n\n \"\"\"\n\n if schedule not in ['discrete', 'linear']:\n raise ValueError(\n \"Unsupported noise schedule {}. The schedule needs to be 'discrete' or 'linear'\".format(schedule))\n\n self.schedule = schedule\n if schedule == 'discrete':\n if betas is not None:\n log_alphas = 0.5 * torch.log(1 - betas).cumsum(dim=0)\n else:\n assert alphas_cumprod is not None\n log_alphas = 0.5 * torch.log(alphas_cumprod)\n self.T = 1.\n self.log_alpha_array = self.numerical_clip_alpha(log_alphas).reshape((1, -1,)).to(dtype=dtype)\n self.total_N = self.log_alpha_array.shape[1]\n self.t_array = torch.linspace(0., 1., self.total_N + 1)[1:].reshape((1, -1)).to(dtype=dtype)\n else:\n self.T = 1.\n self.total_N = 1000\n self.beta_0 = continuous_beta_0\n self.beta_1 = continuous_beta_1\n\n def numerical_clip_alpha(self, log_alphas, clipped_lambda=-5.1):\n \"\"\"\n For some beta schedules such as cosine schedule, the log-SNR has numerical isssues.\n We clip the log-SNR near t=T within -5.1 to ensure the stability.\n Such a trick is very useful for diffusion models with the cosine schedule, such as i-DDPM, guided-diffusion and GLIDE.\n \"\"\"\n log_sigmas = 0.5 * torch.log(1. - torch.exp(2. 
* log_alphas))\n lambs = log_alphas - log_sigmas\n idx = torch.searchsorted(torch.flip(lambs, [0]), clipped_lambda)\n if idx > 0:\n log_alphas = log_alphas[:-idx]\n return log_alphas\n\n def marginal_log_mean_coeff(self, t):\n \"\"\"\n Compute log(alpha_t) of a given continuous-time label t in [0, T].\n \"\"\"\n if self.schedule == 'discrete':\n return interpolate_fn(t.reshape((-1, 1)), self.t_array.to(t.device),\n self.log_alpha_array.to(t.device)).reshape((-1))\n elif self.schedule == 'linear':\n return -0.25 * t ** 2 * (self.beta_1 - self.beta_0) - 0.5 * t * self.beta_0\n\n def marginal_alpha(self, t):\n \"\"\"\n Compute alpha_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.exp(self.marginal_log_mean_coeff(t))\n\n def marginal_std(self, t):\n \"\"\"\n Compute sigma_t of a given continuous-time label t in [0, T].\n \"\"\"\n return torch.sqrt(1. - torch.exp(2. * self.marginal_log_mean_coeff(t)))\n\n def marginal_lambda(self, t):\n \"\"\"\n Compute lambda_t = log(alpha_t) - log(sigma_t) of a given continuous-time label t in [0, T].\n \"\"\"\n log_mean_coeff = self.marginal_log_mean_coeff(t)\n log_std = 0.5 * torch.log(1. - torch.exp(2. * log_mean_coeff))\n return log_mean_coeff - log_std\n\n def inverse_lambda(self, lamb):\n \"\"\"\n Compute the continuous-time label t in [0, T] of a given half-logSNR lambda_t.\n \"\"\"\n if self.schedule == 'linear':\n tmp = 2. * (self.beta_1 - self.beta_0) * torch.logaddexp(-2. * lamb, torch.zeros((1,)).to(lamb))\n Delta = self.beta_0 ** 2 + tmp\n return tmp / (torch.sqrt(Delta) + self.beta_0) / (self.beta_1 - self.beta_0)\n elif self.schedule == 'discrete':\n log_alpha = -0.5 * torch.logaddexp(torch.zeros((1,)).to(lamb.device), -2. * lamb)\n t = interpolate_fn(log_alpha.reshape((-1, 1)), torch.flip(self.log_alpha_array.to(lamb.device), [1]),\n torch.flip(self.t_array.to(lamb.device), [1]))\n return t.reshape((-1,))" } ]
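The `dynamic_thresholding_fn` in the DPM_Solver snippet above clamps each predicted x0 to its per-sample quantile range (Imagen, arXiv:2205.11487). A minimal self-contained sketch, assuming only PyTorch and the defaults ratio=0.995, max_val=1.0; `x0` is random stand-in data rather than a real model output:

import torch

x0 = 2.0 * torch.randn(2, 3, 8, 8)  # stand-in "predicted x0"; may exceed [-1, 1]
p, max_val = 0.995, 1.0
# Per-sample p-quantile of |x0|, floored at max_val, as in dynamic_thresholding_fn.
s = torch.quantile(x0.abs().reshape(x0.shape[0], -1), p, dim=1)
s = torch.maximum(s, torch.full_like(s, max_val)).view(-1, 1, 1, 1)
x0 = torch.clamp(x0, -s, s) / s  # clamped and rescaled into [-1, 1]
print(x0.abs().max().item())     # <= 1.0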
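The order-splitting rule in `get_orders_and_timesteps_for_singlestep_solver` is easy to sanity-check by hand; a sketch reproducing just its order == 3 branch, with two worked instances:

def split_orders_3(steps):
    # Mirrors the order == 3 branch above: K = steps // 3 + 1 outer steps,
    # mostly order-3, topped off with one order-2 and/or order-1 step.
    K = steps // 3 + 1
    if steps % 3 == 0:
        return [3] * (K - 2) + [2, 1]
    elif steps % 3 == 1:
        return [3] * (K - 1) + [1]
    return [3] * (K - 1) + [2]

print(split_orders_3(6))   # [3, 2, 1]    -> 6 NFE in 3 outer steps
print(split_orders_3(10))  # [3, 3, 3, 1] -> 10 NFE in 4 outer steps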
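`NoiseScheduleVP` defines the half-logSNR lambda_t = log(alpha_t) - log(sigma_t) together with a closed-form inverse for the linear schedule. A self-contained sketch (PyTorch only, beta_0=0.1 and beta_1=20 as in the snippet) that round-trips t -> lambda_t -> t and builds the 'logSNR'-uniform steps used by `get_time_steps`:

import torch

beta_0, beta_1 = 0.1, 20.0  # linear VPSDE defaults from the snippet

def marginal_log_mean_coeff(t):
    # log(alpha_t) for the linear schedule.
    return -0.25 * t ** 2 * (beta_1 - beta_0) - 0.5 * t * beta_0

def marginal_lambda(t):
    log_alpha = marginal_log_mean_coeff(t)
    log_sigma = 0.5 * torch.log(1.0 - torch.exp(2.0 * log_alpha))
    return log_alpha - log_sigma

def inverse_lambda(lamb):
    # Same algebra as NoiseScheduleVP.inverse_lambda for schedule='linear'.
    tmp = 2.0 * (beta_1 - beta_0) * torch.logaddexp(-2.0 * lamb, torch.zeros_like(lamb))
    Delta = beta_0 ** 2 + tmp
    return tmp / (torch.sqrt(Delta) + beta_0) / (beta_1 - beta_0)

t = torch.tensor([0.5])
assert torch.allclose(inverse_lambda(marginal_lambda(t)), t, atol=1e-4)

# 'logSNR' spacing: uniform in lambda, mapped back to time (cf. get_time_steps).
t_T, t_0, N = torch.tensor(1.0), torch.tensor(1e-3), 10
lambdas = torch.linspace(marginal_lambda(t_T).item(), marginal_lambda(t_0).item(), N + 1)
timesteps = inverse_lambda(lambdas)  # shape (N + 1,), decreasing from ~1.0 to ~1e-3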
import torch
import comfy.utils
import latent_preview
from .sampling import gaussian_diffusion as gd
from .sampling.dpm_solver import model_wrapper, DPM_Solver, NoiseScheduleVP
from comfy.sample import prepare_sampling, prepare_noise, cleanup_additional_models, get_models_from_cond
20107
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image):
    """
    Mostly just a wrapper around the reference code.
    """
    # prepare model
    noise = prepare_noise(latent_image, seed)
    real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None)

    # negative cond
    cond = positive[0][0]
    raw_uncond = negative[0][0]
    # Sampler seems to want the same dim for cond and uncond
    #  truncate uncond to the length of cond
    #  if shorter, pad uncond with y_null
    null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1)
    uncond = null_y[:, :cond.shape[1], :]
    uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :]
    if raw_uncond.shape[1] > cond.shape[1]:
        print("PixArt: Warning. Your negative prompt is too long.")
        uncond[:, -1, :] = raw_uncond[:, -1, :]  # add back EOS token

    # Move inputs
    cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype)
    uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype)
    noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype)

    # preview
    pbar = comfy.utils.ProgressBar(steps)
    previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format)

    ## Noise schedule.
    betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000))
    noise_schedule = NoiseScheduleVP(schedule=noise_schedule_vp, betas=betas)

    ## Convert your discrete-time `model` to the continuous-time
    ## noise prediction model. Here is an example for a diffusion model
    ## `model` with the noise prediction type ("noise") .
def sample_pixart(model, seed, steps, cfg, noise_schedule, noise_schedule_vp, positive, negative, latent_image):
    """
    Mostly just a wrapper around the reference code.
    """
    # prepare model
    noise = prepare_noise(latent_image, seed)
    real_model, _, _, _, models = prepare_sampling(model, noise.shape, positive, negative, noise_mask=None)

    # negative cond
    cond = positive[0][0]
    raw_uncond = negative[0][0]
    # Sampler seems to want the same dim for cond and uncond
    #  truncate uncond to the length of cond
    #  if shorter, pad uncond with y_null
    null_y = real_model.diffusion_model.y_embedder.y_embedding[None].repeat(latent_image.shape[0], 1, 1)
    uncond = null_y[:, :cond.shape[1], :]
    uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :]
    if raw_uncond.shape[1] > cond.shape[1]:
        print("PixArt: Warning. Your negative prompt is too long.")
        uncond[:, -1, :] = raw_uncond[:, -1, :]  # add back EOS token

    # Move inputs
    cond = cond.to(model.load_device).to(real_model.diffusion_model.dtype)
    uncond = uncond.to(model.load_device).to(real_model.diffusion_model.dtype)
    noise = noise.to(model.load_device).to(real_model.diffusion_model.dtype)

    # preview
    pbar = comfy.utils.ProgressBar(steps)
    previewer = latent_preview.get_previewer(model.load_device, model.model.latent_format)

    ## Noise schedule.
    betas = torch.tensor(gd.get_named_beta_schedule(noise_schedule, 1000))
    noise_schedule = NoiseScheduleVP(schedule=noise_schedule_vp, betas=betas)

    ## Convert your discrete-time `model` to the continuous-time
    ## noise prediction model. Here is an example for a diffusion model
    ## `model` with the noise prediction type ("noise") .
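The subtle step in `sample_pixart` above is padding the negative embedding to the positive's sequence length. A toy sketch with made-up shapes (batch 1, positive length 6, negative length 4, dim 8); `null_y` is zeros here, whereas the real model uses the learned `y_embedder.y_embedding`:

import torch

B, Lc, Lu, D = 1, 6, 4, 8           # assumed toy sizes
cond = torch.randn(B, Lc, D)        # positive embedding
raw_uncond = torch.randn(B, Lu, D)  # shorter negative embedding
null_y = torch.zeros(B, Lc, D)      # stand-in for the learned null embedding

# Pad the negative with null tokens up to the positive's length, as above.
uncond = null_y[:, :cond.shape[1], :].clone()  # clone to avoid mutating null_y
uncond[:, :raw_uncond.shape[1], :] = raw_uncond[:, :cond.shape[1], :]
assert uncond.shape == cond.shape   # (1, 6, 8): first 4 tokens real, last 2 null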
model_fn = model_wrapper(
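`model_fn = model_wrapper(` above is where the gold completion begins. For orientation only, a hedged sketch of how the call and the sampling tail might continue: the keyword names follow the reference DPM-Solver `model_wrapper` API and the model lambda's call convention is an assumption, not this record's ground truth; the `DPM_Solver(...)` and `.sample(...)` lines mirror the usage examples in the snippet's own docstring, plus the `pbar`/`previewer` kwargs this local copy adds:

# Plausible continuation sketch (NOT the gold completion stored in this record).
model_fn = model_wrapper(
    lambda x, t, *args, **kwargs: real_model.diffusion_model(x, t, *args, **kwargs),  # assumed call convention
    noise_schedule,
    model_type="noise",                  # PixArt's transformer predicts epsilon
    guidance_type="classifier-free",
    condition=cond,
    unconditional_condition=uncond,
    guidance_scale=cfg,
)
dpm_solver = DPM_Solver(model_fn, noise_schedule, algorithm_type="dpmsolver++")
x = dpm_solver.sample(
    noise, steps=steps, order=2, skip_type="time_uniform", method="multistep",
    pbar=pbar, previewer=previewer,
)
cleanup_additional_models(models)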
1
2023-10-20 21:19:44+00:00
24k
amitfin/oref_alert
custom_components/oref_alert/config_flow.py
[ { "identifier": "CONF_AREAS", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_AREAS: Final = \"areas\"" }, { "identifier": "CONF_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ALERT_MAX_AGE: Final = \"alert_max_age\"" }, { "identifier": "CONF_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_OFF_ICON: Final = \"off_icon\"" }, { "identifier": "CONF_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_ON_ICON: Final = \"on_icon\"" }, { "identifier": "CONF_POLL_INTERVAL", "path": "custom_components/oref_alert/const.py", "snippet": "CONF_POLL_INTERVAL: Final = \"poll_interval\"" }, { "identifier": "DEFAULT_ALERT_MAX_AGE", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_ALERT_MAX_AGE: Final = 10" }, { "identifier": "DOMAIN", "path": "custom_components/oref_alert/const.py", "snippet": "DOMAIN: Final = \"oref_alert\"" }, { "identifier": "DEFAULT_OFF_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_OFF_ICON: Final = \"mdi:home-outline\"" }, { "identifier": "DEFAULT_ON_ICON", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_ON_ICON: Final = \"mdi:home-alert-outline\"" }, { "identifier": "DEFAULT_POLL_INTERVAL", "path": "custom_components/oref_alert/const.py", "snippet": "DEFAULT_POLL_INTERVAL: Final = 2" }, { "identifier": "TITLE", "path": "custom_components/oref_alert/const.py", "snippet": "TITLE: Final = \"Oref Alert\"" }, { "identifier": "find_area", "path": "custom_components/oref_alert/metadata/area_to_polygon.py", "snippet": "def find_area(lat: float, long: float) -> str | None:\n \"\"\"Find an area using lat/long.\"\"\"\n point = Point(lat, long)\n for area, polygon in _load_area_to_polygon().items():\n if Polygon(polygon).contains(point):\n return area\n return None" }, { "identifier": "AREAS_AND_GROUPS", "path": "custom_components/oref_alert/metadata/areas_and_groups.py", "snippet": "AREAS_AND_GROUPS = [\n \"אבו סנאן\",\n \"אבו קרינאת\",\n \"אבו תלול\",\n \"אבו-גוש\",\n \"אבטליון\",\n \"אביאל\",\n \"אביבים\",\n \"אביגדור\",\n \"אביחיל\",\n \"אביעזר\",\n \"אבירים\",\n \"אבן יהודה\",\n \"אבן מנחם\",\n \"אבן ספיר\",\n \"אבן שמואל\",\n \"אבני איתן\",\n \"אבני חפץ\",\n \"אבנת\",\n \"אבשלום\",\n \"אדורה\",\n \"אדוריים\",\n \"אדמית\",\n \"אדרת\",\n \"אודים\",\n \"אודם\",\n \"אום אל פחם\",\n \"אום אל קוטוף\",\n \"אום אל-גנם\",\n \"אום בטין\",\n \"אופקים\",\n \"אור הגנוז\",\n \"אור הנר\",\n \"אור יהודה\",\n \"אור עקיבא\",\n \"אורה\",\n \"אורון תעשייה ומסחר\",\n \"אורות\",\n \"אורטל\",\n \"אורים\",\n \"אורנים\",\n \"אורנית\",\n \"אושה\",\n \"אזור\",\n \"אזור תעשייה אכזיב מילואות\",\n \"אזור תעשייה אלון התבור\",\n \"אזור תעשייה אפק ולב הארץ\",\n \"אזור תעשייה באר טוביה\",\n \"אזור תעשייה בני יהודה\",\n \"אזור תעשייה בר-לב\",\n \"אזור תעשייה בראון\",\n \"אזור תעשייה ברוש\",\n \"אזור תעשייה דימונה\",\n \"אזור תעשייה הדרומי אשקלון\",\n \"אזור תעשייה הר טוב - צרעה\",\n \"אזור תעשייה חבל מודיעין\",\n \"אזור תעשייה חצור הגלילית\",\n \"אזור תעשייה טירה\",\n \"אזור תעשייה יקנעם עילית\",\n \"אזור תעשייה כנות\",\n \"אזור תעשייה כרמיאל\",\n \"אזור תעשייה מבוא כרמל\",\n \"אזור תעשייה מבואות הגלבוע\",\n \"אזור תעשייה מישור אדומים\",\n \"אזור תעשייה מיתרים\",\n \"אזור תעשייה נ.ע.מ\",\n \"אזור תעשייה ניר עציון\",\n \"אזור תעשייה נשר - רמלה\",\n \"אזור תעשייה עד הלום\",\n \"אזור תעשייה עידן הנגב\",\n \"אזור תעשייה עמק חפר\",\n \"אזור תעשייה צ.ח.ר\",\n \"אזור תעשייה צבאים\",\n \"אזור תעשייה ציפורית\",\n \"אזור תעשייה צמח\",\n \"אזור תעשייה צפוני 
אשקלון\",\n \"אזור תעשייה קדמת גליל\",\n \"אזור תעשייה קיסריה\",\n \"אזור תעשייה קריית גת\",\n \"אזור תעשייה רגבים\",\n \"אזור תעשייה רותם\",\n \"אזור תעשייה רמת דלתון\",\n \"אזור תעשייה שחורת\",\n \"אזור תעשייה שער בנימין\",\n \"אזור תעשייה שער נעמן\",\n \"אזור תעשייה תימורים\",\n \"אזור תעשייה תרדיון\",\n \"אחווה\",\n \"אחוזם\",\n \"אחוזת ברק\",\n \"אחיה\",\n \"אחיהוד\",\n \"אחיטוב\",\n \"אחיסמך\",\n \"אחיעזר\",\n \"איבטין\",\n \"אייל\",\n \"איילת השחר\",\n \"אילון\",\n \"אילות\",\n \"אילניה\",\n \"אילת\",\n \"אירוס\",\n \"איתמר\",\n \"איתן\",\n \"אכסאל\",\n \"אל סייד\",\n \"אל עזי\",\n \"אל עמארני, אל מסק\",\n \"אל עריאן\",\n \"אל פורעה\",\n \"אל רום\",\n \"אל-ח'וואלד מערב\",\n \"אלומה\",\n \"אלומות\",\n \"אלון\",\n \"אלון הגליל\",\n \"אלון מורה\",\n \"אלון שבות\",\n \"אלוני אבא\",\n \"אלוני הבשן\",\n \"אלוני יצחק\",\n \"אלונים\",\n \"אלי עד\",\n \"אליאב\",\n \"אליכין\",\n \"אליפז ומכרות תמנע\",\n \"אליפלט\",\n \"אליקים\",\n \"אלישיב\",\n \"אלישמע\",\n \"אלמגור\",\n \"אלמוג\",\n \"אלעד\",\n \"אלעזר\",\n \"אלפי מנשה\",\n \"אלקוש\",\n \"אלקנה\",\n \"אמונים\",\n \"אמירים\",\n \"אמנון\",\n \"אמץ\",\n \"אמציה\",\n \"אניעם\",\n \"אעבלין\",\n \"אפיק\",\n \"אפיקים\",\n \"אפק\",\n \"אפרת\",\n \"ארבל\",\n \"ארגמן\",\n \"ארז\",\n \"אריאל\",\n \"ארסוף\",\n \"אשבול\",\n \"אשבל\",\n \"אשדוד - א,ב,ד,ה\",\n \"אשדוד - איזור תעשייה צפוני\",\n \"אשדוד - ג,ו,ז\",\n \"אשדוד - ח,ט,י,יג,יד,טז\",\n \"אשדוד - כל האזורים\",\n \"אשדוד -יא,יב,טו,יז,מרינה,סיט\",\n \"אשדות יעקב איחוד\",\n \"אשדות יעקב מאוחד\",\n \"אשחר\",\n \"אשכולות\",\n \"אשל הנשיא\",\n \"אשלים\",\n \"אשקלון - דרום\",\n \"אשקלון - כל האזורים\",\n \"אשקלון - צפון\",\n \"אשרת\",\n \"אשתאול\",\n \"אתר דודאים\",\n \"אתר ההנצחה גולני\",\n \"באקה אל גרבייה\",\n \"באר אורה\",\n \"באר גנים\",\n \"באר טוביה\",\n \"באר יעקב\",\n \"באר מילכה\",\n \"באר שבע - דרום\",\n \"באר שבע - כל האזורים\",\n \"באר שבע - מזרח\",\n \"באר שבע - מערב\",\n \"באר שבע - צפון\",\n \"בארות יצחק\",\n \"בארותיים\",\n \"בארי\",\n \"בוסתן הגליל\",\n \"בועיינה-נוג'ידאת\",\n \"בוקעתא\",\n \"בורגתה\",\n \"בחן\",\n \"בטחה\",\n \"ביצרון\",\n \"ביר אלמכסור\",\n \"ביר הדאג'\",\n \"ביריה\",\n \"בית אורן\",\n \"בית אל\",\n \"בית אלעזרי\",\n \"בית אלפא וחפציבה\",\n \"בית אריה\",\n \"בית ברל\",\n \"בית ג'אן\",\n \"בית גוברין\",\n \"בית גמליאל\",\n \"בית דגן\",\n \"בית הגדי\",\n \"בית הלוי\",\n \"בית הלל\",\n \"בית העמק\",\n \"בית הערבה\",\n \"בית השיטה\",\n \"בית זית\",\n \"בית זרע\",\n \"בית חגי\",\n \"בית חורון\",\n \"בית חזון\",\n \"בית חלקיה\",\n \"בית חנן\",\n \"בית חנניה\",\n \"בית חרות\",\n \"בית חשמונאי\",\n \"בית יהושע\",\n \"בית יוסף\",\n \"בית ינאי\",\n \"בית יצחק - שער חפר\",\n \"בית ירח\",\n \"בית יתיר\",\n \"בית לחם הגלילית\",\n \"בית מאיר\",\n \"בית נחמיה\",\n \"בית ניר\",\n \"בית נקופה\",\n \"בית סוהר השרון\",\n \"בית סוהר מגידו\",\n \"בית סוהר נפחא\",\n \"בית סוהר צלמון\",\n \"בית סוהר קישון\",\n \"בית סוהר שיטה וגלבוע\",\n \"בית ספר אורט בנימינה\",\n \"בית ספר שדה מירון\",\n \"בית עובד\",\n \"בית עוזיאל\",\n \"בית עזרא\",\n \"בית עלמין תל רגב\",\n \"בית עריף\",\n \"בית צבי\",\n \"בית קמה\",\n \"בית קשת\",\n \"בית רימון\",\n \"בית שאן\",\n \"בית שמש\",\n \"בית שערים\",\n \"בית שקמה\",\n \"ביתן אהרן\",\n \"ביתר עילית\",\n \"בלפוריה\",\n \"בן זכאי\",\n \"בן עמי\",\n \"בן שמן\",\n \"בני ברק\",\n \"בני דקלים\",\n \"בני דרום\",\n \"בני דרור\",\n \"בני יהודה וגבעת יואב\",\n \"בני נצרים\",\n \"בני עטרות\",\n \"בני עי''ש\",\n \"בני ציון\",\n \"בני ראם\",\n \"בניה\",\n \"בנימינה\",\n 'בסמ\"ה',\n \"בסמת טבעון\",\n \"בענה\",\n \"בצרה\",\n \"בצת\",\n \"בקוע\",\n \"בקעות\",\n \"בר 
גיורא\",\n \"בר יוחאי\",\n \"ברוכין\",\n \"ברור חיל\",\n \"ברוש\",\n \"ברטעה\",\n \"ברכיה\",\n \"ברעם\",\n \"ברקאי\",\n \"ברקן\",\n \"ברקת\",\n \"בת הדר\",\n \"בת חן\",\n \"בת חפר\",\n \"בת עין\",\n \"בת שלמה\",\n \"בת-ים\",\n \"בתי מלון ים המלח\",\n \"ג'דידה מכר\",\n \"ג'וליס\",\n \"ג'לג'וליה\",\n \"ג'סר א-זרקא\",\n \"ג'ש - גוש חלב\",\n \"ג'ת\",\n \"גאולי תימן\",\n \"גאולים\",\n \"גאליה\",\n \"גבולות\",\n \"גבים, מכללת ספיר\",\n \"גבע בנימין\",\n \"גבע כרמל\",\n \"גבעון החדשה\",\n \"גבעות\",\n \"גבעות בר\",\n \"גבעות גורל\",\n \"גבעות עדן\",\n \"גבעת אבני\",\n \"גבעת אלה\",\n \"גבעת אסף\",\n \"גבעת ברנר\",\n \"גבעת הראל וגבעת הרואה\",\n \"גבעת השלושה\",\n \"גבעת וולפסון\",\n \"גבעת וושינגטון\",\n \"גבעת זאב\",\n \"גבעת חביבה\",\n \"גבעת חיים איחוד\",\n \"גבעת חיים מאוחד\",\n \"גבעת חן\",\n \"גבעת יערים\",\n \"גבעת ישעיהו\",\n \"גבעת כ''ח\",\n \"גבעת ניל''י\",\n \"גבעת עדה\",\n \"גבעת עוז\",\n \"גבעת שמואל\",\n \"גבעת שפירא\",\n \"גבעתי\",\n \"גבעתיים\",\n \"גברעם\",\n \"גבת\",\n \"גדות\",\n \"גדיש\",\n \"גדעונה\",\n \"גדרה\",\n \"גונן\",\n \"גורן\",\n \"גורנות הגליל\",\n \"גזית\",\n \"גזר\",\n \"גיאה\",\n \"גיבתון\",\n \"גיזו\",\n \"גילת\",\n \"גינוסר\",\n \"גינתון\",\n \"גיתה\",\n \"גיתית\",\n \"גלאון\",\n \"גלגל\",\n \"גלעד\",\n \"גמזו\",\n \"גן הדרום\",\n \"גן השומרון\",\n \"גן חיים\",\n \"גן יאשיה\",\n \"גן יבנה\",\n \"גן נר\",\n \"גן שורק\",\n \"גן שלמה\",\n \"גן שמואל\",\n \"גנות\",\n \"גנות הדר\",\n \"גני הדר\",\n \"גני טל\",\n \"גני יוחנן\",\n \"גני מודיעין\",\n \"גני עם\",\n \"גני תקווה\",\n \"גניגר\",\n \"געש\",\n \"געתון\",\n \"גפן\",\n \"גרופית\",\n \"גשור\",\n \"גשר\",\n \"גשר הזיו\",\n \"גת\",\n \"גת רימון\",\n \"דבוריה\",\n \"דביר\",\n \"דברת\",\n \"דגניה א\",\n \"דגניה ב\",\n \"דוב''ב\",\n \"דולב\",\n \"דור\",\n \"דורות\",\n \"דחי\",\n \"דימונה\",\n \"דיר אל-אסד\",\n \"דיר חנא\",\n \"דישון\",\n \"דליה\",\n \"דלית אל כרמל\",\n \"דלתון\",\n \"דמיידה\",\n \"דניאל\",\n \"דפנה\",\n \"דקל\",\n \"האון\",\n \"הבונים\",\n \"הגושרים\",\n \"הדר עם\",\n \"הוד השרון\",\n \"הודיה\",\n \"הודיות\",\n \"הושעיה\",\n \"הזורעים\",\n \"החותרים\",\n \"היוגב\",\n \"הילה\",\n \"המכללה האקדמית כנרת\",\n \"המעפיל\",\n \"המרכז האקדמי רופין\",\n \"הסוללים\",\n \"העוגן\",\n \"הר אדר\",\n \"הר ברכה\",\n \"הר גילה\",\n \"הר הנגב\",\n \"הר עמשא\",\n \"הר-חלוץ\",\n \"הראל\",\n \"הרדוף\",\n \"הרצליה - כל האזורים\",\n \"הרצליה - מערב\",\n \"הרצליה - מרכז וגליל ים\",\n \"הררית יחד\",\n \"ואדי אל חמאם\",\n \"ואדי אל נעם דרום\",\n \"ורד יריחו\",\n \"ורדון\",\n \"זבדיאל\",\n \"זוהר\",\n \"זיקים\",\n \"זיתן\",\n \"זכרון יעקב\",\n \"זכריה\",\n \"זמר\",\n \"זמרת, שובה\",\n \"זנוח\",\n \"זרועה\",\n \"זרזיר\",\n \"זרחיה\",\n \"זרעית\",\n \"ח'וואלד\",\n \"חבצלת השרון וצוקי ים\",\n \"חברון\",\n \"חג'אג'רה\",\n \"חגור\",\n \"חגלה\",\n \"חד נס\",\n \"חדיד\",\n \"חדרה - כל האזורים\",\n \"חדרה - מזרח\",\n \"חדרה - מערב\",\n \"חדרה - מרכז\",\n \"חדרה - נווה חיים\",\n \"חוות גלעד\",\n \"חוות יאיר\",\n \"חוות עדן\",\n \"חוות ערנדל\",\n \"חוות שדה בר\",\n \"חוות שיקמים\",\n \"חולדה\",\n \"חולון\",\n \"חולית\",\n \"חולתה\",\n \"חוסן\",\n \"חוסנייה\",\n \"חופית\",\n \"חוקוק\",\n \"חורה\",\n \"חורפיש\",\n \"חורשים\",\n \"חזון\",\n \"חי-בר יטבתה\",\n \"חיבת ציון\",\n \"חיננית\",\n \"חיפה - כל האזורים\",\n \"חיפה - כרמל ועיר תחתית\",\n \"חיפה - מערב\",\n \"חיפה - נווה שאנן ורמות כרמל\",\n \"חיפה - קריית חיים ושמואל\",\n \"חיפה-מפרץ\",\n \"חירן\",\n \"חלמיש\",\n \"חלץ\",\n \"חמד\",\n \"חמדיה\",\n \"חמדת\",\n \"חמרה\",\n \"חמת גדר\",\n \"חניאל\",\n \"חניתה\",\n \"חנתון\",\n \"חספין\",\n \"חפץ חיים\",\n \"חצב\",\n \"חצבה\",\n 
\"חצור\",\n \"חצור הגלילית\",\n \"חצרים\",\n \"חרב לאת\",\n \"חרוצים\",\n \"חרות\",\n \"חריש\",\n \"חרמש\",\n \"חרשה\",\n \"חרשים\",\n \"חשמונאים\",\n \"טבריה\",\n \"טובא זנגריה\",\n \"טורעאן\",\n \"טייבה\",\n \"טייבה בגלבוע\",\n \"טירה\",\n \"טירת יהודה\",\n \"טירת כרמל\",\n \"טירת צבי\",\n \"טל מנשה\",\n \"טל שחר\",\n \"טל-אל\",\n \"טללים\",\n \"טלמון\",\n \"טמרה\",\n \"טמרה בגלבוע\",\n \"טנא עומרים\",\n \"טפחות\",\n \"יבול\",\n \"יבנאל\",\n \"יבנה\",\n \"יגור\",\n \"יגל\",\n \"יד בנימין\",\n \"יד השמונה\",\n \"יד חנה\",\n \"יד מרדכי\",\n \"יד נתן\",\n \"יד רמב''ם\",\n \"יהוד-מונוסון\",\n \"יהל\",\n \"יובלים\",\n \"יודפת\",\n \"יונתן\",\n \"יושיביה\",\n \"יזרעאל\",\n \"יחיעם\",\n \"יטבתה\",\n \"ייט''ב\",\n \"יכיני\",\n \"ינוב\",\n \"ינוח-ג'ת\",\n \"ינון\",\n \"יסוד המעלה\",\n \"יסודות\",\n \"יסעור\",\n \"יעד\",\n \"יעף\",\n \"יערה\",\n \"יערות הכרמל\",\n \"יפיע\",\n \"יפית\",\n \"יפעת\",\n \"יפתח\",\n \"יצהר\",\n \"יציץ\",\n \"יקום\",\n \"יקיר\",\n \"יקנעם המושבה והזורע\",\n \"יקנעם עילית\",\n \"יראון\",\n \"ירדנה\",\n \"ירוחם\",\n \"ירושלים - אזור תעשייה עטרות\",\n \"ירושלים - דרום\",\n \"ירושלים - כל האזורים\",\n \"ירושלים - כפר עקב\",\n \"ירושלים - מזרח\",\n \"ירושלים - מערב\",\n \"ירושלים - מרכז\",\n \"ירושלים - צפון\",\n \"ירחיב\",\n \"ירכא\",\n \"ירקונה\",\n \"ישובי אומן\",\n \"ישובי יעל\",\n \"ישעי\",\n \"ישרש\",\n \"יתד\",\n \"כאבול\",\n \"כאוכב אבו אלהיג'א\",\n \"כברי\",\n \"כדורי\",\n \"כוכב השחר\",\n \"כוכב יאיר - צור יגאל\",\n \"כוכב יעקב\",\n \"כוכב מיכאל\",\n \"כורזים ורד הגליל\",\n \"כושי רמון\",\n \"כחל\",\n \"כינרת מושבה\",\n \"כינרת קבוצה\",\n \"כיסופים\",\n \"כיסרא סמיע\",\n \"כישור\",\n \"כלא דמון\",\n \"כליל\",\n \"כלנית\",\n \"כמהין\",\n \"כמון\",\n \"כנות\",\n \"כנף\",\n \"כסייפה\",\n \"כסלון\",\n \"כעביה\",\n \"כעביה טבאש\",\n \"כפר אביב\",\n \"כפר אדומים\",\n \"כפר אוריה\",\n \"כפר אחים\",\n \"כפר אלדד\",\n \"כפר ביאליק\",\n \"כפר ביל''ו\",\n \"כפר בלום\",\n \"כפר בן נון\",\n \"כפר ברא\",\n \"כפר ברוך\",\n \"כפר גדעון\",\n \"כפר גלים\",\n \"כפר גליקסון\",\n \"כפר גלעדי\",\n \"כפר גמילה מלכישוע\",\n \"כפר דניאל\",\n \"כפר האורנים\",\n \"כפר החורש\",\n \"כפר המכבי\",\n \"כפר הנגיד\",\n \"כפר הנוער ימין אורד\",\n \"כפר הנשיא\",\n \"כפר הס\",\n \"כפר הרא''ה\",\n \"כפר הרי''ף וצומת ראם\",\n \"כפר ויתקין\",\n \"כפר ורבורג\",\n \"כפר ורדים\",\n \"כפר זוהרים\",\n \"כפר זיתים\",\n \"כפר חב''ד\",\n \"כפר חיטים\",\n \"כפר חיים\",\n \"כפר חנניה\",\n \"כפר חסידים\",\n \"כפר חרוב\",\n \"כפר טבאש\",\n \"כפר טרומן\",\n \"כפר ידידיה\",\n \"כפר יהושע\",\n \"כפר יובל\",\n \"כפר יונה\",\n \"כפר יחזקאל\",\n \"כפר יסיף\",\n \"כפר יעבץ\",\n \"כפר כמא\",\n \"כפר כנא\",\n \"כפר מונש\",\n \"כפר מימון ותושיה\",\n \"כפר מל''ל\",\n \"כפר מנדא\",\n \"כפר מנחם\",\n \"כפר מסריק\",\n \"כפר מצר\",\n \"כפר מרדכי\",\n \"כפר נהר הירדן\",\n \"כפר נוער בן שמן\",\n \"כפר נטר\",\n \"כפר סאלד\",\n \"כפר סבא\",\n \"כפר סילבר\",\n \"כפר סירקין\",\n \"כפר עבודה\",\n \"כפר עזה\",\n \"כפר עציון\",\n \"כפר פינס\",\n \"כפר קאסם\",\n \"כפר קיש\",\n \"כפר קרע\",\n \"כפר רופין\",\n \"כפר רות\",\n \"כפר שמאי\",\n \"כפר שמואל\",\n \"כפר שמריהו\",\n \"כפר תבור\",\n \"כפר תפוח\",\n \"כפר תקווה\",\n \"כרכום\",\n \"כרם ביבנה\",\n \"כרם בן זמרה\",\n \"כרם בן שמן\",\n \"כרם מהר''ל\",\n \"כרם שלום\",\n \"כרמי יוסף\",\n \"כרמי צור\",\n \"כרמי קטיף\",\n \"כרמיאל\",\n \"כרמיה\",\n \"כרמים\",\n \"כרמית\",\n \"כרמל\",\n \"לבון\",\n \"לביא\",\n \"לבנים\",\n \"להב\",\n \"להבות הבשן\",\n \"להבות חביבה\",\n \"להבים\",\n \"לוד\",\n \"לוזית\",\n \"לוחמי הגטאות\",\n \"לוטם וחמדון\",\n \"לוטן\",\n \"לטרון\",\n \"לימן\",\n \"לכיש\",\n 
\"לפיד\",\n \"לפידות\",\n \"לקיה\",\n \"מאור\",\n \"מאיר שפיה\",\n \"מבוא ביתר\",\n \"מבוא דותן\",\n \"מבוא חורון\",\n \"מבוא חמה\",\n \"מבוא מודיעים\",\n \"מבואות יריחו\",\n \"מבועים\",\n \"מבטחים, עמיעוז, ישע\",\n \"מבקיעים\",\n \"מבשרת ציון\",\n \"מג'דל כרום\",\n \"מג'דל שמס\",\n \"מגדים\",\n \"מגדל\",\n \"מגדל העמק\",\n \"מגדל עוז\",\n \"מגדל תפן\",\n \"מגדלים\",\n \"מגל\",\n \"מגן\",\n \"מגן שאול\",\n \"מגרון\",\n \"מגשימים\",\n \"מדרך עוז\",\n \"מדרשת בן גוריון\",\n \"מודיעין\",\n \"מודיעין - ישפרו סנטר\",\n \"מודיעין - ליגד סנטר\",\n \"מודיעין עילית\",\n \"מולדת\",\n \"מועאוויה\",\n \"מוצא עילית\",\n \"מוקיבלה\",\n \"מורן\",\n \"מורשת\",\n \"מזור\",\n \"מזכרת בתיה\",\n \"מזרע\",\n \"מזרעה\",\n \"מחוז אילת\",\n \"מחוז בקעה\",\n \"מחוז בקעת בית שאן\",\n \"מחוז גולן דרום\",\n \"מחוז גולן צפון\",\n \"מחוז גליל עליון\",\n \"מחוז גליל תחתון\",\n \"מחוז דן\",\n \"מחוז דרום הנגב\",\n \"מחוז הכרמל\",\n \"מחוז המפרץ\",\n \"מחוז העמקים\",\n \"מחוז השפלה\",\n \"מחוז ואדי ערה\",\n \"מחוז יהודה\",\n \"מחוז ים המלח\",\n \"מחוז ירושלים\",\n \"מחוז ירקון\",\n \"מחוז לכיש\",\n \"מחוז מנשה\",\n \"מחוז מערב הנגב\",\n \"מחוז מערב לכיש\",\n \"מחוז מרכז הגליל\",\n \"מחוז מרכז הנגב\",\n \"מחוז עוטף עזה\",\n \"מחוז ערבה\",\n \"מחוז קו העימות\",\n \"מחוז שומרון\",\n \"מחוז שפלת יהודה\",\n \"מחוז שרון\",\n \"מחולה\",\n \"מחניים\",\n \"מחסיה\",\n \"מטווח ניר עם\",\n \"מטולה\",\n \"מטע\",\n \"מי עמי\",\n \"מייסר\",\n \"מיצד\",\n \"מיצר\",\n \"מירב\",\n \"מירון\",\n \"מישר\",\n \"מיתר\",\n \"מכון וינגייט\",\n \"מכורה\",\n \"מכמורת\",\n \"מכמנים\",\n \"מלאה\",\n \"מלונות ים המלח מרכז\",\n \"מלכיה\",\n \"ממשית\",\n \"מנוחה\",\n \"מנוף\",\n \"מנות\",\n \"מנחמיה\",\n \"מנחת מחניים\",\n \"מנרה\",\n \"מנשית זבדה\",\n \"מסד\",\n \"מסדה\",\n \"מסילות\",\n \"מסילת ציון\",\n \"מסלול\",\n \"מסעדה\",\n \"מע'אר\",\n \"מעברות\",\n \"מעגלים, גבעולים, מלילות\",\n \"מעגן\",\n \"מעגן מיכאל\",\n \"מעוז חיים\",\n \"מעון\",\n \"מעון צופיה\",\n \"מעונה\",\n \"מעיין ברוך\",\n \"מעיין צבי\",\n \"מעיליא\",\n \"מעלה אדומים\",\n \"מעלה אפרים\",\n \"מעלה גלבוע\",\n \"מעלה גמלא\",\n \"מעלה החמישה\",\n \"מעלה חבר\",\n \"מעלה לבונה\",\n \"מעלה מכמש\",\n \"מעלה עירון\",\n \"מעלה עמוס\",\n \"מעלה צביה\",\n \"מעלה רחבעם\",\n \"מעלה שומרון\",\n \"מעלות תרשיחא\",\n \"מענית\",\n \"מעש\",\n \"מפלסים\",\n \"מצדה\",\n \"מצובה\",\n \"מצוקי דרגות\",\n \"מצליח\",\n \"מצפה\",\n \"מצפה אבי''ב\",\n \"מצפה אילן\",\n \"מצפה יריחו\",\n \"מצפה נטופה\",\n \"מצפה רמון\",\n \"מצפה שלם\",\n \"מצר\",\n \"מקווה ישראל\",\n \"מרגליות\",\n \"מרום גולן\",\n \"מרחב עם\",\n \"מרחביה מושב\",\n \"מרחביה קיבוץ\",\n \"מרחצאות עין גדי\",\n \"מרכז אומן\",\n \"מרכז אזורי דרום השרון\",\n \"מרכז אזורי מבואות חרמון\",\n \"מרכז אזורי מגילות\",\n \"מרכז אזורי מרום גליל\",\n \"מרכז אזורי משגב\",\n \"מרכז חבר\",\n \"מרכז ימי קיסריה\",\n \"מרכז מיר''ב\",\n \"מרכז שפירא\",\n \"מרעית\",\n \"משאבי שדה\",\n \"משגב דב\",\n \"משגב עם\",\n \"משהד\",\n \"משואה\",\n \"משואות יצחק\",\n \"משכיות\",\n \"משמר איילון\",\n \"משמר דוד\",\n \"משמר הירדן\",\n \"משמר הנגב\",\n \"משמר העמק\",\n \"משמר השבעה\",\n \"משמר השרון\",\n \"משמרות\",\n \"משמרת\",\n \"משען\",\n \"מתחם בני דרום\",\n \"מתחם פי גלילות\",\n \"מתחם צומת שוקת\",\n \"מתן\",\n \"מתת\",\n \"מתתיהו\",\n \"נאות גולן\",\n \"נאות הכיכר\",\n \"נאות חובב\",\n \"נאות מרדכי\",\n \"נאות סמדר\",\n \"נבטים\",\n \"נבי סמואל\",\n \"נגבה\",\n \"נגוהות\",\n \"נהורה\",\n \"נהלל\",\n \"נהריה\",\n \"נוב\",\n \"נוגה\",\n \"נוה איתן\",\n \"נווה\",\n \"נווה אור\",\n \"נווה אטי''ב\",\n \"נווה אילן\",\n \"נווה דניאל\",\n \"נווה זוהר\",\n \"נווה זיו\",\n \"נווה 
חריף\",\n \"נווה ים\",\n \"נווה ימין\",\n \"נווה ירק\",\n \"נווה מבטח\",\n \"נווה מיכאל - רוגלית\",\n \"נווה שלום\",\n \"נועם\",\n \"נוף איילון\",\n \"נוף הגליל\",\n \"נופי נחמיה\",\n \"נופי פרת\",\n \"נופים\",\n \"נופית\",\n \"נופך\",\n \"נוקדים\",\n \"נורדיה\",\n \"נורית\",\n \"נחושה\",\n \"נחל עוז\",\n \"נחלה\",\n \"נחליאל\",\n \"נחלים\",\n \"נחם\",\n \"נחף\",\n \"נחשולים\",\n \"נחשון\",\n \"נחשונים\",\n \"נטועה\",\n \"נטור\",\n \"נטע\",\n \"נטעים\",\n \"נטף\",\n \"ניל''י\",\n \"נין\",\n \"ניצן\",\n \"ניצנה\",\n \"ניצני עוז\",\n \"ניצנים\",\n \"ניר אליהו\",\n \"ניר בנים\",\n \"ניר גלים\",\n \"ניר דוד\",\n \"ניר ח''ן\",\n \"ניר יפה\",\n \"ניר יצחק\",\n \"ניר ישראל\",\n \"ניר משה\",\n \"ניר עוז\",\n \"ניר עציון\",\n \"ניר עקיבא\",\n \"ניר צבי\",\n \"נירים\",\n \"נירית\",\n \"נמרוד\",\n \"נס הרים\",\n \"נס עמים\",\n \"נס ציונה\",\n \"נעורה\",\n \"נעורים\",\n \"נעלה\",\n \"נעמה\",\n \"נען\",\n \"נערן\",\n \"נצר חזני\",\n \"נצר סרני\",\n \"נצרת\",\n \"נריה\",\n \"נשר\",\n \"נתיב הגדוד\",\n \"נתיב הל''ה\",\n \"נתיב העשרה\",\n \"נתיב השיירה\",\n \"נתיבות\",\n \"נתניה - כל האזורים\",\n \"נתניה - מזרח\",\n \"נתניה - מערב\",\n \"סאג'ור\",\n \"סאסא\",\n \"סביון\",\n \"סגולה\",\n \"סואעד חמירה\",\n \"סולם\",\n \"סוסיא\",\n \"סופה\",\n \"סינמה סיטי גלילות\",\n \"סכנין\",\n \"סלמה\",\n \"סלעית\",\n \"סמר\",\n \"סנדלה\",\n \"סנסנה\",\n \"סעד\",\n \"סעייה-מולדה\",\n \"סער\",\n \"ספיר\",\n \"ספסופה - כפר חושן\",\n \"סתריה\",\n \"ע'ג'ר\",\n \"עבדון\",\n \"עבדת\",\n \"עברון\",\n \"עגור\",\n \"עדי\",\n \"עדי עד\",\n \"עדנים\",\n \"עוזה\",\n \"עוזייר\",\n \"עולש\",\n \"עומר\",\n \"עופר\",\n \"עופרים\",\n \"עוצם\",\n \"עזוז\",\n \"עזר\",\n \"עזריאל\",\n \"עזריה\",\n \"עזריקם\",\n \"עטרת\",\n \"עידן\",\n \"עיינות\",\n \"עילבון\",\n \"עילוט\",\n \"עין איילה\",\n \"עין אל אסד\",\n \"עין אל-סהלה\",\n \"עין בוקק\",\n \"עין גב\",\n \"עין גדי\",\n \"עין דור\",\n \"עין הבשור\",\n \"עין הוד\",\n \"עין החורש\",\n \"עין המפרץ\",\n \"עין הנצי''ב\",\n \"עין העמק\",\n \"עין השופט\",\n \"עין השלושה\",\n \"עין ורד\",\n \"עין זיוון\",\n \"עין חוד\",\n \"עין חצבה\",\n \"עין חרוד\",\n \"עין חרוד איחוד\",\n \"עין יהב\",\n \"עין יעקב\",\n \"עין כמונים\",\n \"עין כרמל\",\n \"עין מאהל\",\n \"עין נקובא\",\n \"עין עירון\",\n \"עין צורים\",\n \"עין קנייא\",\n \"עין ראפה\",\n \"עין שמר\",\n \"עין שריד\",\n \"עין תמר\",\n \"עינבר\",\n \"עינת\",\n \"עיר אובות\",\n \"עכו\",\n \"עכו - אזור תעשייה\",\n \"עלומים\",\n \"עלי\",\n \"עלי זהב\",\n \"עלמה\",\n \"עלמון\",\n \"עמוקה\",\n \"עמיחי\",\n \"עמינדב\",\n \"עמיעד\",\n \"עמיקם\",\n \"עמיר\",\n \"עמנואל\",\n \"עמקה\",\n \"ענב\",\n \"עספיא\",\n \"עפולה\",\n \"עפרה\",\n \"עץ אפרים\",\n \"עצמון - שגב\",\n \"עראבה\",\n \"ערב אל עראמשה\",\n \"ערב אל-נעים\",\n \"ערד\",\n \"ערוגות\",\n \"ערערה\",\n \"ערערה בנגב\",\n \"עשאהל\",\n \"עשרת\",\n \"עתלית\",\n \"עתניאל\",\n \"פארן\",\n \"פארק תעשיות פלמחים\",\n \"פארק תעשייה ראם\",\n \"פדואל\",\n \"פדויים\",\n \"פדיה\",\n \"פוריה כפר עבודה\",\n \"פוריה נווה עובד\",\n \"פוריה עילית\",\n \"פוריידיס\",\n \"פורת\",\n \"פטיש\",\n \"פלך\",\n \"פלמחים\",\n \"פני קדם\",\n \"פנימיית עין כרם\",\n \"פסגות\",\n \"פסוטה\",\n \"פעמי תש''ז\",\n \"פצאל\",\n \"פקיעין\",\n \"פקיעין החדשה\",\n \"פרדס חנה-כרכור\",\n \"פרדסיה\",\n \"פרוד\",\n \"פרי גן\",\n \"פתח תקווה\",\n \"פתחיה\",\n \"צאלים\",\n \"צבעון\",\n \"צובה\",\n \"צוחר, אוהד\",\n \"צופים\",\n \"צופית\",\n \"צופר\",\n \"צוקים\",\n \"צור הדסה\",\n \"צור יצחק\",\n \"צור משה\",\n \"צור נתן\",\n \"צוריאל\",\n \"צורית גילון\",\n \"ציפורי\",\n \"צלפון\",\n \"צפריה\",\n \"צפרירים\",\n \"צפת\",\n \"צרופה\",\n \"צרעה\",\n 
\"קבוצת גבע\",\n \"קבוצת יבנה\",\n \"קדומים\",\n \"קדימה-צורן\",\n \"קדיתא\",\n \"קדמה\",\n \"קדמת צבי\",\n \"קדר\",\n \"קדרון\",\n \"קדרים\",\n \"קדש ברנע\",\n \"קוממיות\",\n \"קורנית\",\n \"קטורה\",\n \"קיבוץ דן\",\n \"קיבוץ מגידו\",\n \"קידה\",\n \"קיסריה\",\n \"קלחים\",\n \"קליה\",\n \"קלנסווה\",\n \"קלע\",\n \"קציר\",\n \"קצר-א-סיר\",\n \"קצרין\",\n \"קצרין - אזור תעשייה\",\n \"קריית אונו\",\n \"קריית אתא\",\n \"קריית ביאליק\",\n \"קריית גת, כרמי גת\",\n \"קריית חינוך מרחבים\",\n \"קריית טבעון-בית זייד\",\n \"קריית ים\",\n \"קריית יערים\",\n \"קריית מוצקין\",\n \"קריית מלאכי\",\n \"קריית נטפים\",\n \"קריית ענבים\",\n \"קריית עקרון\",\n \"קריית שמונה\",\n \"קרית ארבע\",\n \"קרני שומרון\",\n \"קשת\",\n \"ראמה\",\n \"ראס אל-עין\",\n \"ראס עלי\",\n \"ראש הנקרה\",\n \"ראש העין\",\n \"ראש פינה\",\n \"ראש צורים\",\n \"ראשון לציון - כל האזורים\",\n \"ראשון לציון - מזרח\",\n \"ראשון לציון - מערב\",\n \"רבבה\",\n \"רבדים\",\n \"רביבים\",\n \"רביד\",\n \"רגבה\",\n \"רגבים\",\n \"רהט\",\n \"רווחה\",\n \"רוויה\",\n \"רוחמה\",\n \"רומאנה\",\n \"רומת אל הייב\",\n \"רועי\",\n \"רותם\",\n \"רחוב\",\n \"רחובות\",\n \"רחלים\",\n \"רטורנו - גבעת שמש\",\n \"ריחאנייה\",\n \"ריחן\",\n \"ריינה\",\n \"רימונים\",\n \"רינתיה\",\n \"רכסים\",\n \"רם און\",\n \"רמות\",\n \"רמות השבים\",\n \"רמות מאיר\",\n \"רמות מנשה\",\n \"רמות נפתלי\",\n \"רמלה\",\n \"רמת גן - כל האזורים\",\n \"רמת גן - מזרח\",\n \"רמת גן - מערב\",\n \"רמת דוד\",\n \"רמת הכובש\",\n \"רמת הנדיב\",\n \"רמת השופט\",\n \"רמת השרון\",\n \"רמת יוחנן\",\n \"רמת ישי\",\n \"רמת מגשימים\",\n \"רמת צבי\",\n \"רמת רזיאל\",\n \"רמת רחל\",\n \"רנן\",\n \"רעים\",\n \"רעננה\",\n \"רקפת\",\n \"רשפון\",\n \"רשפים\",\n \"רתמים\",\n \"שאנטי במדבר\",\n \"שאר ישוב\",\n \"שבות רחל\",\n \"שבי דרום\",\n \"שבי ציון\",\n \"שבי שומרון\",\n \"שבלי\",\n \"שגב שלום\",\n \"שדה אברהם\",\n \"שדה אילן\",\n \"שדה אליהו\",\n \"שדה אליעזר\",\n \"שדה בוקר\",\n \"שדה דוד\",\n \"שדה ורבורג\",\n \"שדה יואב\",\n \"שדה יעקב\",\n \"שדה יצחק\",\n \"שדה משה\",\n \"שדה נחום\",\n \"שדה נחמיה\",\n \"שדה ניצן\",\n \"שדה עוזיהו\",\n \"שדה צבי\",\n \"שדות ים\",\n \"שדות מיכה\",\n \"שדי חמד\",\n \"שדי תרומות\",\n \"שדמה\",\n \"שדמות דבורה\",\n \"שדמות מחולה\",\n \"שדרות, איבים, ניר עם\",\n \"שהם\",\n \"שואבה\",\n \"שובל\",\n \"שומרה\",\n \"שומריה\",\n \"שומרת\",\n \"שוקדה\",\n \"שורש\",\n \"שורשים\",\n \"שושנת העמקים\",\n \"שזור\",\n \"שחר\",\n \"שחרות\",\n \"שיבולים\",\n \"שיטים\",\n \"שייח' דנון\",\n \"שילה\",\n \"שילת\",\n \"שכניה\",\n \"שלווה\",\n \"שלוחות\",\n \"שלומי\",\n \"שלומית\",\n \"שלפים\",\n \"שמיר\",\n \"שמעה\",\n \"שמשית\",\n \"שני ליבנה\",\n \"שניר\",\n \"שעב\",\n \"שעל\",\n \"שעלבים\",\n \"שער אפרים\",\n \"שער הגולן\",\n \"שער העמקים\",\n \"שער מנשה\",\n \"שערי תקווה\",\n \"שפיים\",\n \"שפיר\",\n \"שפר\",\n \"שפרעם\",\n \"שקד\",\n \"שקף\",\n \"שרונה\",\n \"שריגים - ליאון\",\n \"שריד\",\n \"שרשרת\",\n \"שתולה\",\n \"שתולים\",\n \"תארבין\",\n \"תאשור\",\n \"תדהר\",\n \"תובל\",\n \"תומר\",\n \"תחנת רכבת כפר יהושוע\",\n \"תחנת רכבת ראש העין\",\n \"תימורים\",\n \"תירוש\",\n \"תל אביב - דרום העיר ויפו\",\n \"תל אביב - כל האזורים\",\n \"תל אביב - מזרח\",\n \"תל אביב - מרכז העיר\",\n \"תל אביב - עבר הירקון\",\n \"תל חי\",\n \"תל יוסף\",\n \"תל יצחק\",\n \"תל מונד\",\n \"תל עדשים\",\n \"תל ערד\",\n \"תל ציון\",\n \"תל קציר\",\n \"תל שבע\",\n \"תל תאומים\",\n \"תלם\",\n \"תלמי אליהו\",\n \"תלמי אלעזר\",\n \"תלמי ביל''ו\",\n \"תלמי יוסף\",\n \"תלמי יחיאל\",\n \"תלמי יפה\",\n \"תלמים\",\n \"תמרת\",\n \"תנובות\",\n \"תעוז\",\n \"תעשיון חצב\",\n \"תעשיון צריפין\",\n \"תפרח\",\n \"תקומה\",\n 
\"תקומה וחוות יזרעם\",\n \"תקוע\",\n \"תרום\",\n]" } ]
import contextlib import voluptuous as vol import homeassistant.helpers.config_validation as cv from typing import Any from homeassistant.config_entries import ConfigEntry, ConfigFlow, OptionsFlow from homeassistant.core import async_get_hass, callback from homeassistant.data_entry_flow import FlowResult from homeassistant.exceptions import HomeAssistantError from homeassistant.helpers import selector from .const import ( CONF_AREAS, CONF_ALERT_MAX_AGE, CONF_OFF_ICON, CONF_ON_ICON, CONF_POLL_INTERVAL, DEFAULT_ALERT_MAX_AGE, DOMAIN, DEFAULT_OFF_ICON, DEFAULT_ON_ICON, DEFAULT_POLL_INTERVAL, TITLE, ) from .metadata.area_to_polygon import find_area from .metadata.areas_and_groups import AREAS_AND_GROUPS
18,888
"""Config flow for oref_alert integration.""" from __future__ import annotations AREAS_CONFIG = selector.SelectSelectorConfig( options=AREAS_AND_GROUPS, mode=selector.SelectSelectorMode.DROPDOWN, multiple=True, custom_value=False, ) CONFIG_SCHEMA = vol.Schema( {vol.Required(CONF_AREAS, default=[]): selector.SelectSelector(AREAS_CONFIG)} ) class OrefAlertConfigFlow(ConfigFlow, domain=DOMAIN): """Config flow.""" def __init__(self) -> None: """Initialize object with defaults.""" self._auto_detected_area: str | None = None async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a flow initialized by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if user_input is not None: return await self.async_step_confirm(user_input) hass = None with contextlib.suppress(HomeAssistantError): hass = async_get_hass() if hass: self._auto_detected_area = find_area( hass.config.latitude, hass.config.longitude ) if not self._auto_detected_area: return self.async_show_form(step_id="user", data_schema=CONFIG_SCHEMA) return await self.async_step_confirm(None) async def async_step_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Confirm the setup.""" if user_input is not None: return self.async_create_entry( title=TITLE, data={}, options={ CONF_AREAS: user_input.get(CONF_AREAS, [self._auto_detected_area]), CONF_ALERT_MAX_AGE: DEFAULT_ALERT_MAX_AGE, CONF_POLL_INTERVAL: DEFAULT_POLL_INTERVAL,
"""Config flow for oref_alert integration.""" from __future__ import annotations AREAS_CONFIG = selector.SelectSelectorConfig( options=AREAS_AND_GROUPS, mode=selector.SelectSelectorMode.DROPDOWN, multiple=True, custom_value=False, ) CONFIG_SCHEMA = vol.Schema( {vol.Required(CONF_AREAS, default=[]): selector.SelectSelector(AREAS_CONFIG)} ) class OrefAlertConfigFlow(ConfigFlow, domain=DOMAIN): """Config flow.""" def __init__(self) -> None: """Initialize object with defaults.""" self._auto_detected_area: str | None = None async def async_step_user( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Handle a flow initialized by the user.""" if self._async_current_entries(): return self.async_abort(reason="single_instance_allowed") if user_input is not None: return await self.async_step_confirm(user_input) hass = None with contextlib.suppress(HomeAssistantError): hass = async_get_hass() if hass: self._auto_detected_area = find_area( hass.config.latitude, hass.config.longitude ) if not self._auto_detected_area: return self.async_show_form(step_id="user", data_schema=CONFIG_SCHEMA) return await self.async_step_confirm(None) async def async_step_confirm( self, user_input: dict[str, Any] | None = None ) -> FlowResult: """Confirm the setup.""" if user_input is not None: return self.async_create_entry( title=TITLE, data={}, options={ CONF_AREAS: user_input.get(CONF_AREAS, [self._auto_detected_area]), CONF_ALERT_MAX_AGE: DEFAULT_ALERT_MAX_AGE, CONF_POLL_INTERVAL: DEFAULT_POLL_INTERVAL,
CONF_ON_ICON: DEFAULT_ON_ICON,
8
2023-10-18 11:16:41+00:00
24k
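To make the row layout above concrete, here is a minimal sketch of how one such row could be scored for next-line completion. It is an illustration under stated assumptions, not part of the dataset: the row is assumed to be available as a plain Python dict with the fields shown above (context entries carrying "identifier", "path", and "snippet" keys), gold_snippet_index is presumably an index into the context list, and predict_next_line is a hypothetical stand-in for whatever model is being evaluated.

def build_prompt(row: dict) -> str:
    # Pick the context snippet the completion depends on; gold_snippet_index
    # presumably points at it within the "context" list (an assumption).
    gold = row["context"][row["gold_snippet_index"]]["snippet"]
    # Prepend the retrieved snippet and the file's imports to the cropped code,
    # so the model sees everything up to the line to be completed.
    return "\n".join([gold, row["import_statement"], row["cropped_code"]])

def exact_match(row: dict, predict_next_line) -> bool:
    prediction = predict_next_line(build_prompt(row))
    # next_line is a single source line; compare ignoring surrounding whitespace.
    return prediction.strip() == row["next_line"].strip()

Under these assumptions, the row above would be solved by a model that, given the const definitions, the import block, and the cropped config-flow code, emits the recorded next_line (CONF_ON_ICON: DEFAULT_ON_ICON,) verbatim.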
RobertCsordas/moe
tasks/simple/language_model/transformer_lm_mixin.py
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False,\n output_mode: str = \"normal\"):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n # with torch.no_grad():\n # self.embedding.weight.uniform_(-0.1, 0.1)\n\n torch.nn.init.xavier_uniform_(self.embedding.weight)\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = torch.nn.ModuleList(layers)\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n self.output_mode = output_mode\n\n assert self.output_mode in {\"normal\", \"sum\", \"geometric\", \"sigmoid\"}\n\n if self.output_mode in {\"geometric\", \"sigmoid\"}:\n self.output_gate = torch.nn.Linear(state_size, 1)\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output or self.output_mode in {\"sum\", \"sigmoid\"}:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def accumulate_output(self, features: List[torch.Tensor]) -> torch.Tensor:\n if self.output_mode == \"sum\":\n return sum(features)\n elif self.output_mode in {\"geometric\", \"sigmoid\"}:\n # Must cast it to float16, otherwise pytorch will crash after a few hundred iterations with an\n # incomprehensible error in the gradient scaler\n gates = torch.sigmoid(torch.cat([self.output_gate(f).float() for f in features], -1))\n if self.output_mode == \"geometric\":\n ngates = torch.cumprod(1.0 - gates, -1)\n scores = torch.cat([gates[..., 0:1], gates[..., 1:] * ngates[..., :-1]], -1)\n else:\n 
scores = gates\n\n if self.iter % 100 == 0 and self.training:\n self.log(\"output_gate_mean\", framework.visualize.plot.Barplot(scores.flatten(end_dim=-2).mean(0)))\n # return sum(f * scores[..., i: i+1] for i, f in enumerate(features))\n f = scores.unsqueeze(-2) @ torch.stack(features, -2)\n return f.squeeze(-2)\n else:\n assert False, \"Invalid output mode\"\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = 0 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim or self.output_mode != \"normal\":\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if self.output_mode != \"normal\":\n net = self.accumulate_output(features)\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n if self.output_mode != \"normal\":\n f_sample = [self.accumulate_output(f_sample[:i]) for i in range(1, len(f_sample)+1)]\n f_sample_all = torch.stack(f_sample, -2)\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, 
p=1).mean())\n\n del outs\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "RelativeTransformerEncoderLayer", "path": "layers/transformer/relative_transformer.py", "snippet": "class RelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, ln_after_attention: bool = True):\n super().__init__()\n self.ln_after_attention = ln_after_attention\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n if ln_after_attention:\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src = self.norm1(src) if self.ln_after_attention else src\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "PrelnRelativeTransformerEncoderLayer", "path": "layers/transformer/relative_preln_transformer.py", "snippet": "class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):\n is_preln = True\n\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,\n drop_expand: bool = True, head_projection_size: Optional[int] = None):\n super().__init__(\n d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,\n activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n drop_expand=drop_expand, head_projection_size=head_projection_size)\n\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n 
src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "PrelnRelativeKVMemTransformerEncoderLayer", "path": "layers/transformer/relative_preln_kvmem_transformer.py", "snippet": "class PrelnRelativeKVMemTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, n_keys: Union[int, Tuple[int, int]], n_layers: int, dim_feedforward=2048,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, pkm_heads: int = 1, pkm_stochastic: bool = True,\n pkm_custom_init: int = 0, pkm_slice_values: bool = False,\n pkm_knn: int = 32, linproj: bool = False, head_merge_topk: bool = False, load_balance: bool = True,\n kvmem_dropout: str = \"none\", kvmem_randomize_indices: bool = False, kvmem_query_bias: bool = False,\n standard_parallel: bool = False, approx_topk: bool = False, factorize: bool = False,\n full_key: bool = False, key_redundancy_factor: int = 1, two_stage: bool = False,\n factors: Optional[List[int]] = None, head_exclusive: bool = False,\n head_projection_size: Optional[int] = None):\n super().__init__()\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n self.pkm = LowrankApproximate2Layer(\n d_model, n_keys, pkm_heads, stochastic=pkm_stochastic, custom_init=pkm_custom_init,\n weight_scale=math.sqrt(2.0 / n_layers), slice_values=pkm_slice_values, knn=pkm_knn,\n head_merge_topk=head_merge_topk, load_balance=load_balance, dropout=dropout,\n query_proj=linproj, randomize_indices=kvmem_randomize_indices, dropout_mode=kvmem_dropout,\n query_bias=kvmem_query_bias, approx=approx_topk, factorize=factorize, full_key=full_key,\n key_redundancy_factor=key_redundancy_factor, two_stage=two_stage, factors=factors,\n head_exclusive=head_exclusive, activation=activation)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.standard_parallel = standard_parallel\n\n reset_prenorm_params(self, n_layers)\n\n if self.standard_parallel:\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=False)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)\n\n initializer = self.pkm.get_custom_init()\n\n s_real = dim_feedforward + self.pkm.size\n # s_real = dim_feedforward + self.pkm.heads * self.pkm.knn\n initializer(self.linear2.weight, std=math.sqrt(2 / (n_layers * s_real)))\n initializer(self.pkm.values.weight, std=math.sqrt(2 / (n_layers * s_real)))\n initializer(self.linear1.weight, std=math.sqrt(2 / (n_layers * d_model)))\n\n if self.pkm.two_stage:\n initializer(self.pkm.full_keys, std=math.sqrt(2 / (n_layers * d_model)))\n\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n src2 = self.norm2(src)\n src3 = self.pkm(src2)\n\n if self.standard_parallel:\n src3 = src3 + self.linear2(self.dropout(self.activation(self.linear1(src2))))\n\n src = src + self.dropout(src3)\n return src" }, { "identifier": "RelativeMoeTransformerEncoderLayer", "path": 
"layers/transformer/relative_moe_transformer.py", "snippet": "class RelativeMoeTransformerEncoderLayer(LoggingLayer, torch.nn.Module):\n def __init__(self, d_model, nhead, n_experts: int, expert_size: int, n_layers: int, dim_feedforward=2048,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, knn: int = 0,\n standard_parallel: bool = False, custom_init: int = 0,\n dropout_mode: str = \"none\", selection_mode: str = \"add\",\n perplexity_reg: float = 0.0, key_mode: str = \"moe\", half_key: bool = False,\n n_heads: int = 1, norm_keys: bool = False, perplexity_reg_mode: str=\"step\",\n n_random: int = 0, reg_type: str = \"normal\", std_correction: bool = False,\n topk_mode: str = \"full\", head_projection_size: Optional[int] = None,\n activation_after_topk: bool = False, weight_grouping: str = \"none\",\n kmeans_distance: str = \"cosine\", drop_parallel: bool = True, block_expert_sel_in_grad: bool = False,\n mlp_selection: bool = False, classification_target: str = \"sum\",\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n norm_standard_parallel_values: bool = False, identical_init: bool = False,\n topological_sel_reg: float = 0.0, topological_expert_reg: float = 0.0,\n gumbel_select_only: bool = False, topk_value_norm_compensation: bool = False,\n norm_expert_scores: bool = False, sel_input_cluster_init: bool = False,\n init_norm_mode: str = \"full\", sel_bias: bool = False,\n bias: bool = False, rescale_normed: bool = False, sel_norm: str = \"none\",\n rescale_grads: bool = False, gumbel_decay: int = 0, preln: bool = True, ln_affine: bool = True,\n sinkhorn_local: bool = False, sinkhorn_n_iters: int = 3, moe_dropout_factor: float = 1.0,\n drop_expert: float = 0.0, expert_size_init: bool = False, sync_distributed: bool = True,\n modulation_amplitude: float = 0.5, invisible_selection: bool = False,\n slope_multiplier: float = 1.0, moe_init_scale: float = 1.0):\n super().__init__()\n self.preln = preln\n self.i = 0\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n std_scale = math.sqrt(2.0 / n_layers) if preln else 1.0\n std_scale *= math.sqrt(moe_init_scale)\n\n self.pkm = MoE(\n d_model, n_experts, expert_size, knn=knn, dropout=dropout * moe_dropout_factor, dropout_mode=dropout_mode,\n weight_scale=std_scale, custom_init=custom_init, selection_mode=selection_mode,\n perplexity_reg=perplexity_reg, key_mode=key_mode, half_key=half_key, n_heads=n_heads,\n norm_keys=norm_keys, perplexity_reg_mode=perplexity_reg_mode, n_random=n_random,\n reg_type=reg_type, std_correction=std_correction, topk_mode=topk_mode,\n activation_after_topk=activation_after_topk, weight_grouping=weight_grouping,\n kmeans_distance=kmeans_distance, activation=activation, block_expert_sel_in_grad=block_expert_sel_in_grad,\n mlp_selection=mlp_selection, classification_target=classification_target,\n normalize_expert_sel_init=normalize_expert_sel_init, norm_key_init=norm_key_init,\n norm_value_init=norm_value_init, identical_init=identical_init, topological_sel_reg=topological_sel_reg,\n topological_expert_reg=topological_expert_reg, gumbel_select_only=gumbel_select_only,\n topk_value_norm_compensation=topk_value_norm_compensation, norm_expert_scores=norm_expert_scores,\n sel_input_cluster_init=sel_input_cluster_init,\n n_parallel_expert_channels=dim_feedforward if standard_parallel else 
0,\n init_norm_mode=init_norm_mode, sel_bias=sel_bias, bias=bias, rescale_normed=rescale_normed,\n sel_norm=sel_norm, rescale_grads=rescale_grads, gumbel_decay=gumbel_decay,\n sinkhorn_local=sinkhorn_local, sinkhorn_n_iters=sinkhorn_n_iters, expert_dropout=drop_expert,\n expert_size_init=expert_size_init, sync_distributed=sync_distributed,\n modulation_amplitude=modulation_amplitude, invisible_selection=invisible_selection,\n slope_multiplier=slope_multiplier)\n\n self.norm1 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.norm2 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.standard_parallel = standard_parallel\n self.drop_parallel = drop_parallel\n\n if preln:\n reset_prenorm_params(self, n_layers)\n\n if self.standard_parallel:\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward, bias=bias)\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model, bias=False)\n\n s_real = dim_feedforward + self.pkm.size\n # s_real = dim_feedforward + self.pkm.heads * self.pkm.knn\n\n init = self.pkm.get_initializer()\n\n init(self.linear1.weight, std=std_scale * math.sqrt(1.0 / d_model))\n init(self.linear2.weight, std=std_scale * math.sqrt(1.0 / s_real))\n\n if norm_standard_parallel_values:\n with torch.no_grad():\n self.linear2.weight.div_(self.linear2.weight.norm(dim=0, keepdim=True))\n\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src = src2 = self.norm1(src)\n\n if self.i == 3:\n with profile(activities=[ProfilerActivity.CPU, ProfilerActivity.CUDA], record_shapes=True) as prof:\n src3 = self.pkm(src2)\n prof.export_chrome_trace(\"trace.json\")\n assert False\n else:\n src3 = self.pkm(src2)\n\n # self.i += 1\n\n if self.standard_parallel:\n x = self.linear1(src2)\n with torch.no_grad():\n self.log(\"standard_parallel_relu_pass_rate\", (x > 0).flatten(end_dim=-2).float().mean().item())\n x = self.activation(x)\n if self.drop_parallel:\n x = self.dropout(x)\n src3 = src3 + self.linear2(x)\n\n src = src + self.dropout(src3)\n if not self.preln:\n src = self.norm2(src)\n\n return src" }, { "identifier": "TopkTransformer", "path": "layers/transformer/topk_transformer.py", "snippet": "class TopkTransformer(PrelnRelativeTransformerEncoderLayer, LoggingLayer):\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None, drop_expand: bool = True, k: int = 32,\n use_norm: bool = True, head_projection_size: Optional[int] = None):\n\n super().__init__(d_model, nhead, n_layers, dim_feedforward, dropout, activation, attention_dropout,\n test_pos_clamp, drop_expand, head_projection_size=head_projection_size)\n\n LoggingLayer.__init__(self)\n self.k = k\n self.use_norm = use_norm\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n 
pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n\n middle = self.dropout(self.activation(self.linear1(src2)))\n\n with torch.no_grad():\n if self.use_norm:\n norms = self.linear2.weight.norm(dim=0)\n vals = - middle * norms\n else:\n vals = - middle\n mask = vals > vals.kthvalue(self.k, keepdim=True)[0]\n\n self.log(\"relu_pass_rate_before\", (middle > 0).float().mean())\n\n middle = middle.masked_fill(mask, 0)\n\n self.log(\"topk_positive_rate\", (middle > 0).float().sum(-1).mean()/self.k)\n\n src2 = self.linear2(middle)\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "MoE", "path": "layers/moe_layer.py", "snippet": "class MoE(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, dmodel: int, n_experts: int, expert_size: int, n_heads: int, knn: int = 0,\n dropout: float = 0, weight_scale: float = 1.0, custom_init: int = 0,\n dropout_mode: str = \"none\", selection_mode: str = \"add\", perplexity_reg: float = 0.0,\n key_mode: str = \"moe\", half_key: bool = False, norm_keys: bool = False,\n perplexity_reg_mode: str=\"step\", n_random: int = 0, reg_type: str = \"entropy\",\n std_correction: bool = False, topk_mode: str = \"full\", activation_after_topk: bool = False,\n weight_grouping: str = \"none\", kmeans_distance: str = \"cosine\",\n activation = lambda x: F.relu(x, inplace=True), block_expert_sel_in_grad: bool = False,\n mlp_selection: bool = False, classification_target: str = \"sum\",\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False, topological_sel_reg: float = 0.0, topological_expert_reg: float = 0.0,\n gumbel_select_only: bool = False, topk_value_norm_compensation: bool = False,\n norm_expert_scores: bool = False, sel_input_cluster_init: bool = False,\n n_parallel_expert_channels: int = 0, init_norm_mode: str = \"full\", sel_bias: bool = False,\n bias: bool = False, rescale_normed: bool = False, sel_norm: str = \"none\",\n rescale_grads: bool = False, gumbel_decay: int = 0, v_dim: Optional[int] = None,\n sinkhorn_local: bool = False, sinkhorn_n_iters: int = 3, expert_dropout: float = 0.0,\n expert_size_init: bool = False, sync_distributed: bool = False,\n modulation_amplitude: float = 0.5, invisible_selection: bool = False,\n slope_multiplier: float = 1.0):\n\n super().__init__()\n self.custom_init = custom_init\n self.k_dim = dmodel\n self.v_dim = v_dim if v_dim is not None else dmodel\n self.n_experts = n_experts\n self.expert_size = expert_size\n self.size = self.n_experts * self.expert_size\n self.knn = knn\n self.dropout = dropout\n self.dropout_mode = dropout_mode\n self.selection_mode = selection_mode\n self.perplexity_reg = perplexity_reg\n self.half_key = half_key\n self.key_mode = key_mode\n self.k_vec_dim = self.k_dim // (2 if half_key else 1)\n self.n_heads = n_heads\n self.norm_keys = norm_keys\n self.perplexity_reg_mode = perplexity_reg_mode\n self.n_random = n_random\n self.reg_type = reg_type\n self.topk_mode = topk_mode\n self.activation_after_topk = activation_after_topk\n self.weight_grouping = weight_grouping\n self.kmeans_distance = kmeans_distance\n self.activation = activation\n self.block_expert_sel_in_grad = block_expert_sel_in_grad\n self.mlp_selection = mlp_selection\n self.classification_target = classification_target\n self.weight_scale = weight_scale\n self.normalize_expert_sel_init = normalize_expert_sel_init\n self.norm_key_init = norm_key_init\n self.norm_value_init = 
norm_value_init\n self.identical_init = identical_init\n self.topological_sel_reg = topological_sel_reg\n self.topological_expert_reg = topological_expert_reg\n self.gumbel_select_only = gumbel_select_only\n self.topk_value_norm_compensation = topk_value_norm_compensation\n self.norm_expert_scores = norm_expert_scores\n self.sel_input_cluster_init = sel_input_cluster_init\n self.iter = 0\n self.layer = 0\n self.initalized = False\n self.rescale_normed = rescale_normed\n self.sel_norm = sel_norm\n self.rescale_grads = rescale_grads\n self.gumbel_decay = gumbel_decay\n self.was_training = True\n self.sinkhorn_local = sinkhorn_local\n self.sinkhorn_n_iters = sinkhorn_n_iters\n self.expert_dropout = expert_dropout\n self.reg_counts = 0\n self.sync_distributed = sync_distributed and torch.distributed.is_initialized()\n self.modulation_amplitude = modulation_amplitude\n self.invisible_selection = invisible_selection\n self.slope_multiplier = slope_multiplier\n\n self.coocurence = None\n\n assert self.selection_mode in {\"add\", \"gate\", \"sigmoid\", \"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkhorn_local\", \"mul\", \"random\", \"sinkmoid2\", \"sinkmax2\", \"modulate\"}\n assert self.perplexity_reg_mode in {\"step\", \"global\", \"time\", \"global_time\"}\n assert self.dropout_mode in {\"none\", \"score\"}\n assert self.reg_type in {\"perplexity\", \"variance\", \"entropy\", \"l2\", \"switch\"}\n assert self.topk_mode in {\"full\", \"l1_approx\", \"approx\"}\n assert self.weight_grouping in {\"none\", \"keys_only\", \"keys_and_experts\"}\n assert self.classification_target in {\"sum\", \"max\"}\n assert self.sel_norm in {\"none\", \"cos\", \"input\", \"weights\"}\n\n if selection_mode in {\"mul\"} and activation_after_topk:\n raise ValueError(\"Activation after topk is not supported with mul selection\")\n\n if self.sel_norm != \"none\" and mlp_selection:\n raise ValueError(\"normalization not supported with mlp_selection\")\n\n if std_correction and self.selection_mode in {\"add\"}:\n if key_mode == \"both\":\n self.key_std_correction = math.sqrt(3)\n else:\n self.key_std_correction = math.sqrt(2)\n elif std_correction and self.selection_mode in {\"sigmoid\", \"sinkmoid\", \"sinkmoid2\"}:\n self.key_std_correction = 2.0\n else:\n self.key_std_correction = 1.0\n\n if self.key_mode in {\"moe\", \"both\"}:\n self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))\n self.get_initializer()(self.keys, std=dmodel ** -0.5 * weight_scale * self.key_std_correction)\n else:\n self.keys = None\n\n if bias:\n self.bias = torch.nn.Parameter(torch.zeros(self.n_experts, self.expert_size))\n self.o_bias = torch.nn.Parameter(torch.zeros(self.v_dim))\n else:\n self.bias = None\n self.o_bias = None\n\n if self.key_mode in {\"shared\", \"both\"}:\n self.shared_keys = torch.nn.Parameter(torch.empty(self.k_vec_dim, self.expert_size))\n self.get_initializer()(self.shared_keys, std=dmodel ** -0.5 * weight_scale * self.key_std_correction)\n else:\n self.shared_keys = None\n\n self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))\n\n if self.mlp_selection:\n self.sel = torch.nn.Sequential(\n torch.nn.Linear(self.k_vec_dim, dmodel),\n torch.nn.ReLU(),\n torch.nn.Linear(dmodel, self.n_experts, bias=bias)\n )\n self.get_initializer()(self.sel[0].weight, std=self.k_vec_dim ** -0.5 * weight_scale * self.key_std_correction)\n self.get_initializer()(self.sel[-1].weight, std=dmodel ** 
-0.5 * weight_scale * self.key_std_correction)\n self.expert_sel = None\n else:\n self.sel = lambda x: F.linear(x, self.expert_sel, self.sel_bias)\n self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))\n self.sel_bias = torch.nn.Parameter(torch.zeros(self.n_experts)) if sel_bias else None\n\n self.get_initializer()(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_scale)\n\n if init_norm_mode == \"full\":\n real_size = self.size\n elif init_norm_mode == \"selected_experts\":\n real_size = self.expert_size * self.n_heads\n elif init_norm_mode == \"selected_channels\":\n real_size = self.knn\n elif init_norm_mode == \"expert_size\":\n real_size = self.expert_size\n else:\n raise ValueError(\"Unknown init_norm_mode\")\n\n real_size += n_parallel_expert_channels\n\n if expert_size_init:\n real_size = self.expert_size\n\n self.get_initializer()(self.values, std=real_size ** -0.5 * weight_scale)\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.sel_count_log = None\n\n self.register_buffer(\"kv_sel_counts\", torch.zeros(self.n_experts, self.expert_size), persistent=False)\n self.register_buffer(\"kv_sel_counts_100\", torch.zeros_like(self.kv_sel_counts))\n\n if self.rescale_normed and self.sel_norm != \"none\":\n self.sel_scale = torch.nn.Parameter(torch.ones([1]))\n else:\n self.sel_scale = 1.0\n\n if self.norm_expert_scores:\n self.expert_scale = torch.nn.Parameter(torch.full([1], math.sqrt(expert_size)))\n\n self.register_buffer(\"seq\", torch.arange(max(self.knn, self.n_heads, self.n_experts, self.k_dim, self.v_dim), dtype=torch.long), persistent=False)\n self.regroup_weights()\n\n def keys_to_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n k = keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n return k.permute(0, 2, 1).contiguous().view(-1, self.k_vec_dim)\n\n def keys_from_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n return keys.view(self.n_experts, self.expert_size, self.k_vec_dim).permute(0, 2, 1).contiguous().view(self.n_experts * self.k_vec_dim, self.expert_size)\n\n def init_sel(self, x: torch.Tensor):\n if not self.sel_input_cluster_init:\n return\n\n with torch.no_grad():\n from kmeans_pytorch import kmeans\n _, cluster_centers = kmeans(\n X=x, num_clusters=self.n_experts, distance=self.kmeans_distance, device=torch.device('cuda')\n )\n\n self.expert_sel.set_(cluster_centers.to(self.expert_sel.device).contiguous())\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def regroup_weights(self) -> Optional[torch.Tensor]:\n with torch.no_grad():\n\n if self.norm_key_init:\n self.renorm_keep_std(self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size), dim=1)\n\n if self.norm_value_init:\n self.renorm_keep_std(self.values, dim=1)\n\n if self.identical_init:\n k = self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n self.keys.set_(k[:1].expand_as(k).reshape_as(self.keys))\n\n v = self.values.view(self.n_experts, self.expert_size, self.v_dim)\n self.values.set_(v[:1].expand_as(v).reshape_as(self.values))\n\n ids = None\n if self.weight_grouping != \"none\":\n # self.n_experts * self.k_vec_dim, self.expert_size\n k = self.keys_to_logical_order(self.keys)\n\n from kmeans_pytorch 
import kmeans\n cluster_ids_x, cluster_centers = kmeans(\n X=k, num_clusters=self.n_experts, distance=self.kmeans_distance, device=torch.device('cuda')\n )\n\n _, ids = cluster_ids_x.sort()\n k = self.keys_from_logical_order(k[ids])\n\n self.keys.set_(k.contiguous())\n self.values.set_(self.values[ids].contiguous())\n if self.weight_grouping == \"keys_and_experts\":\n self.expert_sel.set_(cluster_centers.contiguous().to(self.expert_sel.device))\n else:\n self.get_initializer()(self.expert_sel, std=self.k_vec_dim ** -0.5 * self.weight_scale)\n\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n return ids\n\n def patch_optimizer_state(self, optimizer: torch.optim.AdamW, ids: torch.Tensor):\n if self.weight_grouping == \"none\":\n return\n\n with torch.no_grad():\n ks = optimizer.state[self.keys]\n vs = optimizer.state[self.values]\n\n for p in {\"exp_avg\", \"exp_avg_sq\"}:\n k = self.keys_to_logical_order(ks[p])\n ks[p].set_(self.keys_from_logical_order(k[ids]))\n\n vs[p].set_(vs[p][ids])\n\n es = optimizer.state[self.expert_sel]\n for p in {\"exp_avg\", \"exp_avg_sq\", 'step'}:\n es[p].zero_()\n\n def get_initializer(self):\n return torch.nn.init.normal_ if self.custom_init in {0} else utils.init.trunc_normal_\n\n def sparse_matmul(self, indices: torch.Tensor, values: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n return F.embedding_bag(indices, weight.type_as(values), per_sample_weights=values, mode=\"sum\", sparse=False)\n\n # def sparse_matmul(self, indices: torch.Tensor, values: torch.Tensor, weight: torch.Tensor) -> torch.Tensor:\n # sin = torch.sparse_csr_tensor(\n # crow_indices=torch.arange(0, values.nelement() + 1, values.shape[-1], device=indices.device),\n # col_indices=indices.flatten(),\n # values=values.flatten(),\n # size=(values.shape[0], weight.shape[0])\n # )\n # return sin @ weight.type_as(values)\n\n def pre_train_forward(self):\n if self.norm_keys:\n with torch.no_grad():\n self.keys.div_(self.keys.norm(dim=-1, keepdim=True))\n\n if self.topk_value_norm_compensation:\n with torch.no_grad():\n self.value_norms = self.values.norm(2, dim=-1)\n\n def topoloss(self, x: torch.Tensor) -> torch.Tensor:\n return (F.mse_loss(x[1:], x[:-1], reduction='mean') +\n F.mse_loss(x[1:], x[:-1], reduction='mean'))\n\n def ani(self, x: torch.Tensor) -> torch.Tensor:\n assert x.ndim == 2\n chunk_size = 32\n\n xnorm = F.normalize(x, 2, dim=-1)\n\n accu = 0\n for i in range(0, x.shape[0], chunk_size):\n a = xnorm[i: i + chunk_size]\n sims = xnorm @ a.T\n sims[i : i + chunk_size].fill_diagonal_(0)\n accu += sims.sum()\n\n return accu / (x.shape[0] * (x.shape[0] - 1))\n\n def log_expert_sel_usage(self, prefix: str, channel_sel_counts: torch.Tensor):\n sel_nonzero = (channel_sel_counts != 0).type(torch.float).sum(axis=-1) / self.expert_size\n self.log(f\"{prefix}/mean\", sel_nonzero.mean())\n self.log(f\"{prefix}/min\", sel_nonzero.min())\n self.log(f\"{prefix}/max\", sel_nonzero.max())\n\n\n def post_train_forward(self):\n if self.training and self.rescale_grads:\n self.values.grad.view(self.n_experts, -1).mul_(self.rescale[:, None])\n self.keys.grad.view(self.n_experts, -1).mul_(self.rescale[:, None])\n self.expert_sel.grad.mul_(self.rescale[:, None])\n\n def pre_train_forward(self):\n if self.training and not self.was_training:\n sorted_counts = self.index_sel_counts.sort(descending=True).values\n self.log(\"test_exert_channel_usage\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n 
self.layer = 0\n if self.sel_hist:\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n self.reg_counts = 0\n\n def before_loss(self):\n if self.sel_hist:\n # Concatenate against time dimension. Important for the within-batch regularization\n sel = torch.cat(self.sel_hist, -2)\n self.add_perplexity_reg(sel)\n\n self.sel_hist = []\n\n if self.topological_sel_reg > 0:\n self.add_reg(lambda: self.topological_sel_reg * self.topoloss(self.expert_sel))\n\n if self.topological_expert_reg > 0:\n self.add_reg(lambda: self.topological_expert_reg * (\n self.topoloss(self.keys.view(self.n_experts, -1)) +\n self.topoloss(self.values.view(self.n_experts, -1))\n ))\n\n if self.rescale_grads:\n self.rescale = 1.0 / self.index_sel_counts.clamp(min=1)\n\n # json.dumps\n\n\n if self.index_sel_norm > 0:\n if self.training:\n with torch.no_grad():\n self.log(\"usag_rel_perplexity_all_layers\", utils.relative_perplexity(self.index_sel_counts / self.index_sel_norm))\n self.log(\"dead_expert_proportion_all_layers\", (self.index_sel_counts == 0).float().sum() / self.n_experts)\n\n self.log_expert_sel_usage(\"exert_channel_usage\", self.kv_sel_counts)\n\n self.kv_sel_counts_100.add_(self.kv_sel_counts)\n self.kv_sel_counts.zero_()\n\n self.index_sel_counts_100 = self.index_sel_counts_100 + self.index_sel_counts\n self.index_sel_norm_100 = self.index_sel_norm_100 + self.index_sel_norm\n\n if self.training and self.iter % 100 == 0:\n norm_cnt = self.index_sel_counts_100 / self.index_sel_norm_100\n self.log(\"usag_rel_perplexity_100\", utils.relative_perplexity(norm_cnt))\n self.log(\"dead_expert_proportion_100\", (self.index_sel_counts_100 == 0).float().sum() / self.n_experts)\n\n sorted_counts = self.index_sel_counts_100.sort(descending=True).values\n self.log(\"usage_counts_100\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n\n self.log_expert_sel_usage(\"exert_channel_usage_100\", self.kv_sel_counts_100)\n self.kv_sel_counts_100.zero_()\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.log(\"ani/keys\", self.ani(self.keys_to_logical_order(self.keys)))\n self.log(\"ani/values\", self.ani(self.values.flatten(0, -2)))\n self.log(\"ani/expert_sel\", self.ani(self.expert_sel.T))\n\n if self.training:\n self.iter += 1\n\n def topk(self, x: torch.Tensor, k: int, approx: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n if approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return scores, self.seq[:k] * x.shape[-1] + ind\n else:\n return x.topk(k, dim=-1, sorted=False)\n\n def add_perplexity_reg(self, sel: torch.Tensor):\n sync_distributed = self.sync_distributed and (self.perplexity_reg_mode not in {\"time\", \"global_time\"})\n\n def log_mean(x: torch.Tensor, dim: int = 0):\n if sync_distributed:\n xlse = framework.utils.distributed_ops.logsumexp(x, dim=dim)\n\n # Normalize\n n = torch.tensor(x.shape[dim]).to(x.device)\n torch.distributed.all_reduce(n, op=torch.distributed.ReduceOp.SUM)\n return xlse - n.log()\n else:\n return x.logsumexp(dim) - math.log(x.shape[dim])\n\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n sel = sel.flatten(0, -3)\n else:\n sel = sel.flatten(0, -2)\n\n # Note: sel are raw logits, no matter what activation is used\n if self.perplexity_reg > 0:\n if self.reg_type == \"perplexity\":\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = log_mean(sel_d, -2)\n loss = lambda: self.perplexity_reg * ( - utils.relative_perplexity_l(sel_d).mean())\n elif self.reg_type 
== \"entropy\":\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = log_mean(sel_d, -2)\n loss = lambda: self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n elif self.reg_type == \"variance\":\n if sync_distributed:\n raise NotImplementedError(\"Variance regularization is not supported in distributed mode\")\n avg_sel = sel.mean(-2)\n loss = lambda: self.perplexity_reg * avg_sel.var(-1).mean()\n elif self.reg_type == \"l2\":\n loss = lambda: self.perplexity_reg * sel.pow(2).mean()\n elif self.reg_type == \"switch\":\n if sync_distributed:\n torch.distributed.all_reduce(self.reg_counts, op=torch.distributed.ReduceOp.SUM)\n\n p_sel_real = self.reg_counts / self.reg_counts.sum(-1, keepdims=True)\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n p_sel_real = p_sel_real.unsqueeze(-2)\n\n loss = lambda: self.perplexity_reg * (F.softmax(sel, dim=-1) * p_sel_real).mean()\n self.reg_counts = 0\n else:\n assert False\n\n self.add_reg(loss, \"moe\")\n\n def compute_scores(self, input: torch.Tensor, index: CVMMSel, expert_scores: torch.Tensor, shared_score: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.keys is not None:\n # scores = self.sparse_matmul(\n # (self.seq[:input.shape[-1]] + index[:, None] * (self.k_dim // (2 if self.half_key else 1))),\n # input,\n # self.keys\n # )\n scores = cvmm(input, index, self.keys)\n if self.shared_keys is not None:\n scores = scores + shared_score\n else:\n scores = shared_score\n\n if self.bias is not None:\n scores = scores + self.bias[index.raw_sel]\n\n if self.invisible_selection:\n unmodulated_scores = scores\n scores = scores.detach()\n\n if self.selection_mode in {\"add\"}:\n with torch.no_grad():\n self.log(\"expert_key_positive_rate\", (scores > 0).type_as(scores).mean())\n scores = scores + expert_scores[..., None]\n elif self.selection_mode in {\"mul\"}:\n scores = scores * expert_scores[..., None]\n elif self.selection_mode in {\"gate\", \"sigmoid\", \"gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"random\", \"modulate\", \"sinkmoid2\"}:\n # Handle it later\n pass\n elif self.selection_mode == \"hard_gumbel\":\n s = (torch.ones_like(expert_scores) - expert_scores).detach() + expert_scores\n scores = scores * s[..., None]\n\n if self.invisible_selection and scores is not unmodulated_scores:\n scores = unmodulated_scores + scores - scores.detach()\n\n scores = self.activation(scores)\n\n if self.norm_expert_scores:\n scores = F.normalize(scores, 1, dim=-1) * self.expert_scale\n\n if self.selection_mode in {\"gate\", \"sigmoid\", \"gumbel\", \"gumbel_sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"modulate\", \"sinkmoid2\"}:\n if self.invisible_selection:\n unmodulated_scores = scores\n scores = scores.detach()\n scores = scores * expert_scores[..., None]\n if self.invisible_selection:\n scores = unmodulated_scores + scores - scores.detach()\n\n if self.train and self.iter % 10 == 0:\n with torch.no_grad():\n gt0 = (scores > 0).float()\n gt0_s = gt0.sum()\n if self.selection_mode in {\"add\"}:\n self.log(\"k1_vs_k2_magnitude\", (scores / expert_scores[..., None]).sum() / gt0_s - 1)\n\n self.log(\"relu_pass_rate\", gt0_s / scores.numel())\n\n self.kv_sel_counts.index_add_(0, index.raw_sel.flatten(), gt0.flatten(end_dim=-2))\n\n\n # elif self.selection_mode in {\"predict_rank\"}:\n # self.add_reg(lambda: self.rank_loss(expert_scores, scores.detach().sum(-1)))\n\n if self.dropout > 0 and self.dropout_mode != \"none\":\n scores = F.dropout(scores, 
self.dropout, training=self.training)\n\n # indices = torch.arange(0, scores.shape[-1], device=input.device) + index[:, None] * self.expert_size\n return scores\n\n def sel_activation(self, sel: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n reg_sel = sel\n if self.selection_mode in {\"gumbel\", \"hard_gumbel\"}:\n if self.training:\n sel = F.gumbel_softmax(sel)\n else:\n sel = F.softmax(sel)\n elif self.selection_mode == \"gumbel_sigmoid\":\n if self.training and (self.gumbel_decay == 0 or self.gumbel_decay > self.iter):\n noise = gumbel_sigmoid_noise(sel)\n if self.gumbel_decay:\n noise = noise * (1 - self.iter / self.gumbel_decay)\n sel = sel + noise\n else:\n sel = F.sigmoid(sel)\n elif self.selection_mode in {\"sinkhorn\", \"sinkmoid\", \"sinkmax\"}:\n if self.training:\n if self.sinkhorn_local:\n sel = sel.view(-1, seq_len, sel.shape[-1])\n\n for _ in range(self.sinkhorn_n_iters):\n if self.sinkhorn_local or (not self.sync_distributed):\n sel = sel - torch.logsumexp(sel, -2, keepdim=True)\n else:\n sel = sel - framework.utils.distributed_ops.logsumexp(sel, -2, keepdim=True)\n\n sel = sel - torch.logsumexp(sel, -1, keepdim=True)\n reg_sel = sel\n\n if self.sinkhorn_local:\n sel = sel.flatten(end_dim=-2).exp()\n\n sel = sel.exp()\n elif self.selection_mode == \"sinkmoid\":\n sel = F.sigmoid(sel)\n else:\n sel = F.softmax(sel, dim=-1)\n elif self.selection_mode in {\"sinkhorn2\", \"sinkmoid2\", \"sinkmax2\"}:\n if self.training:\n sel = self.sinkhorn(sel, self.selection_mode != \"sinkmoid2\")\n elif self.selection_mode == \"sinkmoid\":\n sel = F.sigmoid(sel)\n else:\n sel = F.softmax(sel, dim=-1)\n elif self.selection_mode in {\"sigmoid\"}:\n sel = torch.sigmoid(sel)\n elif self.selection_mode in {\"modulate\"}:\n sel = torch.tanh(sel) * (self.modulation_amplitude / 0.5) + 1\n elif self.selection_mode in {\"add\"}:\n sel = sel\n elif self.selection_mode in {\"mul\"}:\n sel = sel.abs()\n reg_sel = sel\n elif self.selection_mode in {\"gate\"}:\n sel = F.softmax(sel, dim=-1)\n with torch.no_grad():\n self.log(\"expert_rel_perplexity_per_selection\", utils.relative_perplexity(sel).mean())\n else:\n assert False\n\n return sel, reg_sel\n\n def sinkhorn(self, x: torch.Tensor, normalize:bool = True) -> torch.Tensor:\n # Based on\n A, B = x.shape[-2:]\n\n a = torch.zeros_like(x[..., 0, :])\n b = torch.zeros_like(x[..., 0])\n\n for _ in range(self.sinkhorn_n_iters):\n b = math.log(A) - (x - a[..., None, :]).logsumexp(-1)\n if self.sync_distributed:\n a = math.log(B) - framework.utils.distributed_ops.logsumexp(x - b[..., None], -2)\n else:\n a = math.log(B) - (x - b[..., None]).logsumexp(-2)\n\n r = (a[..., None, :] + b[..., None] + x).exp()\n\n if normalize and self.sync_distributed:\n A = torch.tensor(A, device=x.device)\n A = torch.distributed.reduce_all(A, op=torch.distributed.ReduceOp.SUM)\n A = A.item()\n return (r / (A * B)) if normalize else r\n\n def forward(self, input: torch.Tensor) -> torch.Tensor:\n if not self.initalized:\n self.init_sel(input)\n self.initalized = True\n\n out = 0\n\n if self.half_key:\n in1 = input[..., :self.k_dim // 2]\n in2 = input[..., self.k_dim // 2:]\n else:\n in1 = in2 = input\n\n if self.selection_mode != \"random\":\n if self.block_expert_sel_in_grad:\n in1 = in1.detach()\n\n sel = self.sel(in1) * self.slope_multiplier\n\n if self.sel_norm == \"cos\":\n sel = sel / (in1.norm(dim=-1, keepdim=True) * self.expert_sel.norm(dim=-1)[None]) * self.sel_scale\n elif self.sel_norm == \"weights\":\n sel = sel * (self.sel_scale / 
self.expert_sel.norm(dim=-1)[None])\n elif self.sel_norm == \"input\":\n sel = sel * (self.sel_scale / in1.norm(dim=-1, keepdim=True))\n\n sel_raw = reg_sel = sel\n\n inv_val = float(\"-inf\")\n\n if (not self.activation_after_topk) or self.selection_mode in {\"sinkhorn\", \"sinkhorn2\", \"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"sinkmoid\", \"sinkmax\", \"mul\", \"sinkmoid2\"}:\n # Sinkhorn should be always applied before top-k\n sel, reg_sel = self.sel_activation(sel, input.shape[-2])\n if self.selection_mode not in {\"sinkmoid\", \"sinkmoid2\"}:\n inv_val = 0\n\n if self.training and self.expert_dropout > 0:\n if self.selection_mode not in {\"sigmoid\", \"modulate\", \"gate\", \"sinkmoid\", \"sinkmoid2\"}:\n raise ValueError(\"Expert dropout not supported in this mode\")\n\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, inv_val)\n else:\n sel2 = sel\n\n sel_val, sel_index = self.topk(sel2, self.n_heads, self.topk_mode in {\"l1_approx\", \"approx\"})\n\n if self.activation_after_topk or (self.selection_mode in {\"sinkmoid\", \"sinkmax\", \"mul\", \"sinkmoid2\"}) or (self.gumbel_select_only and self.selection_mode in {\"gumbel\", \"hard_gumbel\", \"gumbel_sigmoid\", \"gumbel_sigmoid\", \"sinkmax\"}):\n sel_val = torch.gather(sel_raw, -1, sel_index)\n if self.selection_mode in {\"gumbel_sigmoid\", \"sinkmoid\", \"sinkmoid2\"}:\n sel_val = torch.sigmoid(sel_val)\n elif self.selection_mode in {\"sinkhorn\", \"sinkhorn2\"}:\n # In case of sinkhorn, simulate the effect of post-topk activation by renormalizing\n sel_val = F.normalize(sel_val, p=1, dim=-1)\n else:\n sel_val, reg_sel = self.sel_activation(sel_val, input.shape[-2])\n else:\n sel_index = torch.randint(0, self.n_experts, (*input.shape[:-1], self.n_heads), device=input.device)\n sel_val = torch.ones_like(sel_index, dtype=input.dtype, device=input.device)\n reg_sel = None\n\n\n record_counts_now = (self.training and self.iter % 10 == 0) or (not self.training)\n\n if not self.training:\n sel_index_flat = sel_index.flatten(end_dim=-2)\n if self.coocurence is None:\n self.coocurence = torch.zeros([self.n_experts, self.n_experts], device=sel_index_flat.device, dtype=torch.long)\n\n for h1 in range(self.n_heads):\n for h2 in range(self.n_heads):\n ind_flat = sel_index_flat[..., h1] * self.n_experts + sel_index_flat[..., h2]\n values = torch.tensor([1], device=self.coocurence.device, dtype=self.coocurence.dtype).expand_as(ind_flat)\n # values = sel_val[..., h2].flatten()\n self.coocurence.flatten().put_(ind_flat, values, accumulate=True)\n # self.coocurence[sel_index_flat[..., h1], sel_index_flat[..., h2]] += 1\n\n if record_counts_now or self.reg_type == \"switch\":\n reg_counts = F.one_hot(sel_index, self.n_experts).type_as(input)\n\n if self.reg_type == \"switch\":\n reg_counts2 = reg_counts.view(*input.shape[:-2], input.shape[-2] * self.n_heads, self.n_experts)\n if self.perplexity_reg_mode == \"time\":\n reg_counts2 = reg_counts2.sum(-2)\n else:\n reg_counts2 = reg_counts2.flatten(end_dim=-2).sum(0)\n\n self.reg_counts = self.reg_counts + reg_counts2\n\n if record_counts_now:\n with torch.no_grad():\n sel_counts = reg_counts.flatten(end_dim=-2).sum(0)\n cnt = sel_index.nelement()\n\n p_expert_sel = sel_counts / cnt\n\n self.index_sel_counts = self.index_sel_counts + sel_counts\n self.index_sel_norm = self.index_sel_norm + cnt\n\n if self.training:\n self.log(\"min_sel_score\", sel_val.min(dim=-1).values.mean())\n self.log(\"max_sel_score\", sel_val.max(dim=-1).values.mean())\n\n sel_oh = 
F.one_hot(sel_index, self.n_experts).sum(-2).bool()\n if self.layer >= 1 and self.training:\n self.log(f\"layer_sel_overlap_{self.layer}\", ((self.prev_sel_oh & sel_oh).sum(-1).float() / self.n_heads).mean())\n\n self.prev_sel_oh = sel_oh\n\n ppl = utils.relative_perplexity(p_expert_sel)\n self.log(\"usage_rel_perplexity\", ppl)\n self.log(\"dead_expert_proportion\", (p_expert_sel == 0).float().sum() / self.n_experts)\n\n if self.perplexity_reg_mode in {\"step\", \"time\"}:\n self.add_perplexity_reg(reg_sel)\n elif self.perplexity_reg > 0 and self.training:\n self.sel_hist.append(reg_sel)\n\n shared_score = (in2 @ self.shared_keys) if self.shared_keys is not None else None\n\n scores_l = []\n\n sel_indices = [cvmm_prepare_sel(sel_index[..., h].int(), self.n_experts) for h in range(sel_index.shape[-1])]\n\n for h in range(sel_index.shape[-1]):\n hi = sel_indices[h]\n\n scores = self.compute_scores(in2, hi, sel_val[..., h], shared_score)\n scores_l.append(scores)\n\n if self.knn > 0 or self.selection_mode == \"classify\":\n with torch.no_grad():\n scores = torch.cat(scores_l, -1)\n\n if self.knn > 0:\n with torch.no_grad():\n tresh = scores.kthvalue(scores.shape[-1] - self.knn, -1).values\n\n scores_l = [s.masked_fill_(s < tresh[:, None], 0) for s in scores_l]\n\n out = 0\n for (hi, scores) in zip(sel_indices, scores_l):\n out = out + cvmm(scores, hi, self.values)\n\n # indices = torch.cat(ind_l, dim=-1)\n # scores = torch.cat(scores_l, dim=-1)\n\n if self.selection_mode == \"classify\":\n self.add_reg(lambda: self.cls_loss(sel_val, scores))\n\n # if self.knn > 0:\n # if self.topk_value_norm_compensation:\n # norms = self.value_norms[None].expand(indices.shape[0], -1).gather(-1, indices)\n # scores2 = scores * norms\n # _, ind2 = self.topk(scores2, self.knn, self.topk_mode == \"approx\")\n # indices = indices.gather(-1, ind2)\n # scores = scores.gather(-1, ind2)\n # else:\n # scores, ind2 = self.topk(scores, self.knn, self.topk_mode == \"approx\")\n # indices = indices.gather(-1, ind2)\n\n # if self.n_random > 0 and self.selection_mode not in {\"predict\", \"classify\"}:\n # with torch.no_grad():\n # rind = torch.arange(0, self.n_experts, device=input.device)\n # rind = torch.masked_select(rind, ~F.one_hot(sel_index, self.n_experts).sum(-2).bool()).view(in_flat.shape[0],-1)\n # rind = rind.gather(-1, torch.randint(0, rind.shape[-1], size=[*rind.shape[:-1], self.n_random], device=rind.device))\n\n # ind_l = [indices]\n # scores_l = [scores]\n # for i in range(self.n_random):\n # hi = rind[..., i]\n # indices, scores = self.compute_scores(in2, hi, sel.gather(-1, hi[:, None]).squeeze(), shared_score)\n\n # ind_l.append(indices)\n # scores_l.append(scores)\n\n # indices = torch.cat(ind_l, dim=-1)\n # scores = torch.cat(scores_l, dim=-1)\n\n # out = self.sparse_matmul(indices, scores, self.values)\n\n self.layer += 1\n\n self.was_training = self.training\n res = out.view(*input.shape[:-1], self.v_dim)\n if self.o_bias is not None:\n res = res + self.o_bias\n return res\n\n def dump_logs(self, save_dir: str):\n if self.coocurence is not None:\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.coocurence, os.path.join(save_dir, \"coocurence.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if self.coocurence is not None:\n coo = self.coocurence / self.coocurence.diagonal().clamp(min=1)[:, None]\n res[\"expert_coocurence\"] = framework.visualize.plot.Heatmap(coo, xlabel=\"expert\", ylabel=\"expert\", textval=False)\n self.coocurence = None\n return res" }, { 
"identifier": "Result", "path": "interfaces/result.py", "snippet": "class Result:\n outputs: torch.Tensor\n loss: torch.Tensor\n\n batch_dim = 0\n\n def plot(self) -> Dict[str, Any]:\n return {}\n\n @property\n def batch_size(self) -> int:\n return self.outputs.shape[self.batch_dim]\n\n @staticmethod\n def merge(l: List, batch_weights: Optional[List[float]] = None):\n if len(l) == 1:\n return l[0]\n batch_weights = batch_weights if batch_weights is not None else [1] * len(l)\n loss = sum([r.loss * w for r, w in zip(l, batch_weights)]) / sum(batch_weights)\n out = torch.stack([r.outputs for r in l], l[0].batch_dim)\n return l[0].__class__(out, loss)" } ]
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_preln_kvmem_transformer import PrelnRelativeKVMemTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.topk_transformer import TopkTransformer from layers.moe_layer import MoE from interfaces import Result
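The MoE layer snippet in the context above accumulates an expert co-occurrence matrix at eval time by flattening each (head-i expert, head-j expert) pair into a linear index and scatter-adding ones. A self-contained sketch of the same counting trick, with illustrative shapes and names:

import torch

n_experts, n_heads = 8, 4
# per-token expert choices for each head: [n_tokens, n_heads]
sel_index = torch.randint(0, n_experts, (16, n_heads))

cooc = torch.zeros(n_experts, n_experts, dtype=torch.long)
for h1 in range(n_heads):
    for h2 in range(n_heads):
        # linearize the pair (e1, e2) into e1 * n_experts + e2
        ind_flat = sel_index[..., h1] * n_experts + sel_index[..., h2]
        ones = torch.ones_like(ind_flat)
        cooc.flatten().put_(ind_flat, ones, accumulate=True)

# normalizing rows by the diagonal gives the rate at which expert j
# is co-selected whenever expert i is selected
rates = cooc / cooc.diagonal().clamp(min=1)[:, None]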
18811
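The cropped context below registers -moe.reg_type with a "switch" option; in the MoE layer earlier in this record, that mode penalizes the product of the soft router probabilities and the observed hard routing fractions, in the spirit of the Switch Transformer balancing loss. A minimal sketch under those assumptions (toy shapes and top-1 routing, not the repository API):

import torch
import torch.nn.functional as F

sel = torch.randn(32, 8)                      # router logits: [tokens, experts]
hard_counts = F.one_hot(sel.argmax(-1), 8).float().sum(0)
p_real = hard_counts / hard_counts.sum()      # fraction of tokens sent to each expert
loss = (F.softmax(sel, dim=-1) * p_real).mean()  # small when load is balanced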
parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) 
parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}: mklayer = lambda: TopkTransformer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, k=self.helper.args.transformer.topk_value, use_norm=self.helper.args.transformer.topk_use_norm, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_kvmem"}:
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_keys", default="128", parser=parser.int_list_parser) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-pkm.knn", default=32) parser.add_argument("-pkm.stochastic", default=False) parser.add_argument("-pkm.query_batchnorm", default=False) parser.add_argument("-pkm.custom_init", default=0) parser.add_argument("-pkm.slice_values", default=False) parser.add_argument("-pkm.slice_proj", default=False) parser.add_argument("-pkm.sample_smallest", default=False) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="add", choice=["add", "gate", "sigmoid", "gumbel", "hard_gumbel", "predict", "predict_mlp", "classify", "gumbel_sigmoid", "sinkhorn", "sinkhorn2", "sinkmoid", "sinkmax", "moe", "mul", "random", "sinkmoid2", "sinkmax2", "modulate"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.key_mode", default="moe", choice=["moe", "both", "shared"]) parser.add_argument("-moe.half_key", default=False) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.kmeans_distance", default='cosine', choice=['cosine', 'euclidean']) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.std_correction", default=False) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.weight_grouping", default="none", choice=["none", "keys_only", "keys_and_experts"]) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.mlp_selection", default=False) parser.add_argument("-moe.block_expert_sel_in_grad", default=False) parser.add_argument("-moe.classification_target", default="sum", choice=["sum", "max"]) parser.add_argument("-moe.recluster_steps", default="", parser=parser.int_list_parser) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-moe.norm_standard_parallel_values", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.topological_sel_reg", default=0.0) parser.add_argument("-moe.topological_expert_reg", default=0.0) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) 
parser.add_argument("-moe.gumbel_select_only", default=False) parser.add_argument("-moe.topk_value_norm_compensation", default=False) parser.add_argument("-moe.norm_expert_scores", default=False) parser.add_argument("-moe.sel_input_cluster_init", default=False) parser.add_argument("-moe.init_norm_mode", default="full") parser.add_argument("-moe.bias", default=False) parser.add_argument("-moe.sel_bias", default=False) parser.add_argument("-moe.rescale_normed", default=False) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.rescale_grads", default=False) parser.add_argument("-moe.gumbel_decay", default=0) parser.add_argument("-moe.sinkhorn_local", default=False) parser.add_argument("-moe.sinkhron_n_iters", default=3) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.expert_size_init", default=False) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.invisible_selection", default=False) parser.add_argument("-moe.slope_multiplier", default=1.0) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-kvmem.linproj", default=False) parser.add_argument("-kvmem.head_merge_topk", default=False) parser.add_argument("-kvmem.load_balance", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.randomize_indices", default=False) parser.add_argument("-kvmem.standard_parallel", default=False) parser.add_argument("-kvmem.query_bias", default=False) parser.add_argument("-kvmem.approx_topk", default=False) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-kvmem.factorize", default=False) parser.add_argument("-kvmem.full_key", default=False) parser.add_argument("-kvmem.key_redundancy_factor", default=1) parser.add_argument("-kvmem.two_stage", default=False) parser.add_argument("-kvmem.head_exclusive", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.universal.nonshared", default=0) parser.add_argument("-transformer.topk_use_norm", default=True) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-transformer.output_mode", default="normal", choice=["normal", "sum", "geometric", "sigmoid"]) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif 
self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dim_feedforward=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier), dropout=self.helper.args.dropout, activation=activation ) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_topk"}: mklayer = lambda: TopkTransformer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, k=self.helper.args.transformer.topk_value, use_norm=self.helper.args.transformer.topk_use_norm, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_kvmem"}:
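The topk_activation in the context above sparsifies activations via kthvalue on the negated tensor. A more direct formulation of a top-k activation (keep the k largest entries along the last dimension, zero the rest), shown as an illustrative alternative rather than the repository's code:

import torch

def topk_keep(x: torch.Tensor, k: int) -> torch.Tensor:
    # threshold at the k-th largest value along the last dim
    thresh = x.topk(k, dim=-1).values[..., -1:]
    return x.masked_fill(x < thresh, 0.0)

x = torch.randn(2, 10)
y = topk_keep(x, 3)
assert (y != 0).sum(-1).eq(3).all()  # 3 survivors per row (ties aside)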
mklayer = lambda: PrelnRelativeKVMemTransformerEncoderLayer(
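The gold line above continues the variant dispatch with one more zero-argument layer factory. get_layers uses such mklayer lambdas so that one fresh layer can be instantiated per depth; a toy sketch of the pattern, with a standard nn module standing in for the repository's layer class:

import torch.nn as nn

def build_stack(n_layers: int, d_model: int) -> nn.ModuleList:
    # calling the factory once per depth yields distinct, untied parameters
    mklayer = lambda: nn.TransformerEncoderLayer(d_model=d_model, nhead=4, batch_first=True)
    return nn.ModuleList(mklayer() for _ in range(n_layers))

layers = build_stack(6, 128)
assert layers[0] is not layers[1]  # separate instances, no weight sharing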
5
2023-10-16 11:26:45+00:00
24k
Jacob-Zhou/gecdi
gec/parser.py
[ { "identifier": "Dataset", "path": "gec/data.py", "snippet": "class Dataset(torch.utils.data.Dataset):\n r\"\"\"\n Dataset that is compatible with :class:`torch.utils.data.Dataset`, serving as a wrapper for manipulating all data fields\n with the operating behaviours defined in :class:`~supar.utils.transform.Transform`.\n The data fields of all the instantiated sentences can be accessed as an attribute of the dataset.\n\n Args:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform` or its derivations.\n The instance holds a series of loading and processing behaviours with regard to the specific data format.\n data (Union[str, Iterable]):\n A filename or a list of instances that will be passed into :meth:`transform.load`.\n cache (bool):\n If ``True``, tries to use the previously cached binarized data for fast loading.\n In this way, sentences are loaded on-the-fly according to the meta data.\n If ``False``, all sentences will be directly loaded into the memory.\n Default: ``False``.\n binarize (bool):\n If ``True``, binarizes the dataset once building it. Only works if ``cache=True``. Default: ``False``.\n bin (str):\n Path for saving binarized files, required if ``cache=True``. Default: ``None``.\n max_len (int):\n Sentences exceeding the length will be discarded. Default: ``None``.\n kwargs (Dict):\n Together with `data`, kwargs will be passed into :meth:`transform.load` to control the loading behaviour.\n\n Attributes:\n transform (Transform):\n An instance of :class:`~supar.utils.transform.Transform`.\n sentences (List[Sentence]):\n A list of sentences loaded from the data.\n Each sentence includes fields obeying the data format defined in ``transform``.\n If ``cache=True``, each is a pointer to the sentence stored in the cache file.\n \"\"\"\n\n def __init__(\n self,\n transform: Transform,\n data: Union[str, Iterable],\n cache: bool = False,\n binarize: bool = False,\n bin: str = None,\n max_len: int = None,\n **kwargs\n ) -> Dataset:\n super(Dataset, self).__init__()\n\n self.transform = transform\n self.data = data\n self.cache = cache\n self.binarize = binarize\n self.bin = bin\n self.max_len = max_len or INF\n self.kwargs = kwargs\n\n if cache:\n if not isinstance(data, str) or not os.path.exists(data):\n raise FileNotFoundError(\"Only files are allowed for binarization, but not found\")\n if self.bin is None:\n self.fbin = data + '.pt'\n else:\n os.makedirs(self.bin, exist_ok=True)\n self.fbin = os.path.join(self.bin, os.path.split(data)[1]) + '.pt'\n if not self.binarize and os.path.exists(self.fbin):\n try:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n except Exception:\n raise RuntimeError(f\"Error found while debinarizing {self.fbin}, which may have been corrupted. 
\"\n \"Try re-binarizing it first\")\n else:\n self.sentences = list(transform.load(data, **kwargs))\n\n def __repr__(self):\n s = f\"{self.__class__.__name__}(\"\n s += f\"n_sentences={len(self.sentences)}\"\n if hasattr(self, 'loader'):\n s += f\", n_batches={len(self.loader)}\"\n if hasattr(self, 'buckets'):\n s += f\", n_buckets={len(self.buckets)}\"\n if self.shuffle:\n s += f\", seed={self.seed}\"\n if self.cache:\n s += f\", cache={self.cache}\"\n if self.binarize:\n s += f\", binarize={self.binarize}\"\n if self.max_len < INF:\n s += f\", max_len={self.max_len}\"\n s += \")\"\n return s\n\n def __len__(self):\n return len(self.sentences)\n\n def __getitem__(self, index):\n return debinarize(self.fbin, self.sentences[index]) if self.cache else self.sentences[index]\n\n def __getattr__(self, name):\n if name not in {f.name for f in self.transform.flattened_fields}:\n raise AttributeError\n if self.cache:\n if os.path.exists(self.fbin) and not self.binarize:\n sentences = self\n else:\n sentences = self.transform.load(self.data, **self.kwargs)\n return (getattr(sentence, name) for sentence in sentences)\n return [getattr(sentence, name) for sentence in self.sentences]\n\n def __getstate__(self):\n return self.__dict__\n\n def __setstate__(self, state):\n self.__dict__.update(state)\n\n @lazy_property\n def sizes(self):\n if not self.cache:\n return [s.size for s in self.sentences]\n return debinarize(self.fbin, 'sizes')\n\n def build(\n self,\n batch_size: int,\n n_buckets: int = 1,\n shuffle: bool = False,\n distributed: bool = False,\n n_workers: int = 0,\n pin_memory: bool = True,\n chunk_size: int = 1000,\n seed: int = 1,\n ) -> Dataset:\n # numericalize all fields\n if not self.cache:\n self.sentences = [i for i in self.transform(self.sentences) if len(i) < self.max_len]\n else:\n # if not forced to do binarization and the binarized file already exists, directly load the meta file\n if os.path.exists(self.fbin) and not self.binarize:\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n else:\n @contextmanager\n def cache(sentences):\n ftemp = tempfile.mkdtemp()\n fs = os.path.join(ftemp, 'sentences')\n fb = os.path.join(ftemp, os.path.basename(self.fbin))\n global global_transform\n global_transform = self.transform\n sentences = binarize({'sentences': progress_bar(sentences)}, fs)[1]['sentences']\n try:\n yield ((sentences[s:s+chunk_size], fs, f\"{fb}.{i}\", self.max_len)\n for i, s in enumerate(range(0, len(sentences), chunk_size)))\n finally:\n del global_transform\n shutil.rmtree(ftemp)\n\n def numericalize(sentences, fs, fb, max_len):\n sentences = global_transform((debinarize(fs, sentence) for sentence in sentences))\n sentences = [i for i in sentences if len(i) < max_len]\n return binarize({'sentences': sentences, 'sizes': [sentence.size for sentence in sentences]}, fb)[0]\n\n logger.info(f\"Seeking to cache the data to {self.fbin} first\")\n # numericalize the fields of each sentence\n if is_master():\n with cache(self.transform.load(self.data, **self.kwargs)) as chunks, mp.Pool(32) as pool:\n results = [pool.apply_async(numericalize, chunk) for chunk in chunks]\n self.sentences = binarize((r.get() for r in results), self.fbin, merge=True)[1]['sentences']\n if is_dist():\n dist.barrier()\n if not is_master():\n self.sentences = debinarize(self.fbin, meta=True)['sentences']\n # NOTE: the final bucket count is roughly equal to n_buckets\n self.buckets = dict(zip(*kmeans(self.sizes, n_buckets)))\n self.loader = DataLoader(transform=self.transform,\n 
dataset=self,\n batch_sampler=Sampler(self.buckets, batch_size, shuffle, distributed, seed=seed),\n num_workers=n_workers,\n collate_fn=collate_fn,\n pin_memory=pin_memory)\n self.seed = seed\n self.shuffle = shuffle\n return self" }, { "identifier": "map_token_ids", "path": "gec/fn.py", "snippet": "def map_token_ids(vocab_0, vocab_1, equal_labels=None):\n \"\"\"\n Map token ids from vocab_0 to vocab_1\n\n Args:\n vocab_0 (dict): vocab_0\n vocab_1 (dict): vocab_1\n equal_labels (dict): equal_labels\n \"\"\"\n if equal_labels is None:\n equal_labels = {}\n return [(i, vocab_1[equal_labels.get(k, k)]) for k, i in vocab_0.items()\n if k in vocab_1]" }, { "identifier": "PerplexityMetric", "path": "gec/metric.py", "snippet": "class PerplexityMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[torch.Tensor] = None,\n golds: Optional[torch.Tensor] = None,\n mask: Optional[torch.BoolTensor] = None,\n reverse: bool = True,\n eps: float = 1e-12) -> PerplexityMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_tokens = 0.\n\n self.tp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n\n self.total_loss = 0.\n\n if loss is not None:\n self(loss, preds, golds, mask)\n\n def __repr__(self):\n s = f\"loss: {self.loss:.4f} PPL: {self.ppl:.4f}\"\n if self.tp > 0:\n s += f\" - TGT: P: {self.p:6.2%} R: {self.r:6.2%} F0.5: {self.f:6.2%}\"\n return s\n\n def __call__(self, loss: float, preds: Tuple[List, torch.Tensor],\n golds: Tuple[List, torch.Tensor],\n mask: torch.BoolTensor) -> PerplexityMetric:\n n_tokens = mask.sum().item()\n self.n += len(mask)\n self.count += 1\n self.n_tokens += n_tokens\n self.total_loss += float(loss) * n_tokens\n\n if preds is not None:\n with tempfile.TemporaryDirectory() as t:\n fsrc, fpred, fgold = os.path.join(t, 'src'), os.path.join(\n t, 'pred'), os.path.join(t, 'gold')\n pred_m2, gold_m2 = os.path.join(t, 'pred.m2'), os.path.join(\n t, 'gold.m2')\n with open(fsrc, 'w') as fs, open(fpred, 'w') as f:\n for s, i in preds:\n fs.write(s + '\\n')\n f.write(i + '\\n')\n with open(fgold, 'w') as f:\n for _, i in golds:\n f.write(i + '\\n')\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fpred}',\n '-out', f'{pred_m2}'\n ])\n subprocess.check_output([\n 'errant_parallel', '-orig', f'{fsrc}', '-cor', f'{fgold}',\n '-out', f'{gold_m2}'\n ])\n out = subprocess.check_output(\n [\n 'errant_compare', '-hyp', f'{pred_m2}', '-ref',\n f'{gold_m2}'\n ],\n stderr=subprocess.STDOUT).decode()\n tp, fp, fn = (int(i) for i in out.split('\\n')[3].split()[:3])\n self.tp += tp\n self.pred += tp + fp\n self.gold += tp + fn\n return self\n\n def __add__(self, other: PerplexityMetric) -> PerplexityMetric:\n metric = PerplexityMetric(eps=self.eps)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.n_tokens = self.n_tokens + other.n_tokens\n metric.total_loss = self.total_loss + other.total_loss\n\n metric.tp = self.tp + other.tp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.f if self.f > 0 else self.ppl\n\n @property\n def loss(self):\n return self.total_loss / self.n_tokens\n\n @property\n def ppl(self):\n return math.pow(2, (self.loss / math.log(2)))\n\n @property\n def p(self):\n return self.tp / (self.pred + self.eps)\n\n @property\n def r(self):\n return self.tp / (self.gold + self.eps)\n\n @property\n def f(self):\n return (1 + 0.5**2) * self.p * self.r / 
(0.5**2 * self.p + self.r +\n self.eps)" }, { "identifier": "SpanMetric", "path": "gec/metric.py", "snippet": "class SpanMetric(Metric):\n def __init__(self,\n loss: Optional[float] = None,\n preds: Optional[List[List[Tuple]]] = None,\n golds: Optional[List[List[Tuple]]] = None,\n reverse: bool = False,\n beta: Optional[float] = 1.,\n eps: float = 1e-12) -> SpanMetric:\n super().__init__(reverse=reverse, eps=eps)\n\n self.n_ucm = 0.0\n self.n_lcm = 0.0\n self.n_tr = 0.0\n self.n_fr = 0.0\n self.n_e = 0.0\n self.n_c = 0.0\n self.utp = 0.0\n self.ltp = 0.0\n self.pred = 0.0\n self.gold = 0.0\n self.beta = beta\n\n if loss is not None:\n self(loss, preds, golds)\n\n def __repr__(self):\n s = f\"ErrorSents: {self.n_e:6.0f} CorrectSents: {self.n_c:6.0f} TR: {self.tr:7.2%} FR: {self.fr:7.2%} \"\n # s += f\"GoldSpans: {self.gold:6.0f} PredSpans: {self.pred:6.0f} \"\n s += f\"UP: {self.up:7.2%} UR: {self.ur:7.2%} UF{'' if self.beta == 1.0 else self.beta}: {self.uf:7.2%} \"\n s += f\"LP: {self.lp:7.2%} LR: {self.lr:7.2%} LF{'' if self.beta == 1.0 else self.beta}: {self.lf:7.2%}\"\n return s\n\n def __call__(self, loss: float, preds: List[List[Tuple]],\n golds: List[List[Tuple]]) -> SpanMetric:\n self.n += len(preds)\n self.count += 1\n self.total_loss += float(loss)\n for pred, gold in zip(preds, golds):\n upred, ugold = Counter([tuple(span[:-1])\n for span in pred]), Counter(\n [tuple(span[:-1]) for span in gold])\n lpred, lgold = Counter([tuple(span) for span in pred\n ]), Counter([tuple(span) for span in gold])\n utp, ltp = list((upred & ugold).elements()), list(\n (lpred & lgold).elements())\n self.n_ucm += len(utp) == len(pred) == len(gold)\n self.n_lcm += len(ltp) == len(pred) == len(gold)\n self.n_tr += ((len(gold) > 0) and (len(pred) > 0))\n self.n_fr += ((len(gold) == 0) and (len(pred) > 0))\n self.n_e += (len(gold) > 0)\n self.n_c += (len(gold) == 0)\n self.utp += len(utp)\n self.ltp += len(ltp)\n self.pred += len(pred)\n self.gold += len(gold)\n return self\n\n def __add__(self, other: SpanMetric) -> SpanMetric:\n metric = SpanMetric(eps=self.eps, beta=self.beta)\n metric.n = self.n + other.n\n metric.count = self.count + other.count\n metric.total_loss = self.total_loss + other.total_loss\n metric.n_ucm = self.n_ucm + other.n_ucm\n metric.n_lcm = self.n_lcm + other.n_lcm\n metric.n_tr = self.n_tr + other.n_tr\n metric.n_fr = self.n_fr + other.n_fr\n metric.n_e = self.n_e + other.n_e\n metric.n_c = self.n_c + other.n_c\n metric.utp = self.utp + other.utp\n metric.ltp = self.ltp + other.ltp\n metric.pred = self.pred + other.pred\n metric.gold = self.gold + other.gold\n metric.reverse = self.reverse or other.reverse\n return metric\n\n @property\n def score(self):\n return self.lf\n\n @property\n def ucm(self):\n return self.n_ucm / (self.n + self.eps)\n\n @property\n def lcm(self):\n return self.n_lcm / (self.n + self.eps)\n\n @property\n def tr(self):\n return self.n_tr / (self.n_e + self.eps)\n\n @property\n def fr(self):\n return self.n_fr / (self.n_c + self.eps)\n\n @property\n def up(self):\n return self.utp / (self.pred + self.eps)\n\n @property\n def ur(self):\n return self.utp / (self.gold + self.eps)\n\n @property\n def uf(self):\n return (1 + self.beta**2) * self.utp / (self.pred +\n (self.beta**2) * self.gold +\n self.eps)\n\n @property\n def lp(self):\n return self.ltp / (self.pred + self.eps)\n\n @property\n def lr(self):\n return self.ltp / (self.gold + self.eps)\n\n @property\n def lf(self):\n return (1 + self.beta**2) * self.ltp / (self.pred +\n (self.beta**2) * self.gold 
+\n self.eps)" }, { "identifier": "Seq2SeqDetectModel", "path": "gec/model.py", "snippet": "class Seq2SeqDetectModel(Seq2SeqModel):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_labels,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=1024,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n del self.classifier\n self.error_classifier = nn.Linear(self.model.config.d_model,\n self.args.n_labels)\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def loss(self, x, tgt, src_error, tgt_error, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, tgt_mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n y = self.decoder_dropout(y)\n # s_src_error = self.error_classifier(x[:, 1:-1])\n s_tgt_error = self.error_classifier(y)\n\n # src_mask = src_mask[:, 2:]\n\n if \"partial\" in self.args.error_schema:\n # src_mask = src_mask & (src_error != self.args.nul_index)\n tgt_mask = tgt_mask & (tgt_error != self.args.nul_index)\n # src_error_loss = self.criterion(s_src_error[src_mask], src_error[src_mask])\n tgt_error_loss = self.criterion(s_tgt_error[tgt_mask],\n tgt_error[tgt_mask])\n # return src_error_loss + tgt_error_loss\n return tgt_error_loss\n\n def decode(self, x, tgt, src_mask, tgt_mask):\n src_mask = src_mask & True\n tgt_mask = tgt_mask & True\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n\n n_shift = 1 if self.args.encoder == 'transformer' else 2\n y, mask = y[:, n_shift:], tgt_mask[:, n_shift:]\n\n s_errors = self.error_classifier(y)\n if \"partial\" in self.args.error_schema:\n s_errors[...,\n self.args.nul_index] = torch.finfo(s_errors.dtype).min\n errors = s_errors.argmax(-1)\n errors[~mask] = -1\n\n return errors" }, { "identifier": "Seq2SeqModel", "path": "gec/model.py", "snippet": "class Seq2SeqModel(Model):\n r\"\"\"\n The implementation of Semantic Role Labeling Parser using span-constrained CRF.\n\n Args:\n n_words (int):\n The size of the word vocabulary.\n n_tags (int):\n The number of POS tags, required if POS tag embeddings are used. 
Default: ``None``.\n n_chars (int):\n The number of characters, required if character-level representations are used. Default: ``None``.\n n_lemmas (int):\n The number of lemmas, required if lemma embeddings are used. Default: ``None``.\n encoder (str):\n Encoder to use.\n ``'lstm'``: BiLSTM encoder.\n ``'bert'``: BERT-like pretrained language model (for finetuning), e.g., ``'bert-base-cased'``.\n Default: ``'lstm'``.\n n_embed (int):\n The size of word embeddings. Default: 100.\n n_pretrained (int):\n The size of pretrained word embeddings. Default: 125.\n n_feat_embed (int):\n The size of feature representations. Default: 100.\n n_char_embed (int):\n The size of character embeddings serving as inputs of CharLSTM, required if using CharLSTM. Default: 50.\n n_char_hidden (int):\n The size of y states of CharLSTM, required if using CharLSTM. Default: 100.\n char_pad_index (int):\n The index of the padding token in the character vocabulary, required if using CharLSTM. Default: 0.\n elmo (str):\n Name of the pretrained ELMo registered in `ELMoEmbedding.OPTION`. Default: ``'original_5b'``.\n elmo_bos_eos (tuple[bool]):\n A tuple of two boolean values indicating whether to keep start/end boundaries of elmo outputs.\n Default: ``(True, False)``.\n bert (str):\n Specifies which kind of language model to use, e.g., ``'bert-base-cased'``.\n This is required if ``encoder='bert'`` or using BERT features. The full list can be found in `transformers`_.\n Default: ``None``.\n n_bert_layers (int):\n Specifies how many last layers to use, required if ``encoder='bert'`` or using BERT features.\n The final outputs would be weighted sum of the y states of these layers.\n Default: 4.\n mix_dropout (float):\n The dropout ratio of BERT layers, required if ``encoder='bert'`` or using BERT features. Default: .0.\n bert_pooling (str):\n Pooling way to get token embeddings.\n ``first``: take the first subtoken. ``last``: take the last subtoken. ``mean``: take a mean over all.\n Default: ``mean``.\n bert_pad_index (int):\n The index of the padding token in BERT vocabulary, required if ``encoder='bert'`` or using BERT features.\n Default: 0.\n freeze (bool):\n If ``True``, freezes BERT parameters, required if using BERT features. Default: ``True``.\n embed_dropout (float):\n The dropout ratio of input embeddings. Default: .2.\n n_encoder_hidden (int):\n The size of LSTM y states. Default: 600.\n n_encoder_layers (int):\n The number of LSTM layers. Default: 3.\n encoder_dropout (float):\n The dropout ratio of encoder layer. Default: .33.\n mlp_dropout (float):\n The dropout ratio of unary edge factor MLP layers. Default: .33.\n pad_index (int):\n The index of the padding token in the word vocabulary. Default: 0.\n unk_index (int):\n The index of the unknown token in the word vocabulary. Default: 1.\n\n .. 
_transformers:\n https://github.com/huggingface/transformers\n \"\"\"\n\n def __init__(self,\n n_words,\n n_tags=None,\n n_chars=None,\n n_lemmas=None,\n encoder='lstm',\n n_embed=100,\n n_pretrained=100,\n n_feat_embed=100,\n n_char_embed=50,\n n_char_hidden=100,\n char_pad_index=0,\n char_dropout=0,\n elmo='original_5b',\n elmo_bos_eos=(True, False),\n bert=None,\n n_bert_layers=4,\n mix_dropout=.0,\n bert_pooling='mean',\n bert_pad_index=0,\n freeze=True,\n embed_dropout=.33,\n n_encoder_hidden=512,\n n_encoder_layers=3,\n encoder_dropout=.1,\n pad_index=0,\n unk_index=1,\n **kwargs):\n super().__init__(**Config().update(locals()))\n\n if self.args.encoder == 'transformer':\n self.token_dropout = TokenDropout(self.args.token_dropout)\n self.decoder = TransformerDecoder(\n layer=TransformerDecoderLayer(\n n_heads=self.args.n_decoder_heads,\n n_model=self.args.n_decoder_hidden,\n n_inner=self.args.n_decoder_inner,\n dropout=self.args.decoder_dropout),\n n_layers=self.args.n_decoder_layers)\n\n else:\n from transformers import AutoModel\n self.model = AutoModel.from_pretrained(self.args.bart,\n dropout=self.args.dropout)\n self.encoder, self.decoder = self.model.encoder, self.model.decoder\n self.decoder_dropout = nn.Dropout(self.args.decoder_dropout)\n self.classifier = nn.Linear(self.args.n_encoder_hidden,\n self.args.n_words)\n self.classifier.weight = (self.word_embed.embed\n if self.args.encoder == 'transformer' else\n self.model.shared).weight\n self.criterion = CrossEntropyLoss(\n label_smoothing=self.args.label_smoothing)\n\n def forward(self, words):\n r\"\"\"\n Args:\n words (~torch.LongTensor): ``[batch_size, seq_len]``.\n Word indices.\n\n Returns:\n ~torch.Tensor:\n Representations for the src sentences of the shape ``[batch_size, seq_len, n_model]``.\n \"\"\"\n # we need to do token dropout, so the TranformerWordEmbedding layer is not invoked here\n if self.args.encoder == 'transformer':\n embed = self.token_dropout(self.word_embed.embed(words))\n embed = embed * self.word_embed.embed_scale + self.word_embed.pos_embed(\n embed)\n embed = self.embed_dropout(embed)\n return self.encoder(embed, words.ne(self.args.pad_index))\n else:\n return self.encoder(input_ids=words,\n attention_mask=words.ne(\n self.args.pad_index))[0]\n\n def loss(self, x, tgt, src_mask, tgt_mask):\n if self.args.encoder == 'transformer':\n tgt_mask = tgt_mask[:, 1:]\n shifted, tgt, = tgt[:, :-1], tgt[:, 1:]\n batch_size, seq_len = tgt.shape\n attn_mask = tgt.new_ones(seq_len, seq_len,\n dtype=torch.bool).tril_()\n y = self.decoder(self.embed(shifted), x, tgt_mask, src_mask,\n attn_mask)\n else:\n shifted = torch.full_like(tgt, self.args.eos_index)\n shifted[:, 1:] = tgt[:, :-1]\n y = self.decoder(input_ids=shifted,\n attention_mask=tgt_mask,\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask)[0]\n tgt_mask[:, 0] = 0\n y = self.decoder_dropout(y)\n s_y = self.classifier(y)\n return self.criterion(s_y[tgt_mask], tgt[tgt_mask])\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n # cached cross_attention states don't have to be reordered -> they are always the same\n reordered_past += (tuple(\n past_state.index_select(0, beam_idx)\n for past_state in layer_past), )\n return reordered_past\n\n def decode(self, x, src_mask):\n batch_size, *_ = x.shape\n beam_size, n_words = self.args.beam_size, self.args.n_words\n\n # repeat the src inputs beam_size times\n # [batch_size * beam_size, ...]\n x = x.unsqueeze(1).repeat(1, 
beam_size, 1, 1).view(-1, *x.shape[1:])\n src_mask = src_mask.unsqueeze(1).repeat(1, beam_size, 1).view(\n -1, *src_mask.shape[1:])\n # initialize the tgt inputs by <bos>\n # [batch_size * beam_size, seq_len]\n tgt = x.new_full((batch_size * beam_size, 1),\n self.args.bos_index,\n dtype=torch.long)\n # [batch_size * beam_size]\n active = src_mask.new_ones(batch_size * beam_size)\n # [batch_size]\n batches = tgt.new_tensor(range(batch_size)) * beam_size\n # accumulated scores\n scores = x.new_full((batch_size, self.args.beam_size),\n MIN).index_fill_(-1, tgt.new_tensor(0), 0).view(-1)\n\n def rank(scores, mask, k):\n scores = scores / mask.sum(-1).unsqueeze(\n -1)**self.args.length_penalty\n return scores.view(batch_size, -1).topk(k, -1)[1]\n\n if self.args.encoder != 'transformer':\n past_key_values = self.decoder(\n input_ids=torch.full_like(tgt[:, :1], self.args.eos_index),\n attention_mask=torch.ones_like(src_mask[:, :1]),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=None,\n use_cache=True)[1]\n\n for t in range(1, min(self.args.max_len + 1, int(1.8 * x.shape[1]))):\n tgt_mask = tgt.ne(self.args.pad_index)\n if self.args.encoder == 'transformer':\n attn_mask = tgt_mask.new_ones(t, t).tril_()\n s_y = self.decoder(self.embed(tgt[active]), x[active],\n tgt_mask[active], src_mask[active],\n attn_mask)\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n # [batch_size * beam_size, n_words]\n s_y = x.new_full((batch_size * beam_size, n_words),\n MIN).masked_scatter_(active.unsqueeze(-1),\n s_y)\n else:\n input_ids = tgt[:, -1:]\n s_y, new_past_key_values = self.decoder(\n input_ids=input_ids,\n attention_mask=torch.cat(\n (torch.ones_like(tgt_mask[:, :1]), tgt_mask), 1),\n encoder_hidden_states=x,\n encoder_attention_mask=src_mask,\n past_key_values=past_key_values,\n use_cache=True)[:2]\n del past_key_values\n past_key_values = new_past_key_values\n # [n_active, n_words]\n s_y = self.classifier(s_y[:, -1]).log_softmax(-1)\n # only allow finished sequences to get <pad>\n s_y[~active] = MIN\n\n s_y[~active, self.args.pad_index] = 0\n\n # [batch_size * beam_size, n_words]\n scores = scores.unsqueeze(-1) + s_y\n # [batch_size, beam_size]\n cands = rank(scores, tgt_mask, beam_size)\n # [batch_size * beam_size]\n scores = scores.view(batch_size, -1).gather(-1, cands).view(-1)\n # beams, tokens = cands // n_words, cands % n_words\n beams, tokens = cands.div(\n n_words, rounding_mode='floor'), (cands % n_words).view(-1, 1)\n indices = (batches.unsqueeze(-1) + beams).view(-1)\n # [batch_size * beam_size, seq_len + 1]\n tgt = torch.cat((tgt[indices], tokens), 1)\n past_key_values = self._reorder_cache(past_key_values, indices)\n active = tokens.ne(\n tokens.new_tensor(\n (self.args.eos_index, self.args.pad_index))).all(-1)\n\n if not active.any():\n break\n cands = rank(scores.view(-1, 1), tgt.ne(self.args.pad_index),\n self.args.topk)\n return tgt[(batches.unsqueeze(-1) + cands).view(-1)].view(\n batch_size, self.args.topk, -1)" }, { "identifier": "Field", "path": "gec/transform.py", "snippet": "class Field(supar.utils.Field):\n r\"\"\"\n Defines a datatype together with instructions for converting to :class:`~torch.Tensor`.\n :class:`Field` models common text processing datatypes that can be represented by tensors.\n It holds a :class:`~supar.utils.vocab.Vocab` object that defines the set of possible values\n for elements of the field and their corresponding numerical representations.\n The 
:class:`Field` object also holds other parameters relating to how a datatype\n should be numericalized, such as a tokenization method.\n\n Args:\n name (str):\n The name of the field.\n pad_token (str):\n The string token used as padding. Default: ``None``.\n unk_token (str):\n The string token used to represent OOV words. Default: ``None``.\n bos_token (str):\n A token that will be prepended to every example using this field, or ``None`` for no `bos_token`.\n Default: ``None``.\n eos_token (str):\n A token that will be appended to every example using this field, or ``None`` for no `eos_token`.\n lower (bool):\n Whether to lowercase the text in this field. Default: ``False``.\n use_vocab (bool):\n Whether to use a :class:`~supar.utils.vocab.Vocab` object.\n If ``False``, the data in this field should already be numerical.\n Default: ``True``.\n tokenize (function):\n The function used to tokenize strings using this field into sequential examples. Default: ``None``.\n fn (function):\n The function used for preprocessing the examples. Default: ``None``.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n self.padding_side = kwargs.pop('padding_side') if 'padding_side' in kwargs else 'right'\n super().__init__(*args, **kwargs)\n\n def compose(self, batch: Iterable[torch.Tensor]) -> torch.Tensor:\n r\"\"\"\n Composes a batch of sequences into a padded tensor.\n\n Args:\n batch (Iterable[~torch.Tensor]):\n A list of tensors.\n\n Returns:\n A padded tensor converted to proper device.\n \"\"\"\n\n return pad(batch, self.pad_index, padding_side=self.padding_side).to(self.device, non_blocking=True)" }, { "identifier": "Text", "path": "gec/transform.py", "snippet": "class Text(Transform):\n\n fields = ['SRC', 'TGT']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None\n ) -> Text:\n super().__init__()\n\n self.SRC = SRC\n self.TGT = TGT\n\n @property\n def src(self):\n return self.SRC,\n\n @property\n def tgt(self):\n return self.TGT,\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (str or Iterable):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n index, sentence = 0, []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n sentence = TextSentence(self, sentence, index)\n yield sentence\n index += 1\n sentence = []\n else:\n sentence.append(line)" }, { "identifier": "Tree", "path": "gec/transform.py", "snippet": "class Tree(Transform):\n\n fields = ['SRC', 'TGT', 'SRCERROR', 'TGTERROR']\n\n def __init__(\n self,\n SRC: Optional[Union[Field, 
Iterable[Field]]] = None,\n TGT: Optional[Union[Field, Iterable[Field]]] = None,\n SRCERROR: Optional[Union[Field, Iterable[Field]]] = None,\n TGTERROR: Optional[Union[Field, Iterable[Field]]] = None,\n **kwargs\n ) -> Tree:\n super().__init__()\n self.error_schema = kwargs.pop('error_schema') if 'error_schema' in kwargs else 'last'\n self.fine_error_type = kwargs.pop('fine_error_type') if 'fine_error_type' in kwargs else False\n\n self.SRC = SRC\n self.TGT = TGT\n self.SRCERROR = SRCERROR\n self.TGTERROR = TGTERROR\n\n @property\n def src(self):\n return self.SRC, self.TGT\n\n @property\n def tgt(self):\n return self.SRCERROR, self.TGTERROR\n\n def load(\n self,\n data: Union[str, Iterable],\n lang: Optional[str] = None,\n **kwargs\n ) -> Iterable[TextSentence]:\n r\"\"\"\n Loads the data in Text-X format.\n Also supports for loading data from Text-U file with comments and non-integer IDs.\n\n Args:\n data (Union[str, Iterable]):\n A filename or a list of instances.\n lang (str):\n Language code (e.g., ``en``) or language name (e.g., ``English``) for the text to tokenize.\n ``None`` if tokenization is not required.\n Default: ``None``.\n\n Returns:\n A list of :class:`TextSentence` instances.\n \"\"\"\n\n if lang is not None:\n tokenizer = Tokenizer(lang)\n if isinstance(data, str) and os.path.exists(data):\n f = open(data)\n if data.endswith('.txt'):\n lines = (i\n for s in f\n if len(s) > 1\n for i in StringIO((s.split() if lang is None else tokenizer(s)) + '\\n'))\n else:\n lines = f\n else:\n if lang is not None:\n data = [tokenizer(s) for s in ([data] if isinstance(data, str) else data)]\n else:\n data = [data] if isinstance(data[0], str) else data\n lines = (i for s in data for i in StringIO(s + '\\n'))\n\n def consume(lines, chunksize=10000):\n index, sentence, chunk = 0, [], []\n for line in lines:\n line = line.strip()\n if len(line) == 0:\n chunk.append((sentence, index))\n if len(chunk) == chunksize:\n yield chunk\n chunk = []\n index += 1\n sentence = []\n else:\n sentence.append(line)\n if len(chunk) > 0:\n yield chunk\n\n @contextmanager\n def cache(lines):\n global global_transform\n global_transform = self\n ftemp = tempfile.mkdtemp()\n fbin = os.path.join(ftemp, 'data')\n try:\n yield ((chunk, f\"{fbin}.{i}\") for i, chunk in enumerate(consume(lines))), fbin\n finally:\n if dist.is_initialized() and not is_master():\n dist.barrier()\n del global_transform\n shutil.rmtree(ftemp)\n\n with cache(lines) as (chunks, fbin):\n if is_master():\n def process(chunk, fb):\n sentences = [TreeSentence(global_transform, *s) for s in progress_bar(chunk)]\n sentences = [s for s in sentences if s.vaild]\n return binarize({'sentences': sentences}, fb)[0]\n with mp.Pool(32) as pool:\n results = [pool.apply_async(process, (chunk, fb)) for chunk, fb in chunks]\n binarize((r.get() for r in results), fbin, merge=True)\n if dist.is_initialized() and not is_master():\n fbin = gather(fbin)[0]\n dist.barrier()\n for s in debinarize(fbin, meta=True)['sentences']:\n yield debinarize(fbin, s)" } ]
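The decode routine in the beam-search snippet above ranks candidates with a length-normalized score before taking the top k per batch element. A minimal sketch of that ranking step on toy tensors; the batch size, beam size, vocabulary size, and penalty of 1.0 are illustrative stand-ins for the repo's self.args values.

import torch

def rank(scores, mask, k, length_penalty=1.0, batch_size=2):
    # scores: [batch_size * beam_size, n_words] accumulated log-probabilities
    # mask:   [batch_size * beam_size, seq_len] marking non-pad target tokens
    # Normalize by length ** penalty so shorter beams are not unfairly favored,
    # then pick the k best flattened (beam, token) candidates per batch element.
    scores = scores / mask.sum(-1, keepdim=True) ** length_penalty
    return scores.view(batch_size, -1).topk(k, -1)[1]

batch_size, beam_size, n_words = 2, 3, 5
scores = torch.randn(batch_size * beam_size, n_words)
mask = torch.ones(batch_size * beam_size, 4, dtype=torch.bool)
cands = rank(scores, mask, k=beam_size)
# Each flat index encodes (beam, token): beam = idx // n_words, token = idx % n_words.
beams, tokens = cands.div(n_words, rounding_mode='floor'), cands % n_words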
import os
import shutil
import tempfile
import math

import dill
import torch
import torch.distributed as dist
from datetime import datetime, timedelta
from typing import Iterable, Union

from gec.data import Dataset
from gec.fn import map_token_ids
from supar.parser import Parser
from supar.utils import Config
from supar.utils.common import MIN, NUL, UNK
from supar.utils.field import RawField
from supar.utils.fn import set_rng_state
from supar.utils.logging import get_logger, init_logger, progress_bar
from supar.utils.metric import Metric
from supar.utils.optim import PolynomialLR
from supar.utils.parallel import DistributedDataParallel as DDP, gather, is_dist
from supar.utils.parallel import is_master
from supar.utils.tokenizer import TransformerTokenizer
from supar.utils.transform import AttachJuxtaposeTree, Batch
from torch.cuda.amp import GradScaler
from torch.optim import AdamW
from torch.optim.lr_scheduler import ExponentialLR
from torch.nn.functional import embedding

from .metric import PerplexityMetric, SpanMetric
from .model import Seq2SeqDetectModel, Seq2SeqModel
from .transform import Field, Text, Tree
from torch.distributed.algorithms.ddp_comm_hooks.default_hooks import fp16_compress_hook
from transformers import AutoTokenizer, GPT2LMHeadModel
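Tree.load in the context above preprocesses the corpus in fixed-size chunks across a multiprocessing pool, writing each processed chunk to a temporary binary file before merging. A minimal sketch of that pattern, with pickle standing in for supar's binarize/debinarize helpers and whitespace splitting standing in for TreeSentence construction; the chunk size and file names are illustrative.

import os
import pickle
import tempfile
from multiprocessing import Pool

def consume(lines, chunksize=3):
    # Group raw lines into fixed-size chunks, mirroring Tree.load's consume().
    chunk = []
    for line in lines:
        chunk.append(line)
        if len(chunk) == chunksize:
            yield chunk
            chunk = []
    if chunk:
        yield chunk

def process(chunk, path):
    # Stand-in for sentence construction plus binarize(): tokenize and dump.
    sentences = [line.split() for line in chunk]
    with open(path, 'wb') as f:
        pickle.dump(sentences, f)
    return path

if __name__ == '__main__':
    lines = [f"sentence number {i}" for i in range(10)]
    ftemp = tempfile.mkdtemp()
    with Pool(2) as pool:
        results = [pool.apply_async(process, (chunk, os.path.join(ftemp, f"data.{i}")))
                   for i, chunk in enumerate(consume(lines))]
        paths = [r.get() for r in results]
    sentences = []
    for p in paths:  # merge phase: read the chunk files back in order
        with open(p, 'rb') as f:
            sentences.extend(pickle.load(f))
    assert len(sentences) == 10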
14,429
                self.checkpoint_state_dict.pop('optimizer_state_dict'))
            self.scheduler.load_state_dict(
                self.checkpoint_state_dict.pop('scheduler_state_dict'))
            self.scaler.load_state_dict(
                self.checkpoint_state_dict.pop('scaler_state_dict'))
            set_rng_state(self.checkpoint_state_dict.pop('rng_state'))
            for k, v in self.checkpoint_state_dict.items():
                setattr(self, k, v)
            train.loader.batch_sampler.epoch = self.epoch
        except AttributeError:
            logger.warning(
                "No checkpoint found. Try re-launching the training procedure instead"
            )

        for epoch in range(self.epoch, args.epochs + 1):
            start = datetime.now()
            bar, metric = progress_bar(train.loader), Metric()

            logger.info(f"Epoch {epoch} / {args.epochs}:")
            self.model.train()
            if self.epoch == 1:
                torch.cuda.empty_cache()
            with self.join():
                # we should zero `step` as the number of batches in different processes is not necessarily equal
                self.step = 0
                for batch in bar:
                    with self.sync():
                        with torch.autocast(self.device, enabled=self.args.amp):
                            loss = self.train_step(batch)
                        self.backward(loss)
                    if self.sync_grad:
                        self.clip_grad_norm_(self.model.parameters(),
                                             self.args.clip)
                        self.scaler.step(self.optimizer)
                        self.scaler.update()
                        self.scheduler.step()
                        self.optimizer.zero_grad(True)
                    bar.set_postfix_str(
                        f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}"
                    )
                    self.step += 1
                logger.info(f"{bar.postfix}")
            self.model.eval()
            with self.join(), torch.autocast(self.device, enabled=self.args.amp):
                metric = self.reduce(
                    sum([self.eval_step(i) for i in progress_bar(dev.loader)],
                        Metric()))
                logger.info(f"{'dev:':5} {metric}")
                if args.test:
                    test_metric = sum(
                        [self.eval_step(i) for i in progress_bar(test.loader)],
                        Metric())
                    logger.info(f"{'test:':5} {self.reduce(test_metric)}")

            t = datetime.now() - start
            self.epoch += 1
            self.patience -= 1
            self.elapsed += t

            if metric > self.best_metric:
                self.best_e, self.patience, self.best_metric = epoch, patience, metric
                if is_master():
                    self.save_checkpoint(args.path)
                logger.info(f"{t}s elapsed (saved)\n")
            else:
                logger.info(f"{t}s elapsed\n")
            if self.patience < 1:
                break
        if dist.is_initialized():
            dist.barrier()

        best = self.load(**args)
        # only allow the master device to save models
        if is_master():
            best.save(args.path)

        logger.info(f"Epoch {self.best_e} saved")
        logger.info(f"{'dev:':5} {self.best_metric}")
        if args.test:
            best.model.eval()
            with best.join():
                test_metric = sum(
                    [best.eval_step(i) for i in progress_bar(test.loader)],
                    Metric())
                logger.info(f"{'test:':5} {best.reduce(test_metric)}")
        logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch")

    def train_step(self, batch: Batch) -> torch.Tensor:
        src, tgt, _, src_error, _, tgt_error = batch
        src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne(
            self.args.pad_index)
        x = self.model(src)
        loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask,
                               tgt_mask)
        return loss

    @torch.no_grad()
    def eval_step(self, batch: Batch) -> PerplexityMetric:
        src, tgt, _, src_error, tgt_error_raw, tgt_error = batch
        src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne(
            self.args.pad_index)
        x = self.model(src)
        loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask,
                               tgt_mask)

        def error_label_factorize(errors):
            return sum(
                [[(i, i + 1, e) for e in eb.split("::")]
                 for i, eb in enumerate(errors)
                 if eb not in {'CORRECT', NUL}], [])

        ged_golds = [error_label_factorize(e) for e in tgt_error_raw]
        ged_preds = [
            error_label_factorize(
                [self.TGT_ERROR.vocab[i] for i in e if i >= 0])
            for e in self.model.decode(x, tgt, src_mask, tgt_mask).tolist()
        ]
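The loop above follows the standard mixed-precision recipe: forward under torch.autocast, backward on the scaled loss, unscale (inside the parser's clip_grad_norm_ helper) before clipping, then step the scaler and scheduler and zero the gradients. A self-contained sketch on a toy model; the learning rate, gamma, and clip value are placeholders, and the parser's backward/clip helpers are inlined here.

import torch
from torch import nn
from torch.cuda.amp import GradScaler

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = nn.Linear(16, 4).to(device)
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3)
scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.999)
scaler = GradScaler(enabled=(device == 'cuda'))  # all scaler calls are no-ops on CPU

for step in range(3):
    x = torch.randn(8, 16, device=device)
    with torch.autocast(device, enabled=(device == 'cuda')):
        loss = model(x).pow(2).mean()
    scaler.scale(loss).backward()       # backward on the scaled loss
    scaler.unscale_(optimizer)          # unscale so clipping sees true gradient norms
    nn.utils.clip_grad_norm_(model.parameters(), 5.0)
    scaler.step(optimizer)              # skips the update if gradients overflowed
    scaler.update()
    scheduler.step()
    optimizer.zero_grad(set_to_none=True)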
# -*- coding: utf-8 -*- logger = get_logger(__name__) class Seq2SeqParser(Parser): NAME = 'seq2seq' MODEL = Seq2SeqModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: args.bin = os.path.join(os.path.dirname(args.path), 'bin') train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") self.optimizer = AdamW(self.model.parameters(), args.lr, (args.mu, args.nu), args.eps, args.weight_decay) steps = len(train.loader) * epochs // args.update_steps self.scheduler = PolynomialLR(self.optimizer, warmup_steps=self.args.warmup_steps, steps=steps) self.scaler = GradScaler(enabled=args.amp) if dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. 
Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def evaluate(self, data: Union[str, Iterable], batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, punct: bool = False, tree: bool = True, proj: bool = False, partial: bool = False, verbose: bool = True, **kwargs): return super().evaluate(**Config().update(locals())) def predict(self, data: Union[str, Iterable], pred: str = None, lang: str = None, prob: bool = False, batch_size: int = 5000, buckets: int = 8, workers: int = 0, amp: bool = False, cache: bool = False, tree: bool = True, proj: bool = False, verbose: bool = True, **kwargs): return super().predict(**Config().update(locals())) def train_step(self, batch: Batch) -> torch.Tensor: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt = batch src_mask, tgt_mask = batch.mask, tgt.ne(self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_mask, tgt_mask) preds = golds = None if self.args.eval_tgt: golds = [(s.values[0], s.values[1]) for s in batch.sentences] preds = [(s.values[0], self.TGT.tokenize.decode(i[0])) for s, i in zip(batch.sentences, self.model.decode(x, batch.mask).tolist()) ] return PerplexityMetric(loss, preds, golds, tgt_mask, not self.args.eval_tgt) @torch.no_grad() def pred_step(self, batch: Batch) -> Batch: src, = batch x = 
self.model(src) tgt = self.model.decode(x, batch.mask) batch.tgt = [[self.TGT.tokenize.decode(cand) for cand in i] for i in tgt.tolist()] return batch @classmethod def build(cls, path, min_freq=2, fix_len=20, **kwargs): r""" Build a brand-new Parser, including initialization of all data fields and model parameters. Args: path (str): The path of the model to be saved. min_freq (str): The minimum frequency needed to include a token in the vocabulary. Default: 2. fix_len (int): The max length of all subword pieces. The excess part of each piece will be truncated. Required if using CharLSTM/BERT. Default: 20. kwargs (dict): A dict holding the unconsumed arguments. """ args = Config(**locals()) os.makedirs(os.path.dirname(path) or './', exist_ok=True) if os.path.exists(path) and not args.build: return cls.load(**args) logger.info("Building the fields") t = TransformerTokenizer(name=args.bart) SRC = Field('src', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) TGT = Field('tgt', pad=t.pad, unk=t.unk, bos=t.bos, eos=t.eos, tokenize=t) transform = Text(SRC=SRC, TGT=TGT) # share the vocab SRC.vocab = TGT.vocab = t.vocab args.update({ 'n_words': len(SRC.vocab), 'pad_index': SRC.pad_index, 'unk_index': SRC.unk_index, 'bos_index': SRC.bos_index, 'eos_index': SRC.eos_index }) logger.info(f"{transform}") logger.info("Building the model") model = cls.MODEL(**args) logger.info(f"{model}\n") parser = cls(args, model, transform) parser.model.to(parser.device) return parser class Seq2SeqDetector(Seq2SeqParser): NAME = 'seq2seq' MODEL = Seq2SeqDetectModel def __init__(self, *args, **kwargs): super().__init__(*args, **kwargs) self.SRC = self.transform.SRC self.TGT = self.transform.TGT (_, self.TGT_ERROR) = self.transform.TGTERROR def train(self, train: Union[str, Iterable], dev: Union[str, Iterable], test: Union[str, Iterable], epochs: int, patience: int, batch_size: int = 5000, update_steps: int = 1, buckets: int = 32, workers: int = 0, clip: float = 5.0, amp: bool = False, cache: bool = False, verbose: bool = True, **kwargs) -> None: args = self.args.update(locals()) init_logger(logger, verbose=args.verbose) self.transform.train() batch_size = batch_size // update_steps if dist.is_initialized(): batch_size = batch_size // dist.get_world_size() logger.info("Loading the data") if args.cache: if args.bin_path is None: args.bin = os.path.join(os.path.dirname(args.path), 'bin') else: args.bin = args.bin_path train = Dataset(self.transform, args.train, **args).build(batch_size, buckets, True, dist.is_initialized(), workers, chunk_size=args.chunk_size, seed=args.seed) dev = Dataset(self.transform, args.dev, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'train:':6} {train}") if not args.test: logger.info(f"{'dev:':6} {dev}\n") else: test = Dataset(self.transform, args.test, **args).build(batch_size, buckets, False, dist.is_initialized(), workers) logger.info(f"{'dev:':6} {dev}") logger.info(f"{'test:':6} {test}\n") def ged_param(name): if name.startswith("encoder."): return False elif name.startswith("decoder."): return False else: return True no_decay = [] self.optimizer = AdamW([{ 'params': p, 'lr': args.lr * (1 if not ged_param(n) else args.lr_rate), "weight_decay": args.weight_decay if not any(nd in n for nd in no_decay) else 0.0, } for n, p in self.model.named_parameters()], args.lr, (args.mu, args.nu), args.eps, args.weight_decay) self.scheduler = ExponentialLR(self.optimizer, args.decay**(1 / args.decay_steps)) self.scaler = GradScaler(enabled=args.amp) if 
dist.is_initialized(): self.model = DDP(self.model, device_ids=[args.local_rank], find_unused_parameters=args.get( 'find_unused_parameters', True)) if args.amp: self.model.register_comm_hook(dist.group.WORLD, fp16_compress_hook) self.step, self.epoch, self.best_e, self.patience, self.n_batches = 1, 1, 1, patience, len( train.loader) self.best_metric, self.elapsed = Metric(), timedelta() if self.args.checkpoint: try: self.optimizer.load_state_dict( self.checkpoint_state_dict.pop('optimizer_state_dict')) self.scheduler.load_state_dict( self.checkpoint_state_dict.pop('scheduler_state_dict')) self.scaler.load_state_dict( self.checkpoint_state_dict.pop('scaler_state_dict')) set_rng_state(self.checkpoint_state_dict.pop('rng_state')) for k, v in self.checkpoint_state_dict.items(): setattr(self, k, v) train.loader.batch_sampler.epoch = self.epoch except AttributeError: logger.warning( "No checkpoint found. Try re-launching the traing procedure instead" ) for epoch in range(self.epoch, args.epochs + 1): start = datetime.now() bar, metric = progress_bar(train.loader), Metric() logger.info(f"Epoch {epoch} / {args.epochs}:") self.model.train() if self.epoch == 1: torch.cuda.empty_cache() with self.join(): # we should zero `step` as the number of batches in different processes is not necessarily equal self.step = 0 for batch in bar: with self.sync(): with torch.autocast(self.device, enabled=self.args.amp): loss = self.train_step(batch) self.backward(loss) if self.sync_grad: self.clip_grad_norm_(self.model.parameters(), self.args.clip) self.scaler.step(self.optimizer) self.scaler.update() self.scheduler.step() self.optimizer.zero_grad(True) bar.set_postfix_str( f"lr: {self.scheduler.get_last_lr()[0]:.4e} - loss: {loss:.4f}" ) self.step += 1 logger.info(f"{bar.postfix}") self.model.eval() with self.join(), torch.autocast(self.device, enabled=self.args.amp): metric = self.reduce( sum([self.eval_step(i) for i in progress_bar(dev.loader)], Metric())) logger.info(f"{'dev:':5} {metric}") if args.test: test_metric = sum( [self.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {self.reduce(test_metric)}") t = datetime.now() - start self.epoch += 1 self.patience -= 1 self.elapsed += t if metric > self.best_metric: self.best_e, self.patience, self.best_metric = epoch, patience, metric if is_master(): self.save_checkpoint(args.path) logger.info(f"{t}s elapsed (saved)\n") else: logger.info(f"{t}s elapsed\n") if self.patience < 1: break if dist.is_initialized(): dist.barrier() best = self.load(**args) # only allow the master device to save models if is_master(): best.save(args.path) logger.info(f"Epoch {self.best_e} saved") logger.info(f"{'dev:':5} {self.best_metric}") if args.test: best.model.eval() with best.join(): test_metric = sum( [best.eval_step(i) for i in progress_bar(test.loader)], Metric()) logger.info(f"{'test:':5} {best.reduce(test_metric)}") logger.info(f"{self.elapsed}s elapsed, {self.elapsed / epoch}s/epoch") def train_step(self, batch: Batch) -> torch.Tensor: src, tgt, _, src_error, _, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, src_mask, tgt_mask) return loss @torch.no_grad() def eval_step(self, batch: Batch) -> PerplexityMetric: src, tgt, _, src_error, tgt_error_raw, tgt_error = batch src_mask, tgt_mask = src.ne(self.args.pad_index), tgt.ne( self.args.pad_index) x = self.model(src) loss = self.model.loss(x, tgt, src_error, tgt_error, 
src_mask, tgt_mask) def error_label_factorize(errors): return sum( [[(i, i + 1, e) for e in eb.split("::")] for i, eb in enumerate(errors) if eb not in {'CORRECT', NUL}], []) ged_golds = [error_label_factorize(e) for e in tgt_error_raw] ged_preds = [ error_label_factorize( [self.TGT_ERROR.vocab[i] for i in e if i >= 0]) for e in self.model.decode(x, tgt, src_mask, tgt_mask).tolist() ]
return SpanMetric(loss, ged_preds, ged_golds)
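The returned SpanMetric compares sets of (start, end, label) spans. eval_step builds them by factorizing per-token GED labels: every token i whose label is not CORRECT (or the null placeholder) yields one (i, i + 1, sub-label) triple per '::'-joined sub-label. A toy run with made-up label strings, followed by the set-overlap precision/recall such a metric reduces to; the nul parameter stands in for supar's NUL constant.

def error_label_factorize(errors, nul='<nul>'):
    # One (start, end, label) span per '::'-separated sub-label.
    return sum(
        [[(i, i + 1, e) for e in eb.split("::")]
         for i, eb in enumerate(errors)
         if eb not in {'CORRECT', nul}], [])

gold = error_label_factorize(['CORRECT', 'R:SPELL', 'M:PUNCT::R:ORTH'])
pred = error_label_factorize(['CORRECT', 'R:SPELL', 'R:ORTH'])
# gold: [(1, 2, 'R:SPELL'), (2, 3, 'M:PUNCT'), (2, 3, 'R:ORTH')]
tp = len(set(gold) & set(pred))
precision, recall = tp / len(pred), tp / len(gold)  # 1.0, 0.667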
3
2023-10-18 10:55:33+00:00
24k
boppreh/hello_tls
src/hello_tls/protocol.py
[ { "identifier": "Protocol", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Protocol(Enum):\n # Keep protocols in order of preference.\n TLS1_3 = b\"\\x03\\x04\"\n TLS1_2 = b\"\\x03\\x03\"\n TLS1_1 = b\"\\x03\\x02\"\n TLS1_0 = b\"\\x03\\x01\"\n SSLv3 = b\"\\x03\\x00\"\n\n def __repr__(self):\n return self.name\n def __lt__(self, other):\n if self.__class__ != other.__class__:\n return NotImplemented\n return self.value < other.value" }, { "identifier": "RecordType", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class RecordType(Enum):\n INVALID = b'\\x00' # Unused in this script.\n CHANGE_CIPHER_SPEC = b'\\x14' # Unused in this script.\n ALERT = b'\\x15'\n HANDSHAKE = b'\\x16'\n APPLICATION_DATA = b'\\x17' # Unused in this script." }, { "identifier": "HandshakeType", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class HandshakeType(Enum):\n client_hello = b'\\x01'\n server_hello = b'\\x02'\n new_session_ticket = b'\\x04'\n end_of_early_data = b'\\x05'\n encrypted_extensions = b'\\x08'\n certificate = b'\\x0B'\n server_key_exchange = b'\\x0C'\n certificate_request = b'\\x0D'\n server_hello_done = b'\\x0E'\n certificate_verify = b'\\x0F'\n finished = b'\\x14'\n certificate_status = b'\\x16'\n key_update = b'\\x18'\n message_hash = b'\\x19'" }, { "identifier": "CompressionMethod", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CompressionMethod(Enum):\n NULL = b'\\x00'\n DEFLATE = b'\\x01'" }, { "identifier": "CipherSuite", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class CipherSuite(Enum):\n def __repr__(self):\n return self.name\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each cipher suite with the protocols it's supported at.\n # Default to all but TLS 1.3, because that's the most common.\n def __init__(self, _: bytes, protocols: Sequence[Protocol] = (Protocol.SSLv3, Protocol.TLS1_0, Protocol.TLS1_1, Protocol.TLS1_2)):\n self.protocols = protocols\n\n # Pseudo cipher suite, not actually picked.\n #TLS_EMPTY_RENEGOTIATION_INFO_SCSV = b\"\\x00\\xff\"\n\n # TLS 1.3 cipher suites.\n TLS_AES_128_GCM_SHA256 = b\"\\x13\\x01\", (Protocol.TLS1_3,)\n TLS_AES_256_GCM_SHA384 = b\"\\x13\\x02\", (Protocol.TLS1_3,)\n TLS_CHACHA20_POLY1305_SHA256 = b\"\\x13\\x03\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_SHA256 = b\"\\x13\\x04\", (Protocol.TLS1_3,)\n TLS_AES_128_CCM_8_SHA256 = b\"\\x13\\x05\", (Protocol.TLS1_3,)\n\n # Cipher suite that had its number reassigned.\n OLD_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xcc\\x13'\n \n # Cipher suites adapted from IANA assignments:\n # https://www.iana.org/assignments/tls-parameters/tls-parameters.xhtml#tls-parameters-4\n TLS_AEGIS_128L_SHA256 = b'\\x13\\x07' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_AEGIS_256_SHA384 = b'\\x13\\x06' # [draft-irtf-cfrg-aegis-aead-00]\n TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x19' # [RFC4346]\n TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x17' # [RFC4346][RFC6347]\n TLS_DH_anon_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1B' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA = b'\\x00\\x34' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_CBC_SHA256 = b'\\x00\\x6C' # [RFC5246]\n TLS_DH_anon_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA6' # [RFC5288]\n TLS_DH_anon_WITH_AES_256_CBC_SHA = b'\\x00\\x3A' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6D' # [RFC5246]\n TLS_DH_anon_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA7' # [RFC5288]\n TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x46' # [RFC6209]\n 
TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5A' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x47' # [RFC6209]\n TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5B' # [RFC6209]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x46' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBF' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x84' # [RFC6367]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x89' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC5' # [RFC5932]\n TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x85' # [RFC6367]\n TLS_DH_anon_WITH_DES_CBC_SHA = b'\\x00\\x1A' # [RFC8996]\n TLS_DH_anon_WITH_RC4_128_MD5 = b'\\x00\\x18' # [RFC5246][RFC6347]\n TLS_DH_anon_WITH_SEED_CBC_SHA = b'\\x00\\x9B' # [RFC4162]\n TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0B' # [RFC4346]\n TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0D' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x30' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3E' # [RFC5246]\n TLS_DH_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA4' # [RFC5288]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x36' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x68' # [RFC5246]\n TLS_DH_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA5' # [RFC5288]\n TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3E' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x58' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3F' # [RFC6209]\n TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x59' # [RFC6209]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x42' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBB' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x82' # [RFC6367]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x85' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC1' # [RFC5932]\n TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x83' # [RFC6367]\n TLS_DH_DSS_WITH_DES_CBC_SHA = b'\\x00\\x0C' # [RFC8996]\n TLS_DH_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x97' # [RFC4162]\n TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x0E' # [RFC4346]\n TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x10' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x31' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3F' # [RFC5246]\n TLS_DH_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA0' # [RFC5288]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x37' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x69' # [RFC5246]\n TLS_DH_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA1' # [RFC5288]\n TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x40' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x54' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x41' # [RFC6209]\n TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x55' # [RFC6209]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x43' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBC' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7E' # [RFC6367]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x86' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC2' # [RFC5932]\n TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7F' # [RFC6367]\n TLS_DH_RSA_WITH_DES_CBC_SHA = b'\\x00\\x0F' # [RFC8996]\n TLS_DH_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x98' # [RFC4162]\n TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x11' # [RFC4346]\n TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x13' # 
[RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA = b'\\x00\\x32' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 = b'\\x00\\x40' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA2' # [RFC5288]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA = b'\\x00\\x38' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6A' # [RFC5246]\n TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA3' # [RFC5288]\n TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x42' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x56' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x43' # [RFC6209]\n TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x57' # [RFC6209]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x44' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBD' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x80' # [RFC6367]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x87' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC3' # [RFC5932]\n TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x81' # [RFC6367]\n TLS_DHE_DSS_WITH_DES_CBC_SHA = b'\\x00\\x12' # [RFC8996]\n TLS_DHE_DSS_WITH_SEED_CBC_SHA = b'\\x00\\x99' # [RFC4162]\n TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8F' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x90' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB2' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_128_CCM = b'\\xC0\\xA6' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAA' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x91' # [RFC4279]\n TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB3' # [RFC5487]\n TLS_DHE_PSK_WITH_AES_256_CCM = b'\\xC0\\xA7' # [RFC6655]\n TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAB' # [RFC5487]\n TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x66' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6C' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x67' # [RFC6209]\n TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6D' # [RFC6209]\n TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x96' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x90' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x97' # [RFC6367]\n TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x91' # [RFC6367]\n TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAD' # [RFC7905]\n TLS_DHE_PSK_WITH_NULL_SHA = b'\\x00\\x2D' # [RFC4785]\n TLS_DHE_PSK_WITH_NULL_SHA256 = b'\\x00\\xB4' # [RFC5487]\n TLS_DHE_PSK_WITH_NULL_SHA384 = b'\\x00\\xB5' # [RFC5487]\n TLS_DHE_PSK_WITH_RC4_128_SHA = b'\\x00\\x8E' # [RFC4279][RFC6347]\n TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x14' # [RFC4346]\n TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x16' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x33' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x67' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_128_CCM = b'\\xC0\\x9E' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA2' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9E' # [RFC5288]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x39' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x6B' # [RFC5246]\n TLS_DHE_RSA_WITH_AES_256_CCM = b'\\xC0\\x9F' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA3' # [RFC6655]\n TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9F' # [RFC5288]\n TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x44' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x52' # [RFC6209]\n 
TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x45' # [RFC6209]\n TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x53' # [RFC6209]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x45' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBE' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7C' # [RFC6367]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x88' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC4' # [RFC5932]\n TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7D' # [RFC6367]\n TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAA' # [RFC7905]\n TLS_DHE_RSA_WITH_DES_CBC_SHA = b'\\x00\\x15' # [RFC8996]\n TLS_DHE_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x9A' # [RFC4162]\n TLS_ECCPWD_WITH_AES_128_CCM_SHA256 = b'\\xC0\\xB2' # [RFC8492]\n TLS_ECCPWD_WITH_AES_128_GCM_SHA256 = b'\\xC0\\xB0' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_CCM_SHA384 = b'\\xC0\\xB3' # [RFC8492]\n TLS_ECCPWD_WITH_AES_256_GCM_SHA384 = b'\\xC0\\xB1' # [RFC8492]\n TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x17' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_128_CBC_SHA = b'\\xC0\\x18' # [RFC8422]\n TLS_ECDH_anon_WITH_AES_256_CBC_SHA = b'\\xC0\\x19' # [RFC8422]\n TLS_ECDH_anon_WITH_NULL_SHA = b'\\xC0\\x15' # [RFC8422]\n TLS_ECDH_anon_WITH_RC4_128_SHA = b'\\xC0\\x16' # [RFC8422][RFC6347]\n TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x03' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x04' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x25' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2D' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x05' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x26' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2E' # [RFC5289]\n TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4A' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5E' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4B' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5F' # [RFC6209]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x74' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x88' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x75' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x89' # [RFC6367]\n TLS_ECDH_ECDSA_WITH_NULL_SHA = b'\\xC0\\x01' # [RFC8422]\n TLS_ECDH_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x02' # [RFC8422][RFC6347]\n TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x0D' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x0E' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x29' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x31' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0F' # [RFC8422]\n TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x2A' # [RFC5289]\n TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x32' # [RFC5289]\n TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4E' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x62' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4F' # [RFC6209]\n TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x63' # [RFC6209]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x78' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8C' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x79' # [RFC6367]\n TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8D' # [RFC6367]\n 
TLS_ECDH_RSA_WITH_NULL_SHA = b'\\xC0\\x0B' # [RFC8422]\n TLS_ECDH_RSA_WITH_RC4_128_SHA = b'\\xC0\\x0C' # [RFC8422][RFC6347]\n TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x08' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x09' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x23' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM = b'\\xC0\\xAC' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 = b'\\xC0\\xAE' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2B' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x0A' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x24' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM = b'\\xC0\\xAD' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 = b'\\xC0\\xAF' # [RFC7251]\n TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x2C' # [RFC5289]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x48' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x5C' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x49' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x5D' # [RFC6209]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x72' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x86' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x73' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x87' # [RFC6367]\n TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA9' # [RFC7905]\n TLS_ECDHE_ECDSA_WITH_NULL_SHA = b'\\xC0\\x06' # [RFC8422]\n TLS_ECDHE_ECDSA_WITH_RC4_128_SHA = b'\\xC0\\x07' # [RFC8422][RFC6347]\n TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x34' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA = b'\\xC0\\x35' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x37' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_8_SHA256 = b'\\xD0\\x03' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_CCM_SHA256 = b'\\xD0\\x05' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_128_GCM_SHA256 = b'\\xD0\\x01' # [RFC8442]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA = b'\\xC0\\x36' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x38' # [RFC5489]\n TLS_ECDHE_PSK_WITH_AES_256_GCM_SHA384 = b'\\xD0\\x02' # [RFC8442]\n TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x70' # [RFC6209]\n TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x71' # [RFC6209]\n TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x9A' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x9B' # [RFC6367]\n TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAC' # [RFC7905]\n TLS_ECDHE_PSK_WITH_NULL_SHA = b'\\xC0\\x39' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA256 = b'\\xC0\\x3A' # [RFC5489]\n TLS_ECDHE_PSK_WITH_NULL_SHA384 = b'\\xC0\\x3B' # [RFC5489]\n TLS_ECDHE_PSK_WITH_RC4_128_SHA = b'\\xC0\\x33' # [RFC5489][RFC6347]\n TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x12' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x13' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 = b'\\xC0\\x27' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 = b'\\xC0\\x2F' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x14' # [RFC8422]\n TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 = b'\\xC0\\x28' # [RFC5289]\n TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 = b'\\xC0\\x30' # [RFC5289]\n TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x4C' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x60' # [RFC6209]\n TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x4D' # [RFC6209]\n 
TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x61' # [RFC6209]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x76' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8A' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x77' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8B' # [RFC6367]\n TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xA8' # [RFC7905]\n TLS_ECDHE_RSA_WITH_NULL_SHA = b'\\xC0\\x10' # [RFC8422]\n TLS_ECDHE_RSA_WITH_RC4_128_SHA = b'\\xC0\\x11' # [RFC8422][RFC6347]\n TLS_GOSTR341112_256_WITH_28147_CNT_IMIT = b'\\xC1\\x02' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_CTR_OMAC = b'\\xC1\\x00' # [RFC9189]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_L = b'\\xC1\\x03' # [RFC9367]\n TLS_GOSTR341112_256_WITH_KUZNYECHIK_MGM_S = b'\\xC1\\x05' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_CTR_OMAC = b'\\xC1\\x01' # [RFC9189]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_L = b'\\xC1\\x04' # [RFC9367]\n TLS_GOSTR341112_256_WITH_MAGMA_MGM_S = b'\\xC1\\x06' # [RFC9367]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 = b'\\x00\\x29' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA = b'\\x00\\x26' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x2A' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA = b'\\x00\\x27' # [RFC2712]\n TLS_KRB5_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x2B' # [RFC2712][RFC6347]\n TLS_KRB5_EXPORT_WITH_RC4_40_SHA = b'\\x00\\x28' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_3DES_EDE_CBC_MD5 = b'\\x00\\x23' # [RFC2712]\n TLS_KRB5_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x1F' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_MD5 = b'\\x00\\x22' # [RFC2712]\n TLS_KRB5_WITH_DES_CBC_SHA = b'\\x00\\x1E' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_MD5 = b'\\x00\\x25' # [RFC2712]\n TLS_KRB5_WITH_IDEA_CBC_SHA = b'\\x00\\x21' # [RFC2712]\n TLS_KRB5_WITH_RC4_128_MD5 = b'\\x00\\x24' # [RFC2712][RFC6347]\n TLS_KRB5_WITH_RC4_128_SHA = b'\\x00\\x20' # [RFC2712][RFC6347]\n TLS_NULL_WITH_NULL_NULL = b'\\x00\\x00' # [RFC5246]\n TLS_PSK_DHE_WITH_AES_128_CCM_8 = b'\\xC0\\xAA' # [RFC6655]\n TLS_PSK_DHE_WITH_AES_256_CCM_8 = b'\\xC0\\xAB' # [RFC6655]\n TLS_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x8B' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x8C' # [RFC4279]\n TLS_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xAE' # [RFC5487]\n TLS_PSK_WITH_AES_128_CCM = b'\\xC0\\xA4' # [RFC6655]\n TLS_PSK_WITH_AES_128_CCM_8 = b'\\xC0\\xA8' # [RFC6655]\n TLS_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xA8' # [RFC5487]\n TLS_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x8D' # [RFC4279]\n TLS_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xAF' # [RFC5487]\n TLS_PSK_WITH_AES_256_CCM = b'\\xC0\\xA5' # [RFC6655]\n TLS_PSK_WITH_AES_256_CCM_8 = b'\\xC0\\xA9' # [RFC6655]\n TLS_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xA9' # [RFC5487]\n TLS_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x64' # [RFC6209]\n TLS_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6A' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x65' # [RFC6209]\n TLS_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6B' # [RFC6209]\n TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x94' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x8E' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x95' # [RFC6367]\n TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x8F' # [RFC6367]\n TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAB' # [RFC7905]\n TLS_PSK_WITH_NULL_SHA = b'\\x00\\x2C' # [RFC4785]\n TLS_PSK_WITH_NULL_SHA256 = b'\\x00\\xB0' # [RFC5487]\n TLS_PSK_WITH_NULL_SHA384 = b'\\x00\\xB1' # [RFC5487]\n TLS_PSK_WITH_RC4_128_SHA = 
b'\\x00\\x8A' # [RFC4279][RFC6347]\n TLS_RSA_EXPORT_WITH_DES40_CBC_SHA = b'\\x00\\x08' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 = b'\\x00\\x06' # [RFC4346]\n TLS_RSA_EXPORT_WITH_RC4_40_MD5 = b'\\x00\\x03' # [RFC4346][RFC6347]\n TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x93' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA = b'\\x00\\x94' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 = b'\\x00\\xB6' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 = b'\\x00\\xAC' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA = b'\\x00\\x95' # [RFC4279]\n TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 = b'\\x00\\xB7' # [RFC5487]\n TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 = b'\\x00\\xAD' # [RFC5487]\n TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x68' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x6E' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x69' # [RFC6209]\n TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x6F' # [RFC6209]\n TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 = b'\\xC0\\x98' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x92' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 = b'\\xC0\\x99' # [RFC6367]\n TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x93' # [RFC6367]\n TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 = b'\\xCC\\xAE' # [RFC7905]\n TLS_RSA_PSK_WITH_NULL_SHA = b'\\x00\\x2E' # [RFC4785]\n TLS_RSA_PSK_WITH_NULL_SHA256 = b'\\x00\\xB8' # [RFC5487]\n TLS_RSA_PSK_WITH_NULL_SHA384 = b'\\x00\\xB9' # [RFC5487]\n TLS_RSA_PSK_WITH_RC4_128_SHA = b'\\x00\\x92' # [RFC4279][RFC6347]\n TLS_RSA_WITH_3DES_EDE_CBC_SHA = b'\\x00\\x0A' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA = b'\\x00\\x2F' # [RFC5246]\n TLS_RSA_WITH_AES_128_CBC_SHA256 = b'\\x00\\x3C' # [RFC5246]\n TLS_RSA_WITH_AES_128_CCM = b'\\xC0\\x9C' # [RFC6655]\n TLS_RSA_WITH_AES_128_CCM_8 = b'\\xC0\\xA0' # [RFC6655]\n TLS_RSA_WITH_AES_128_GCM_SHA256 = b'\\x00\\x9C' # [RFC5288]\n TLS_RSA_WITH_AES_256_CBC_SHA = b'\\x00\\x35' # [RFC5246]\n TLS_RSA_WITH_AES_256_CBC_SHA256 = b'\\x00\\x3D' # [RFC5246]\n TLS_RSA_WITH_AES_256_CCM = b'\\xC0\\x9D' # [RFC6655]\n TLS_RSA_WITH_AES_256_CCM_8 = b'\\xC0\\xA1' # [RFC6655]\n TLS_RSA_WITH_AES_256_GCM_SHA384 = b'\\x00\\x9D' # [RFC5288]\n TLS_RSA_WITH_ARIA_128_CBC_SHA256 = b'\\xC0\\x3C' # [RFC6209]\n TLS_RSA_WITH_ARIA_128_GCM_SHA256 = b'\\xC0\\x50' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_CBC_SHA384 = b'\\xC0\\x3D' # [RFC6209]\n TLS_RSA_WITH_ARIA_256_GCM_SHA384 = b'\\xC0\\x51' # [RFC6209]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA = b'\\x00\\x41' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 = b'\\x00\\xBA' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 = b'\\xC0\\x7A' # [RFC6367]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA = b'\\x00\\x84' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 = b'\\x00\\xC0' # [RFC5932]\n TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 = b'\\xC0\\x7B' # [RFC6367]\n TLS_RSA_WITH_DES_CBC_SHA = b'\\x00\\x09' # [RFC8996]\n TLS_RSA_WITH_IDEA_CBC_SHA = b'\\x00\\x07' # [RFC8996]\n TLS_RSA_WITH_NULL_MD5 = b'\\x00\\x01' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA = b'\\x00\\x02' # [RFC5246]\n TLS_RSA_WITH_NULL_SHA256 = b'\\x00\\x3B' # [RFC5246]\n TLS_RSA_WITH_RC4_128_MD5 = b'\\x00\\x04' # [RFC5246][RFC6347]\n TLS_RSA_WITH_RC4_128_SHA = b'\\x00\\x05' # [RFC5246][RFC6347]\n TLS_RSA_WITH_SEED_CBC_SHA = b'\\x00\\x96' # [RFC4162]\n TLS_SHA256_SHA256 = b'\\xC0\\xB4' # [RFC9150]\n TLS_SHA384_SHA384 = b'\\xC0\\xB5' # [RFC9150]\n TLS_SM4_CCM_SM3 = b'\\x00\\xC7' # [RFC8998]\n TLS_SM4_GCM_SM3 = b'\\x00\\xC6' # [RFC8998]\n TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1C' # 
[RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA = b'\\xC0\\x1F' # [RFC5054]\n TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA = b'\\xC0\\x22' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1B' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1E' # [RFC5054]\n TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA = b'\\xC0\\x21' # [RFC5054]\n TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA = b'\\xC0\\x1A' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_128_CBC_SHA = b'\\xC0\\x1D' # [RFC5054]\n TLS_SRP_SHA_WITH_AES_256_CBC_SHA = b'\\xC0\\x20' # [RFC5054]" }, { "identifier": "ExtensionType", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class ExtensionType(Enum):\n server_name = b'\\x00\\x00'\n max_fragment_length = b'\\x00\\x01'\n client_certificate_url = b'\\x00\\x02'\n trusted_ca_keys = b'\\x00\\x03'\n truncated_hmac = b'\\x00\\x04'\n status_request = b'\\x00\\x05'\n user_mapping = b'\\x00\\x06'\n client_authz = b'\\x00\\x07'\n server_authz = b'\\x00\\x08'\n cert_type = b'\\x00\\x09'\n supported_groups = b'\\x00\\x0a'\n ec_point_formats = b'\\x00\\x0b'\n srp = b'\\x00\\x0c'\n signature_algorithms = b'\\x00\\x0d'\n use_srtp = b'\\x00\\x0e'\n heartbeat = b'\\x00\\x0f'\n application_layer_protocol_negotiation = b'\\x00\\x10'\n status_request_v2 = b'\\x00\\x11'\n signed_certificate_timestamp = b'\\x00\\x12'\n client_certificate_type = b'\\x00\\x13'\n server_certificate_type = b'\\x00\\x14'\n padding = b'\\x00\\x15'\n encrypt_then_mac = b'\\x00\\x16'\n extended_master_secret = b'\\x00\\x17'\n token_binding = b'\\x00\\x18'\n cached_info = b'\\x00\\x19'\n tls_lts = b'\\x00\\x1a'\n compress_certificate = b'\\x00\\x1b'\n record_size_limit = b'\\x00\\x1c'\n pwd_protect = b'\\x00\\x1d'\n pwd_clear = b'\\x00\\x1e'\n password_salt = b'\\x00\\x1f'\n ticket_pinning = b'\\x00\\x20'\n tls_cert_with_extern_psk = b'\\x00\\x21'\n delegated_credential = b'\\x00\\x22'\n session_ticket = b'\\x00\\x23'\n TLMSP = b'\\x00\\x24'\n TLMSP_proxying = b'\\x00\\x25'\n TLMSP_delegate = b'\\x00\\x26'\n supported_ekt_ciphers = b'\\x00\\x27'\n pre_shared_key = b'\\x00\\x29'\n early_data = b'\\x00\\x2a'\n supported_versions = b'\\x00\\x2b'\n cookie = b'\\x00\\x2c'\n psk_key_exchange_modes = b'\\x00\\x2d'\n certificate_authorities = b'\\x00\\x2f'\n oid_filters = b'\\x00\\x30'\n post_handshake_auth = b'\\x00\\x31'\n signature_algorithms_cert = b'\\x00\\x32'\n key_share = b'\\x00\\x33'\n transparency_info = b'\\x00\\x34'\n connection_id_deprecated = b'\\x00\\x35'\n connection_id = b'\\x00\\x36'\n external_id_hash = b'\\x00\\x37'\n external_session_id = b'\\x00\\x38'\n quic_transport_parameters = b'\\x00\\x39'\n ticket_request = b'\\x00\\x3a'\n dnssec_chain = b'\\x00\\x3b'\n sequence_number_encryption_algorithms = b'\\x00\\x3c'" }, { "identifier": "Group", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class Group(Enum):\n def __new__(cls, value, *rest, **kwds):\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n # Annotate each group with whether it's a PQ group.\n def __init__(self, _: bytes, is_pq: bool = False):\n self.is_pq = is_pq\n def __repr__(self):\n return self.name\n \n sect163k1 = b'\\x00\\x01'\n sect163r1 = b'\\x00\\x02'\n sect163r2 = b'\\x00\\x03'\n sect193r1 = b'\\x00\\x04'\n sect193r2 = b'\\x00\\x05'\n sect233k1 = b'\\x00\\x06'\n sect233r1 = b'\\x00\\x07'\n sect239k1 = b'\\x00\\x08'\n sect283k1 = b'\\x00\\x09'\n sect283r1 = b'\\x00\\x0a'\n sect409k1 = b'\\x00\\x0b'\n sect409r1 = b'\\x00\\x0c'\n sect571k1 = b'\\x00\\x0d'\n sect571r1 = b'\\x00\\x0e'\n secp160k1 = b'\\x00\\x0f'\n secp160r1 = 
b'\\x00\\x10'\n secp160r2 = b'\\x00\\x11'\n secp192k1 = b'\\x00\\x12'\n secp192r1 = b'\\x00\\x13'\n secp224k1 = b'\\x00\\x14'\n secp224r1 = b'\\x00\\x15'\n secp256k1 = b'\\x00\\x16'\n secp256r1 = b'\\x00\\x17'\n secp384r1 = b'\\x00\\x18'\n secp521r1 = b'\\x00\\x19'\n brainpoolP256r1 = b'\\x00\\x1a'\n brainpoolP384r1 = b'\\x00\\x1b'\n brainpoolP512r1 = b'\\x00\\x1c'\n x25519 = b'\\x00\\x1d'\n x448 = b'\\x00\\x1e'\n brainpoolP256r1tls13 = b'\\x00\\x1f'\n brainpoolP384r1tls13 = b'\\x00\\x20'\n brainpoolP512r1tls13 = b'\\x00\\x21'\n GC256A = b'\\x00\\x22'\n GC256B = b'\\x00\\x23'\n GC256C = b'\\x00\\x24'\n GC256D = b'\\x00\\x25'\n GC512A = b'\\x00\\x26'\n GC512B = b'\\x00\\x27'\n GC512C = b'\\x00\\x28'\n curveSM2 = b'\\x00\\x29'\n ffdhe2048 = b'\\x01\\x00'\n ffdhe3072 = b'\\x01\\x01'\n ffdhe4096 = b'\\x01\\x02'\n ffdhe6144 = b'\\x01\\x03'\n ffdhe8192 = b'\\x01\\x04'\n arbitrary_explicit_prime_curves = b'\\xff\\x01'\n arbitrary_explicit_char2_curves = b'\\xff\\x02'\n\n # Somewhat common post-quantum groups, not yet standardized:\n X25519Kyber768Draft00 = b'\\x63\\x99', True\n X25519Kyber768Draft00_obsolete = b'\\xfe\\x31', True\n X25519Kyber512Draft00 = b'\\xfe\\x30', True\n SecP256r1Kyber768Draft00 = b'\\x63\\x9a', True\n\n # Long list of unusual post-quantum groups from liboqs:\n # https://github.com/open-quantum-safe/oqs-provider/blob/main/ALGORITHMS.md?plain=1#L13\n frodo640aes = b'\\x02\\x00', True\n p256_frodo640aes = b'\\x2F\\x00', True\n x25519_frodo640aes = b'\\x2F\\x80', True\n frodo640shake = b'\\x02\\x01', True\n p256_frodo640shake = b'\\x2F\\x01', True\n x25519_frodo640shake = b'\\x2F\\x81', True\n frodo976aes = b'\\x02\\x02', True\n p384_frodo976aes = b'\\x2F\\x02', True\n x448_frodo976aes = b'\\x2F\\x82', True\n frodo976shake = b'\\x02\\x03', True\n p384_frodo976shake = b'\\x2F\\x03', True\n x448_frodo976shake = b'\\x2F\\x83', True\n frodo1344aes = b'\\x02\\x04', True\n p521_frodo1344aes = b'\\x2F\\x04', True\n frodo1344shake = b'\\x02\\x05', True\n p521_frodo1344shake = b'\\x2F\\x05', True\n kyber512 = b'\\x02\\x3A', True\n p256_kyber512 = b'\\x2F\\x3A', True\n x25519_kyber512 = b'\\x2F\\x39', True\n kyber768 = b'\\x02\\x3C', True\n p384_kyber768 = b'\\x2F\\x3C', True\n x448_kyber768 = b'\\x2F\\x90', True\n kyber1024 = b'\\x02\\x3D', True\n p521_kyber1024 = b'\\x2F\\x3D', True\n bikel1 = b'\\x02\\x41', True\n p256_bikel1 = b'\\x2F\\x41', True\n x25519_bikel1 = b'\\x2F\\xAE', True\n bikel3 = b'\\x02\\x42', True\n p384_bikel3 = b'\\x2F\\x42', True\n x448_bikel3 = b'\\x2F\\xAF', True\n bikel5 = b'\\x02\\x43', True\n p521_bikel5 = b'\\x2F\\x43', True\n hqc128 = b'\\x02\\x2C', True\n p256_hqc128 = b'\\x2F\\x2C', True\n x25519_hqc128 = b'\\x2F\\xAC', True\n hqc192 = b'\\x02\\x2D', True\n p384_hqc192 = b'\\x2F\\x2D', True\n x448_hqc192 = b'\\x2F\\xAD', True\n hqc256 = b'\\x02\\x2E', True\n p521_hqc256 = b'\\x2F\\x2E', True\n dilithium2 = b'\\xfe\\xa0', True\n p256_dilithium2 = b'\\xfe\\xa1', True\n rsa3072_dilithium2 = b'\\xfe\\xa2', True\n dilithium3 = b'\\xfe\\xa3', True\n p384_dilithium3 = b'\\xfe\\xa4', True\n dilithium5 = b'\\xfe\\xa5', True\n p521_dilithium5 = b'\\xfe\\xa6', True\n falcon512 = b'\\xfe\\xae', True\n p256_falcon512 = b'\\xfe\\xaf', True\n rsa3072_falcon512 = b'\\xfe\\xb0', True\n falcon1024 = b'\\xfe\\xb1', True\n p521_falcon1024 = b'\\xfe\\xb2', True\n sphincssha2128fsimple = b'\\xfe\\xb3', True\n p256_sphincssha2128fsimple = b'\\xfe\\xb4', True\n rsa3072_sphincssha2128fsimple = b'\\xfe\\xb5', True\n sphincssha2128ssimple = b'\\xfe\\xb6', True\n 
p256_sphincssha2128ssimple = b'\\xfe\\xb7', True\n rsa3072_sphincssha2128ssimple = b'\\xfe\\xb8', True\n sphincssha2192fsimple = b'\\xfe\\xb9', True\n p384_sphincssha2192fsimple = b'\\xfe\\xba', True\n sphincssha2192ssimple = b'\\xfe\\xbb', True\n p384_sphincssha2192ssimple = b'\\xfe\\xbc', True\n sphincssha2256fsimple = b'\\xfe\\xbd', True\n p521_sphincssha2256fsimple = b'\\xfe\\xbe', True\n sphincssha2256ssimple = b'\\xfe\\xc0', True\n p521_sphincssha2256ssimple = b'\\xfe\\xc1', True\n sphincsshake128fsimple = b'\\xfe\\xc2', True\n p256_sphincsshake128fsimple = b'\\xfe\\xc3', True\n rsa3072_sphincsshake128fsimple = b'\\xfe\\xc4', True\n sphincsshake128ssimple = b'\\xfe\\xc5', True\n p256_sphincsshake128ssimple = b'\\xfe\\xc6', True\n rsa3072_sphincsshake128ssimple = b'\\xfe\\xc7', True\n sphincsshake192fsimple = b'\\xfe\\xc8', True\n p384_sphincsshake192fsimple = b'\\xfe\\xc9', True\n sphincsshake192ssimple = b'\\xfe\\xca', True\n p384_sphincsshake192ssimple = b'\\xfe\\xcb', True\n sphincsshake256fsimple = b'\\xfe\\xcc', True\n p521_sphincsshake256fsimple = b'\\xfe\\xcd', True\n sphincsshake256ssimple = b'\\xfe\\xce', True\n p521_sphincsshake256ssimple = b'\\xfe\\xcf', True" }, { "identifier": "AlertLevel", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertLevel(Enum):\n \"\"\" Different alert levels that can be sent by the server. \"\"\"\n WARNING = b'\\x01'\n FATAL = b'\\x02'" }, { "identifier": "AlertDescription", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class AlertDescription(Enum):\n \"\"\" Different alert messages that can be sent by the server. \"\"\"\n close_notify = b'\\x00'\n unexpected_message = b'\\x0a'\n bad_record_mac = b'\\x14'\n record_overflow = b'\\x16'\n handshake_failure = b'\\x28'\n bad_certificate = b'\\x2a'\n unsupported_certificate = b'\\x2b'\n certificate_revoked = b'\\x2c'\n certificate_expired = b'\\x2d'\n certificate_unknown = b'\\x2e'\n illegal_parameter = b'\\x2f'\n unknown_ca = b'\\x30'\n access_denied = b'\\x31'\n decode_error = b'\\x32'\n decrypt_error = b'\\x33'\n protocol_version = b'\\x46'\n insufficient_security = b'\\x47'\n internal_error = b'\\x50'\n inappropriate_fallback = b'\\x56'\n user_canceled = b'\\x5a'\n missing_extension = b'\\x6d'\n unsupported_extension = b'\\x6e'\n unrecognized_name = b'\\x70'\n bad_certificate_status_response = b'\\x71'\n unknown_psk_identity = b'\\x73'\n certificate_required = b'\\x74'\n no_application_protocol = b'\\x78'" }, { "identifier": "PskKeyExchangeMode", "path": "src/hello_tls/names_and_numbers.py", "snippet": "class PskKeyExchangeMode(Enum):\n psk_ke = b'\\x00'\n psk_dhe_ke = b'\\x01'" } ]
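The enums above bind each protocol version and record type to its wire encoding. A TLS record is framed as one content-type byte, two version bytes, and a two-byte big-endian payload length before the payload itself; a minimal sketch using stand-in constants copied from RecordType.HANDSHAKE and Protocol.TLS1_2 above, with a throwaway payload.

# Stand-ins mirroring RecordType.HANDSHAKE and Protocol.TLS1_2 defined above.
RECORD_HANDSHAKE = b'\x16'
TLS1_2 = b'\x03\x03'

def frame_record(record_type: bytes, version: bytes, payload: bytes) -> bytes:
    # Record header: type (1 byte) + legacy version (2 bytes) + length (2 bytes).
    return record_type + version + len(payload).to_bytes(2, 'big') + payload

record = frame_record(RECORD_HANDSHAKE, TLS1_2, b'\x01\x00\x00\x04test')
assert record[3:5] == b'\x00\x08'  # 8-byte payload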
from typing import Iterator, List, Sequence, Optional, Iterable, Callable, Tuple
from contextlib import contextmanager
from dataclasses import dataclass
from .names_and_numbers import Protocol, RecordType, HandshakeType, CompressionMethod, CipherSuite, ExtensionType, Group, AlertLevel, AlertDescription, PskKeyExchangeMode
import logging
14,587
logger = logging.getLogger(__name__)

class ScanError(Exception):
    """ Base error class for errors that occur during scanning. """
    pass

class ServerAlertError(ScanError):
logger = logging.getLogger(__name__)

class ScanError(Exception):
    """ Base error class for errors that occur during scanning. """
    pass

class ServerAlertError(ScanError):
def __init__(self, level: AlertLevel, description: AlertDescription):
8
2023-10-21 02:00:13+00:00
24k
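Note on this record: its completion target (next_line) is the opening of ServerAlertError.__init__, which ties together the AlertLevel and AlertDescription enums quoted in the record's context. A minimal sketch of how that class plausibly continues — the enum stand-ins are abbreviated from the context, and the __init__ body is an assumption for illustration, not the repository's verbatim code:

import enum

class AlertLevel(enum.Enum):
    # stand-in for the two-level enum quoted in the context
    WARNING = b'\x01'
    FATAL = b'\x02'

class AlertDescription(enum.Enum):
    # a few of the alert codes quoted in the context
    close_notify = b'\x00'
    handshake_failure = b'\x28'
    protocol_version = b'\x46'

class ScanError(Exception):
    """ Base error class for errors that occur during scanning. """
    pass

class ServerAlertError(ScanError):
    def __init__(self, level: AlertLevel, description: AlertDescription):
        # body below is a plausible continuation (assumption), not verbatim source
        super().__init__(f'server alert: {level.name} {description.name}')
        self.level = level
        self.description = description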
zhaojw1998/AccoMontage-3
arrangement_utils.py
[ { "identifier": "split_phrases", "path": "piano_arranger/acc_utils.py", "snippet": "def split_phrases(segmentation):\n \"\"\"Split a phrase label string into individual phrase meta info\"\"\"\n if '\\n' not in segmentation:\n segmentation += '\\n'\n phrases = []\n lengths = []\n current = 0\n while segmentation[current] != '\\n':\n if segmentation[current].isalpha():\n j = 1\n while not (segmentation[current + j].isalpha() or segmentation[current + j] == '\\n'):\n j += 1\n phrases.append(segmentation[current])\n lengths.append(int(segmentation[current+1: current+j]))\n current += j\n return [(phrases[i], lengths[i], sum(lengths[:i])) for i in range(len(phrases))] " }, { "identifier": "DisentangleVAE", "path": "piano_arranger/models/Poly_Dis.py", "snippet": "class DisentangleVAE(PytorchModel):\n\n def __init__(self, name, device, chd_encoder, rhy_encoder, decoder,\n chd_decoder):\n super(DisentangleVAE, self).__init__(name, device)\n self.chd_encoder = chd_encoder\n self.rhy_encoder = rhy_encoder\n self.decoder = decoder\n self.num_step = self.decoder.num_step\n self.chd_decoder = chd_decoder\n\n def confuse_prmat(self, pr_mat):\n non_zero_ent = torch.nonzero(pr_mat.long())\n eps = torch.randint(0, 2, (non_zero_ent.size(0),))\n eps = ((2 * eps) - 1).long()\n confuse_ent = torch.clamp(non_zero_ent[:, 2] + eps, min=0, max=127)\n pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], confuse_ent] = \\\n pr_mat[non_zero_ent[:, 0], non_zero_ent[:, 1], non_zero_ent[:, 2]]\n return pr_mat\n\n def get_chroma(self, pr_mat):\n bs = pr_mat.size(0)\n pad = torch.zeros(bs, 32, 4).to(self.device)\n pr_mat = torch.cat([pr_mat, pad], dim=-1)\n c = pr_mat.view(bs, 32, -1, 12).contiguous()\n c = c.sum(dim=-2) # (bs, 32, 12)\n c = c.view(bs, 8, 4, 12)\n c = c.sum(dim=-2).float()\n c = torch.log(c + 1)\n return c.to(self.device)\n\n def run(self, x, c, pr_mat, tfr1, tfr2, tfr3, confuse=True):\n embedded_x, lengths = self.decoder.emb_x(x)\n # cc = self.get_chroma(pr_mat)\n dist_chd = self.chd_encoder(c)\n # pr_mat = self.confuse_prmat(pr_mat)\n dist_rhy = self.rhy_encoder(pr_mat)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, False, embedded_x,\n lengths, tfr1, tfr2)\n recon_root, recon_chroma, recon_bass = self.chd_decoder(z_chd, False,\n tfr3, c)\n return pitch_outs, dur_outs, dist_chd, dist_rhy, recon_root, \\\n recon_chroma, recon_bass\n\n def loss_function(self, x, c, recon_pitch, recon_dur, dist_chd,\n dist_rhy, recon_root, recon_chroma, recon_bass,\n beta, weights, weighted_dur=False):\n recon_loss, pl, dl = self.decoder.recon_loss(x, recon_pitch, recon_dur,\n weights, weighted_dur)\n kl_loss, kl_chd, kl_rhy = self.kl_loss(dist_chd, dist_rhy)\n chord_loss, root, chroma, bass = self.chord_loss(c, recon_root,\n recon_chroma,\n recon_bass)\n loss = recon_loss + beta * kl_loss + chord_loss\n return loss, recon_loss, pl, dl, kl_loss, kl_chd, kl_rhy, chord_loss, \\\n root, chroma, bass\n\n def chord_loss(self, c, recon_root, recon_chroma, recon_bass):\n loss_fun = nn.CrossEntropyLoss()\n root = c[:, :, 0: 12].max(-1)[-1].view(-1).contiguous()\n chroma = c[:, :, 12: 24].long().view(-1).contiguous()\n bass = c[:, :, 24:].max(-1)[-1].view(-1).contiguous()\n\n recon_root = recon_root.view(-1, 12).contiguous()\n recon_chroma = recon_chroma.view(-1, 2).contiguous()\n recon_bass = recon_bass.view(-1, 12).contiguous()\n root_loss = loss_fun(recon_root, root)\n chroma_loss = loss_fun(recon_chroma, chroma)\n bass_loss = 
loss_fun(recon_bass, bass)\n chord_loss = root_loss + chroma_loss + bass_loss\n return chord_loss, root_loss, chroma_loss, bass_loss\n\n def kl_loss(self, *dists):\n # kl = kl_with_normal(dists[0])\n kl_chd = kl_with_normal(dists[0])\n kl_rhy = kl_with_normal(dists[1])\n kl_loss = kl_chd + kl_rhy\n return kl_loss, kl_chd, kl_rhy\n\n def loss(self, x, c, pr_mat, dt_x, tfr1=0., tfr2=0., tfr3=0., beta=0.1, weights=(1, 0.5)):\n #print(pr_mat.shape, dt_x.shape)\n outputs = self.run(x, c, pr_mat, tfr1, tfr2, tfr3)\n loss = self.loss_function(x, c, *outputs, beta, weights)\n return loss\n\n # def inference(self, c, pr_mat):\n # self.eval()\n # with torch.no_grad():\n # dist_chd = self.chd_encoder(c)\n # # pr_mat = self.confuse_prmat(pr_mat)\n # dist_rhy = self.rhy_encoder(pr_mat)\n # z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n # dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n # pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n # None, 0., 0.)\n # est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n # return est_x\n #\n # def swap(self, c1, c2, pr_mat1, pr_mat2, fix_rhy, fix_chd):\n # pr_mat = pr_mat1 if fix_rhy else pr_mat2\n # c = c1 if fix_chd else c2\n # est_x = self.inference(c, pr_mat)\n # return est_x\n\n def inference_encode(self, pr_mat, c):\n self.eval()\n with torch.no_grad():\n dist_chd = self.chd_encoder(c)\n dist_rhy = self.rhy_encoder(pr_mat)\n return dist_chd, dist_rhy\n\n def inference_decode(self, z_chd, z_rhy):\n self.eval()\n with torch.no_grad():\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n None, 0., 0.)\n est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n return est_x\n\n def inference(self, pr_mat, c, sample):\n self.eval()\n with torch.no_grad():\n dist_chd = self.chd_encoder(c)\n dist_rhy = self.rhy_encoder(pr_mat)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], sample)\n dec_z = torch.cat([z_chd, z_rhy], dim=-1)\n pitch_outs, dur_outs = self.decoder(dec_z, True, None,\n None, 0., 0.)\n est_x, _, _ = self.decoder.output_to_numpy(pitch_outs, dur_outs)\n return est_x\n\n def swap(self, pr_mat1, pr_mat2, c1, c2, fix_rhy, fix_chd):\n pr_mat = pr_mat1 if fix_rhy else pr_mat2\n c = c1 if fix_chd else c2\n est_x = self.inference(pr_mat, c, sample=False)\n return est_x\n\n def posterior_sample(self, pr_mat, c, scale=None, sample_chd=True,\n sample_txt=True):\n if scale is None and sample_chd and sample_txt:\n est_x = self.inference(pr_mat, c, sample=True)\n else:\n dist_chd, dist_rhy = self.inference_encode(pr_mat, c)\n if scale is not None:\n mean_chd = dist_chd.mean\n mean_rhy = dist_rhy.mean\n # std_chd = torch.ones_like(dist_chd.mean) * scale\n # std_rhy = torch.ones_like(dist_rhy.mean) * scale\n std_chd = dist_chd.scale * scale\n std_rhy = dist_rhy.scale * scale\n dist_rhy = Normal(mean_rhy, std_rhy)\n dist_chd = Normal(mean_chd, std_chd)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n if not sample_chd:\n z_chd = dist_chd.mean\n if not sample_txt:\n z_rhy = dist_rhy.mean\n est_x = self.inference_decode(z_chd, z_rhy)\n return est_x\n\n def prior_sample(self, x, c, sample_chd=False, sample_rhy=False,\n scale=1.):\n dist_chd, dist_rhy = self.inference_encode(x, c)\n mean = torch.zeros_like(dist_rhy.mean)\n loc = torch.ones_like(dist_rhy.mean) * scale\n if sample_chd:\n dist_chd = Normal(mean, loc)\n if sample_rhy:\n dist_rhy = Normal(mean, loc)\n z_chd, z_rhy = get_zs_from_dists([dist_chd, dist_rhy], True)\n return self.inference_decode(z_chd, 
z_rhy)\n\n def gt_sample(self, x):\n out = x[:, :, 1:].numpy()\n return out\n\n def interp(self, pr_mat1, c1, pr_mat2, c2, interp_chd=False,\n interp_rhy=False, int_count=10):\n dist_chd1, dist_rhy1 = self.inference_encode(pr_mat1, c1)\n dist_chd2, dist_rhy2 = self.inference_encode(pr_mat2, c2)\n [z_chd1, z_rhy1, z_chd2, z_rhy2] = \\\n get_zs_from_dists([dist_chd1, dist_rhy1, dist_chd2, dist_rhy2],\n False)\n if interp_chd:\n z_chds = self.interp_z(z_chd1, z_chd2, int_count)\n else:\n z_chds = z_chd1.unsqueeze(1).repeat(1, int_count, 1)\n if interp_rhy:\n z_rhys = self.interp_z(z_rhy1, z_rhy2, int_count)\n else:\n z_rhys = z_rhy1.unsqueeze(1).repeat(1, int_count, 1)\n bs = z_chds.size(0)\n z_chds = z_chds.view(bs * int_count, -1).contiguous()\n z_rhys = z_rhys.view(bs * int_count, -1).contiguous()\n estxs = self.inference_decode(z_chds, z_rhys)\n return estxs.reshape((bs, int_count, 32, 15, -1))\n\n def interp_z(self, z1, z2, int_count=10):\n z1 = z1.numpy()\n z2 = z2.numpy()\n zs = torch.stack([self.interp_path(zz1, zz2, int_count)\n for zz1, zz2 in zip(z1, z2)], dim=0)\n return zs\n\n def interp_path(self, z1, z2, interpolation_count=10):\n result_shape = z1.shape\n z1 = z1.reshape(-1)\n z2 = z2.reshape(-1)\n\n def slerp2(p0, p1, t):\n omega = np.arccos(\n np.dot(p0 / np.linalg.norm(p0), p1 / np.linalg.norm(p1)))\n so = np.sin(omega)\n return np.sin((1.0 - t) * omega)[:, None] / so * p0[\n None] + np.sin(\n t * omega)[:, None] / so * p1[None]\n\n percentages = np.linspace(0.0, 1.0, interpolation_count)\n\n normalized_z1 = z1 / np.linalg.norm(z1)\n normalized_z2 = z2 / np.linalg.norm(z2)\n dirs = slerp2(normalized_z1, normalized_z2, percentages)\n length = np.linspace(np.log(np.linalg.norm(z1)),\n np.log(np.linalg.norm(z2)),\n interpolation_count)\n out = (dirs * np.exp(length[:, None])).reshape(\n [interpolation_count] + list(result_shape))\n # out = np.array([(1 - t) * z1 + t * z2 for t in percentages])\n return torch.from_numpy(out).to(self.device).float()\n\n @staticmethod\n def init_model(device=None, chd_size=256, txt_size=256, num_channel=10):\n name = 'disvae'\n if device is None:\n device = torch.device('cuda' if torch.cuda.is_available()\n else 'cpu')\n # chd_encoder = RnnEncoder(36, 1024, 256)\n chd_encoder = RnnEncoder(36, 1024, chd_size)\n # rhy_encoder = TextureEncoder(256, 1024, 256)\n rhy_encoder = TextureEncoder(256, 1024, txt_size, num_channel)\n # pt_encoder = PtvaeEncoder(device=device, z_size=152)\n # chd_decoder = RnnDecoder(z_dim=256)\n chd_decoder = RnnDecoder(z_dim=chd_size)\n # pt_decoder = PtvaeDecoder(note_embedding=None,\n # dec_dur_hid_size=64, z_size=512)\n pt_decoder = PtvaeDecoder(note_embedding=None,\n dec_dur_hid_size=64,\n z_size=chd_size + txt_size)\n\n model = DisentangleVAE(name, device, chd_encoder,\n rhy_encoder, pt_decoder, chd_decoder)\n return model" }, { "identifier": "find_by_length", "path": "piano_arranger/AccoMontage.py", "snippet": "def find_by_length(melody_data, acc_data, chord_data, velocity_data, cc_data, length):\n \"\"\"Search from POP909 phrase data for a certain phrase length.\"\"\"\n melody_record = []\n acc_record = []\n chord_record = []\n velocity_record = []\n cc_record = []\n song_reference = []\n for song_idx in range(acc_data.shape[0]):\n for phrase_idx in range(len(acc_data[song_idx])):\n melody = melody_data[song_idx][phrase_idx]\n if not melody.shape[0] == length * 16:\n continue\n if np.sum(melody[:, :128]) <= 2:\n continue\n melody_record.append(melody)\n acc = acc_data[song_idx][phrase_idx]\n acc_record.append(acc)\n 
chord = chord_data[song_idx][phrase_idx]\n chord_record.append(chord)\n velocity = velocity_data[song_idx][phrase_idx]\n velocity_record.append(velocity)\n cc = cc_data[song_idx][phrase_idx]\n cc_record.append(cc)\n song_reference.append((song_idx, phrase_idx))\n return np.array(melody_record), np.array(acc_record), np.array(chord_record), np.array(velocity_record), np.array(cc_record), song_reference" }, { "identifier": "dp_search", "path": "piano_arranger/AccoMontage.py", "snippet": "def dp_search(query_phrases, seg_query, acc_pool, edge_weights, texture_filter=None, filter_id=None, spotlights=None, randomness=0):\n \"\"\"Search for texture donors based on dynamic programming.\n * query_phrases: lead sheet in segmented phrases. Shape of each phrase: (T, 142), quantized at 1/4-beat level. This format is defined in R. Yang et al., \"Deep music analogy via latent representation disentanglement,\" ISMIR 2019.\n * seg_query: phrase annotation for the lead sheet. Format of each phrase: (label, length, start). For example, seg_query=[('A', 8, 0), ('A', 8, 8), ('B', 4, 16)].\n * acc_pool: search space for piano texture donors.\n * edge_weights: pre-computed transition scores for texture donor i to i+1.\n * texture_filter: filter on voice number (VN) and rhythmic density (RD).\n * filter_id: specified VN abd RD to filter for the first phrase.\n * spotlights: specified a preference for certain songs and/or artists for the search process.\n * randomness: degree of randomness tobe introduced to the search process.\n \"\"\"\n seg_query = [item[0] + str(item[1]) for item in seg_query] #['A8', 'A8', 'B8', 'B8']\n #Searching for phrase 1\n query_length = [query_phrases[i].shape[0]//16 for i in range(len(query_phrases))]\n mel, acc, chord, _, _, song_ref = acc_pool[query_length[0]]\n mel_set = mel\n rhy_set = np.concatenate((np.sum(mel_set[:, :, :128], axis=-1, keepdims=True), mel_set[:, :, 128: 130]), axis=-1)\n query_rhy = np.concatenate((np.sum(query_phrases[0][:, : 128], axis=-1, keepdims=True), query_phrases[0][:, 128: 130]), axis=-1)[np.newaxis, :, :]\n rhythm_result = cosine_rhy(query_rhy+1e-5, rhy_set+1e-5)\n\n chord_set = chord\n chord_set, num_total, shift_const = chord_shift(chord_set)\n chord_set_TIV = computeTIV(chord_set)\n query_chord = query_phrases[0][:, 130:][::4]\n query_chord_TIV = computeTIV(query_chord)[np.newaxis, :, :]\n chord_score, arg_chord = cosine(query_chord_TIV, chord_set_TIV)\n\n score = .5*rhythm_result + .5*chord_score\n score += randomness * np.random.normal(0, 1, size=len(score)) #to introduce some randomness\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx: \n score[ref_idx] += 1\n if filter_id is not None:\n mask = texture_filter[query_length[0]][0][filter_id[0]] * texture_filter[query_length[0]][1][filter_id[1]] - 1\n score += mask\n\n path = [[(i, score[i])] for i in range(acc.shape[0])]\n shift = [[shift_const[i]] for i in arg_chord]\n melody_record = np.argmax(mel_set, axis=-1)\n record = []\n\n #Searching for phrase 2, 3, ...\n for i in tqdm(range(1, len(query_length))):\n mel, acc, chord, _, _, song_ref = acc_pool[query_length[i]]\n weight_key = f\"l_{str(query_length[i-1]).zfill(2)}_{str(query_length[i]).zfill(2)}\"\n contras_result = edge_weights[weight_key]\n if query_length[i-1] == query_length[i]:\n for j in range(contras_result.shape[0]):\n contras_result[j, j] = -1 #the ith phrase does not transition to itself at i+1\n for k in range(j-1, -1, -1):\n if song_ref[k][0] 
!= song_ref[j][0]:\n break\n contras_result[j, k] = -1 #ith phrase does not transition to its ancestors in the same song.\n if i > 1:\n contras_result = contras_result[[item[-1][1] for item in record]]\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx:\n contras_result[:, ref_idx] += 1\n mel_set = mel\n rhy_set = np.concatenate((np.sum(mel_set[:, :, :128], axis=-1, keepdims=True), mel_set[:, :, 128: 130]), axis=-1)\n query_rhy = np.concatenate((np.sum(query_phrases[i][:, : 128], axis=-1, keepdims=True), query_phrases[i][:, 128: 130]), axis=-1)[np.newaxis, :, :]\n rhythm_result = cosine_rhy(query_rhy, rhy_set)\n chord_set = chord\n chord_set, num_total, shift_const = chord_shift(chord_set)\n chord_set_TIV = computeTIV(chord_set)\n query_chord = query_phrases[i][:, 130:][::4]\n query_chord_TIV = computeTIV(query_chord)[np.newaxis, :, :]\n chord_score, arg_chord = cosine(query_chord_TIV, chord_set_TIV)\n sim_this_layer = .5*rhythm_result + .5*chord_score\n sim_this_layer += randomness * np.random.normal(0, 1, size=len(sim_this_layer))\n if spotlights is not None:\n for spot_idx in spotlights:\n for ref_idx, ref_item in enumerate(song_ref):\n if ref_item[0] == spot_idx: \n sim_this_layer[ref_idx] += 1\n score_this_layer = .7*contras_result + .3*np.tile(sim_this_layer[np.newaxis, :], (contras_result.shape[0], 1)) + np.tile(score[:, np.newaxis], (1, contras_result.shape[1]))\n melody_flat = np.argmax(mel_set, axis=-1)\n if seg_query[i] == seg_query[i-1]:\n melody_pre = melody_record\n matrix = np.matmul(melody_pre, np.transpose(melody_flat, (1, 0))) / (np.linalg.norm(melody_pre, axis=-1)[:, np.newaxis]*(np.linalg.norm(np.transpose(melody_flat, (1, 0)), axis=0))[np.newaxis, :])\n if i == 1:\n for k in range(matrix.shape[1]):\n matrix[k, :k] = -1\n else:\n for k in range(len(record)):\n matrix[k, :record[k][-1][1]] = -1\n matrix = (matrix > 0.99) * 1.\n score_this_layer += matrix\n topk = 1\n args = np.argsort(score_this_layer, axis=0)[::-1, :][:topk, :]\n record = []\n for j in range(args.shape[-1]):\n for k in range(args.shape[0]):\n record.append((score_this_layer[args[k, j], j], (args[k, j], j)))\n shift_this_layer = [[shift_const[k]] for k in arg_chord]\n new_path = [path[item[-1][0]] + [(item[-1][1], sim_this_layer[item[-1][1]])] for item in record]\n new_shift = [shift[item[-1][0]] + shift_this_layer[item[-1][1]] for item in record]\n melody_record = melody_flat[[item[-1][1] for item in record]]\n path = new_path\n shift = new_shift\n score = np.array([item[0] for item in record])\n\n arg = score.argsort()[::-1]\n return [path[arg[i]] for i in range(topk)], [shift[arg[i]] for i in range(topk)]" }, { "identifier": "re_harmonization", "path": "piano_arranger/AccoMontage.py", "snippet": "def re_harmonization(lead_sheet, chord_table, query_phrases, indices, shifts, acc_pool, model, get_est=True, tempo=120):\n \"\"\"Re-harmonize the accompaniment texture donors and save in MIDI.\n * lead_sheet: the conditional lead sheet. Its melody track will be taken. Shape: (T, 142), quantized at 1-beat level. This format is defined in R. Yang et al., \"Deep music analogy via latent representation disentanglement,\" ISMIR 2019.\n * chord_table: the conditional chord progression from the lead sheet. Shape: (T', 36), quantized at 1-beat level. This format is defined in Z. 
Wang et al., \"Learning interpretable representation for controllable polyphonic music generation,\" ISMIR 2020.\n * seg_query: phrase annotation for the lead sheet. Format of each phrase: (label, length, start). For example, seg_query=[('A', 8, 0), ('A', 8, 8), ('B', 4, 16)].\n * indices: the indices of selected texture donor phrases in the acc_pool.\n * shifts: pitch transposition of each selected phrase.\n * acc_pool: search space for piano texture donors.\n * tempo: the tempo to render the piece.\n \"\"\"\n acc_roll = np.empty((0, 128))\n vel_roll = []\n phrase_mean_vel = []\n cc_roll = np.empty((0, 128))\n #retrive texture donor data of the corrresponding indices from the acc_pool\n for i, idx in enumerate(indices):\n length = query_phrases[i][-2]\n shift = shifts[i]\n # notes\n acc_matrix = np.roll(acc_pool[length][1][idx[0]], shift, axis=-1)\n acc_roll = np.concatenate((acc_roll, acc_matrix), axis=0)\n #MIDI velocity\n vel_matrix = np.roll(acc_pool[length][3][idx[0]], shift, axis=-1)\n phrase_mean_vel.append(np.mean(np.ma.masked_equal(vel_matrix, value=0)))\n vel_roll.append(vel_matrix)\n #MIDI control messages (mainly for pedals)\n cc_matrix = acc_pool[length][4][idx[0]]\n cc_roll = np.concatenate((cc_roll, cc_matrix), axis=0)\n # normalize the scale of velocity across different retrieved phrases\n global_mean_vel = np.mean(np.ma.masked_equal(np.concatenate(vel_roll, axis=0), value=0))\n for i in range(len(vel_roll)):\n vel_roll[i][vel_roll[i] > 0] += (global_mean_vel - phrase_mean_vel[i])\n vel_roll = np.concatenate(vel_roll, axis=0)\n #re-harmonization\n if len(acc_roll) % 32 != 0:\n pad_len = (len(acc_roll)//32+1)*32 - len(acc_roll)\n acc_roll = np.pad(acc_roll, ((0, pad_len), (0, 0)))\n vel_roll = np.pad(vel_roll, ((0, pad_len), (0, 0)))\n cc_roll = np.pad(cc_roll, ((0, pad_len), (0, 0)), mode='constant', constant_values=-1)\n chord_table = np.pad(chord_table, ((0, pad_len//4), (0, 0)))\n chord_table[-pad_len:, 0] = -1\n chord_table[-pad_len:, -1] = -1\n acc_roll = acc_roll.reshape(-1, 32, 128)\n chord_table = chord_table.reshape(-1, 8, 36)\n acc_roll = torch.from_numpy(acc_roll).float().cuda()\n acc_roll = torch.clip(acc_roll, min=0, max=31)\n gt_chord = torch.from_numpy(chord_table).float().cuda()\n est_x = model.inference(acc_roll, gt_chord, sample=False)\n acc_roll = cvt.grid2pr(est_x.reshape(-1, 15, 6))\n #interpolate MIDI velocity\n adapt_vel_roll = np.zeros(vel_roll.shape)\n masked_dyn_matrix = np.ma.masked_equal(vel_roll, value=0)\n mean = np.mean(masked_dyn_matrix, axis=-1)\n onsets = np.nonzero(mean.data)\n dynamic = mean.data[onsets]\n onsets = onsets[0].tolist()\n dynamic = dynamic.tolist()\n if not 0 in onsets:\n onsets = [0] + onsets\n dynamic = [dynamic[0]] + dynamic\n if not len(vel_roll)-1 in onsets:\n onsets = onsets + [len(vel_roll)-1]\n dynamic = dynamic + [dynamic[-1]]\n dyn_curve = interp1d(onsets, dynamic)\n for t, p in zip(*np.nonzero(acc_roll)):\n adapt_vel_roll[t, p] = dyn_curve(t)\n adapt_vel_roll = np.clip(adapt_vel_roll, a_min=0, a_max=127)\n #reconstruct MIDI\n accompaniment = np.stack([acc_roll, adapt_vel_roll, cc_roll], axis=-1)[np.newaxis, :, :, :]\n midi_recon = cvt.matrix2midi_with_dynamics(accompaniment, programs=[0], init_tempo=tempo)\n melody_track = cvt.melody_matrix2data(melody_matrix=lead_sheet[:, :130], tempo=tempo)\n midi_recon.instruments = [melody_track] + midi_recon.instruments\n if get_est:\n return midi_recon, est_x\n else:\n return midi_recon" }, { "identifier": "get_texture_filter", "path": "piano_arranger/AccoMontage.py", 
"snippet": "def get_texture_filter(acc_pool):\n \"\"\"Divide accompaniment texture donors into fifths in terms of voice number (VN) and rhythmic density (RD).\"\"\"\n texture_filter = {}\n for key in acc_pool:\n acc_track = acc_pool[key][1]\n # CALCULATE HORIZONTAL DENSITY (rhythmic density)\n onset_positions = (np.sum(acc_track, axis=-1) > 0) * 1.\n HD = np.sum(onset_positions, axis=-1) / acc_track.shape[1] #(N)\n # CALCULATE VERTICAL DENSITY (voice number)\n beat_positions = acc_track[:, ::4, :]\n downbeat_positions = acc_track[:, ::16, :]\n upbeat_positions = acc_track[:, 2::4, :]\n\n simu_notes_on_beats = np.sum((beat_positions > 0) * 1., axis=-1) #N*T\n simu_notes_on_downbeats = np.sum((downbeat_positions > 0) * 1., axis=-1)\n simu_notes_on_upbeats = np.sum((upbeat_positions > 0) * 1., axis=-1)\n\n VD_beat = np.sum(simu_notes_on_beats, axis=-1) / (np.sum((simu_notes_on_beats > 0) * 1., axis=-1) + 1e-10)\n VD_upbeat = np.sum(simu_notes_on_upbeats, axis=-1) / (np.sum((simu_notes_on_upbeats > 0) * 1., axis=-1) + 1e-10)\n\n VD = np.max(np.stack((VD_beat, VD_upbeat), axis=-1), axis=-1)\n #get five-equal-divident-points of HD\n dst = np.sort(HD)\n HD_anchors = [dst[len(dst) // 5], dst[len(dst) // 5 * 2], dst[len(dst) // 5 * 3], dst[len(dst) // 5 * 4]]\n HD_Bins = [\n HD < HD_anchors[0],\n (HD >= HD_anchors[0]) * (HD < HD_anchors[1]),\n (HD >= HD_anchors[1]) * (HD < HD_anchors[2]),\n (HD >= HD_anchors[2]) * (HD < HD_anchors[3]),\n HD >= HD_anchors[3]\n ]\n #get five-equal-divident-points of VD\n dst = np.sort(VD)\n VD_anchors = [dst[len(dst) // 5], dst[len(dst) // 5 * 2], dst[len(dst) // 5 * 3], dst[len(dst) // 5 * 4]]\n VD_Bins = [\n VD < VD_anchors[0],\n (VD >= VD_anchors[0]) * (VD < VD_anchors[1]),\n (VD >= VD_anchors[1]) * (VD < VD_anchors[2]),\n (VD >= VD_anchors[2]) * (VD < VD_anchors[3]),\n VD >= VD_anchors[3]\n ]\n texture_filter[key] = (HD_Bins, VD_Bins) #((5, N), (5, N))\n return texture_filter" }, { "identifier": "ref_spotlight", "path": "piano_arranger/AccoMontage.py", "snippet": "def ref_spotlight(ref_name_list, reference_check):\n \"\"\"convert spotlight song/artist names into the indices of corresponding pieces in the dataset.\"\"\"\n if ref_name_list is None:\n return None\n check_idx = []\n #POP909 song_id\n for name in ref_name_list:\n line = reference_check[reference_check.song_id == name]\n if not line.empty:\n check_idx.append(line.index)#read by pd, neglect first row, index starts from 0.\n #song name\n for name in ref_name_list:\n line = reference_check[reference_check.name == name]\n if not line.empty:\n check_idx.append(line.index)#read by pd, neglect first row, index starts from 0.\n #artist name\n for name in ref_name_list:\n line = reference_check[reference_check.artist == name]\n if not line.empty:\n check_idx += list(line.index)#read by pd, neglect first row, index starts from 0\n return check_idx" }, { "identifier": "Slakh2100_Pop909_Dataset", "path": "orchestrator/QA_dataset.py", "snippet": "class Slakh2100_Pop909_Dataset(Dataset):\n def __init__(self, slakh_dir, pop909_dir, sample_len=SAMPLE_LEN, hop_len=BAR_HOP_LEN, debug_mode=False, split='train', mode='train', with_dynamics=False, merge_pop909=0):\n super(Slakh2100_Pop909_Dataset, self).__init__()\n self.split = split\n self.mode = mode\n self.debug_mode = debug_mode\n\n self.with_dynamics = with_dynamics\n self.merge_pop909 = merge_pop909\n\n self.memory = dict({'tracks': [],\n 'programs': [],\n 'dynamics': [],\n 'dir': []\n })\n self.anchor_list = []\n self.sample_len = sample_len\n \n if slakh_dir is 
not None:\n print('loading Slakh2100 Dataset ...')\n self.load_data(slakh_dir, sample_len, hop_len)\n if pop909_dir is not None:\n print('loading Pop909 Dataset ...')\n self.load_data(pop909_dir, sample_len, hop_len)\n\n def __len__(self):\n return len(self.anchor_list)\n \n def __getitem__(self, idx):\n song_id, start = self.anchor_list[idx]\n\n if self.mode == 'train': \n tracks_sample = self.memory['tracks'][song_id][:, start: start+self.sample_len]\n program_sample = self.memory['programs'][song_id]\n #delete empty tracks if any\n non_empty = np.nonzero(np.sum(tracks_sample, axis=(1, 2)))[0]\n tracks_sample = tracks_sample[non_empty]\n program_sample = program_sample[non_empty]\n\n elif (self.mode == 'test') or (self.mode == 'inference'): \n tracks_sample = self.memory['tracks'][song_id][:, start:]\n program_sample = self.memory['programs'][song_id]\n\n if ((len(program_sample) <= 3) and (program_sample == 0).all()):\n #merge pop909 into a single piano track at certain probability\n if np.random.rand() < self.merge_pop909: \n tracks_sample = np.max(tracks_sample, axis=0, keepdims=True)\n program_sample = np.array([0])\n\n if self.with_dynamics:\n dynamics = self.memory['dynamics'][song_id][:, start: start+self.sample_len]\n else: \n dynamics = None\n \n return tracks_sample, program_sample, dynamics, self.memory['dir'][song_id]\n\n\n def slakh_program_mapping(self, programs):\n return np.array([EMBED_PROGRAM_MAPPING[SLAKH_PROGRAM_MAPPING[program]] for program in programs])\n\n\n def load_data(self, data_dir, sample_len, hop_len):\n song_list = [os.path.join(data_dir, self.split, item) for item in os.listdir(os.path.join(data_dir, self.split))]\n if self.debug_mode:\n song_list = song_list[: 10]\n for song_dir in tqdm(song_list):\n song_data = np.load(song_dir)\n tracks = song_data['tracks'] #(n_track, time, 128)\n if 'programs' in song_data:\n programs = song_data['programs'] #(n_track, )\n else:\n programs = np.array([0]*len(tracks))\n\n center_pitch = compute_center_pitch(tracks)\n pitch_sort = np.argsort(center_pitch)[::-1]\n tracks = tracks[pitch_sort]\n programs = programs[pitch_sort]\n\n \"\"\"clipping\"\"\" \n if self.mode == 'train':\n if self.split =='validation':\n # during model training, no overlapping for validation set\n for i in range(0, tracks.shape[1], sample_len):\n if i + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), i)) #(song_id, start, total_length)\n else:\n # otherwise, hop size is 1-bar\n downbeats = np.nonzero(song_data['db_indicator'])[0]\n for i in range(0, len(downbeats), hop_len):\n if downbeats[i] + sample_len >= tracks.shape[1]:\n break\n self.anchor_list.append((len(self.memory['tracks']), downbeats[i])) #(song_id, start)\n\n elif (self.mode == 'test') or (self.mode == 'inference'):\n start = np.nonzero(song_data['db_indicator'])[0][0]\n end = start + (tracks.shape[1] - start) // sample_len * sample_len\n if end < tracks.shape[1]:\n pad_len = end + sample_len - tracks.shape[1]\n end += sample_len\n tracks = np.pad(tracks, ((0, 0), (0, pad_len), (0, 0)), mode='constant', constant_values=(0,))\n tracks = tracks[:, start: end]\n self.anchor_list.append((len(self.memory['tracks']), start))\n\n self.memory['tracks'].append(tracks)\n self.memory['programs'].append(self.slakh_program_mapping(programs))\n self.memory['dir'].append(song_dir)\n\n if self.with_dynamics:\n self.memory['dynamics'].append(song_data['dynamics'])" }, { "identifier": "collate_fn", "path": "orchestrator/QA_dataset.py", "snippet": "def 
collate_fn(batch, device, pitch_shift=True):\n #print(batch)\n max_tracks = max([max(len(item[0]), 1) for item in batch])\n\n tracks = [] \n mixture = []\n instrument = []\n aux_feature = []\n mask = [] #track-wise pad mask\n function = []\n\n if pitch_shift:\n aug_p = AUG_P / AUG_P.sum()\n aug_shift = np.random.choice(np.arange(-6, 6), 1, p=aug_p)[0]\n else:\n aug_shift = 0\n\n for pr, programs, _, _ in batch:\n pr = pr_mat_pitch_shift(pr, aug_shift)\n aux, _, func = compute_pr_feat(pr)\n mask.append([0]*len(pr) + [1]*(max_tracks-len(pr)))\n\n pr = np.pad(pr, ((0, max_tracks-len(pr)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n programs = np.pad(programs, (0, max_tracks-len(programs)), mode='constant', constant_values=(NUM_INSTR_CLASS,))\n aux = np.pad(aux, ((0, max_tracks-len(aux)), (0, 0), (0, 0)), mode='constant', constant_values=(0,))\n func = np.pad(func, ((0, max_tracks-len(func)), (0, 0)), mode='constant', constant_values=(0,))\n\n mix = pr2grid(np.max(pr, axis=0), max_note_count=32)\n grid = np.array([pr2grid(matrix) for matrix in pr])\n\n tracks.append(grid)\n mixture.append(mix)\n instrument.append(programs)\n aux_feature.append(aux)\n function.append(func)\n\n return torch.from_numpy(np.array(mixture)).long().to(device), \\\n torch.from_numpy(np.array(instrument)).to(device), \\\n torch.from_numpy(np.array(function)).float().to(device),\\\n torch.from_numpy(np.array(tracks)).long().to(device), \\\n torch.from_numpy(np.array(aux_feature)).float().to(device), \\\n torch.BoolTensor(mask).to(device)" }, { "identifier": "compute_pr_feat", "path": "orchestrator/QA_dataset.py", "snippet": "def compute_pr_feat(pr):\n #pr: (track, time, 128)\n onset = (np.sum(pr, axis=-1) > 0) * 1. #(track, time)\n rhy_intensity = np.clip(np.sum((pr > 0) * 1., axis=-1) / 14, a_min=None, a_max=1) #(track, time)\n\n weight = np.sum(pr, axis=-1)\n weight[weight==0] = 1\n pitch_center = np.sum(np.arange(0, 128)[np.newaxis, np.newaxis, :] * pr, axis=-1) / weight / 128\n\n feature = np.stack((onset, rhy_intensity, pitch_center), axis=-1)\n\n func_pitch = np.sum((pr > 0) * 1., axis=-2) / 32\n\n func_time = rhy_intensity.copy()\n \n return feature, func_pitch, func_time" }, { "identifier": "EMBED_PROGRAM_MAPPING", "path": "orchestrator/QA_dataset.py", "snippet": "EMBED_PROGRAM_MAPPING = dict({\n 0: 0, 4: 1, 8: 2, 16: 3, 24: 4, 26: 5, 29: 6, 32: 7,\\\n 33: 8, 40: 9, 41: 10, 42: 11, 43: 12, 46: 13, 47: 14, 48: 15,\\\n 50: 16, 52: 17, 55: 18, 56: 19, 57: 20, 58: 21, 60: 22, 61: 23, \n 64: 24, 66: 25, 67: 26, 68: 27, 69: 28, 70: 29, 71: 30, 72: 31,\\\n 80: 32, 88: 33})" }, { "identifier": "Prior", "path": "orchestrator/prior_model.py", "snippet": "class Prior(nn.Module):\n def __init__(self, mixture_encoder=None,\n function_encoder=None,\n context_enc_layer=12, \n function_dec_layer=12, \n d_model=256, \n nhead=8, \n dim_feedforward=1024, \n dropout=.1, \n function_resolution=8,\n inference=False,\n QA_model=None,\n DEVICE='cuda:0'):\n super(Prior, self).__init__()\n\n # embeddings\n self.func_embedding = nn.Embedding(num_embeddings=NUM_TIME_CODE+1, embedding_dim=d_model, padding_idx=NUM_TIME_CODE)\n self.prog_embedding = nn.Embedding(num_embeddings=NUM_INSTR_CLASS+1, embedding_dim=d_model, padding_idx=NUM_INSTR_CLASS)\n self.total_len_embedding = nn.Embedding(num_embeddings=len(TOTAL_LEN_BIN)+1, embedding_dim=d_model, padding_idx=len(TOTAL_LEN_BIN))\n self.abs_pos_embedding = nn.Embedding(num_embeddings=len(ABS_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(ABS_POS_BIN))\n self.rel_pos_embedding 
= nn.Embedding(num_embeddings=len(REL_POS_BIN)+1, embedding_dim=d_model, padding_idx=len(REL_POS_BIN))\n\n self.start_embedding = nn.Parameter(torch.empty(NUM_INSTR_CLASS+1, d_model))\n nn.init.normal_(self.start_embedding)\n with torch.no_grad():\n self.start_embedding[NUM_INSTR_CLASS].fill_(0)\n\n #pre-trained encoders\n if not inference:\n self.mixture_encoder = mixture_encoder\n for param in self.mixture_encoder.parameters():\n param.requires_grad = False\n self.function_encoder = function_encoder\n for param in self.function_encoder.parameters():\n param.requires_grad = False\n else:\n self.QA_model = QA_model\n self.mixture_encoder = self.QA_model.mixture_enc\n self.function_encoder = self.QA_model.function_enc\n\n \n self.context_enc = nn.TransformerEncoder(\n nn.TransformerEncoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE),\n num_layers=context_enc_layer)\n #multi-track Transformer\n self.mt_trf = nn.ModuleDict({})\n for layer in range(function_dec_layer):\n self.mt_trf[f'track_layer_{layer}'] = TransformerEncoderLayerRPE(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n norm_first=True,\n max_len=18).to(DEVICE)\n self.mt_trf[f'time_layer_{layer}'] = nn.TransformerDecoderLayer(d_model=d_model, \n nhead=nhead, \n dim_feedforward=dim_feedforward, \n dropout=dropout, \n activation=F.gelu, \n batch_first=True, \n norm_first=True,\n device=DEVICE)\n \n #positional encoding\n self.max_len = 1000\n position = torch.arange(self.max_len).unsqueeze(1)\n div_term = torch.exp(torch.arange(0, d_model, 2) * (-math.log(10000.0) / d_model))\n pe = torch.zeros(1, self.max_len, d_model)\n pe[0, :, 0::2] = torch.sin(position * div_term)\n pe[0, :, 1::2] = torch.cos(position * div_term)\n pe = pe.to(DEVICE)\n self.register_buffer('pe', pe)\n \n #decoder output module \n self.func_out_linear = nn.Linear(d_model, NUM_TIME_CODE)\n\n #constants\n self.d_model = d_model\n self.function_dec_layer = function_dec_layer\n self.func_res = function_resolution\n\n #loss function\n self.criterion = nn.CrossEntropyLoss(reduction='mean')\n\n\n def generate_square_subsequent_mask(self, sz=15):\n return torch.triu(torch.ones(sz, sz), diagonal=1).bool()\n\n\n def func_get_next_token(self, token, gt=None):\n #token: (batch, codebook_size)\n #gt: (bs,)\n if gt is None:\n idx = token.max(-1)[1]\n else:\n idx = gt\n token = torch.zeros_like(token, device=token.device)\n arange = torch.arange(token.shape[0], device=token.device).long()\n token[arange, idx] = 1\n return token.unsqueeze(1) #one-hot shaoe (batch, 1, ft_codebook_size)\n\n \n\n\n def run(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False):\n #mix: (batch, max_time, 256)\n #prog: (batch, max_track)\n #function: (batch, max_time, max_track, 8)\n #tm_mask: (batch, max_time)\n #tk_mask: (batch, max_track)\n #total_len: (batch, max_time)\n #abs_pos: (batch, max_time)\n #rel_pos: (batch, max_time)\n batch, max_time, _ = mix.shape\n _, max_track = prog.shape\n \n mix = mix + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix = mix + self.total_len_embedding(total_len)\n mix = mix + self.abs_pos_embedding(abs_pos)\n mix = mix + self.rel_pos_embedding(rel_pos)\n \n mix = self.context_enc(mix) #(batch, max_time, 256)\n mix = mix.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, max_time, 256)\n mix = mix.reshape(-1, 
max_time, self.d_model)\n\n function = function.permute(0, 1, 3, 2).reshape(batch, -1, max_track)\n func = self.func_embedding(function)#(batch, 8*max_time, max_track, d_model)\n \n func = torch.cat([\n self.start_embedding[prog].unsqueeze(1), #(batch, 1, max_track, d_model)\n func[:, :-1]], \n dim=1) #batch, 8*max_time, max_track, d_model\n\n func = func + self.prog_embedding(prog).unsqueeze(1) \n\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1).unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func, \n src_key_padding_mask=tk_mask.unsqueeze(1).repeat(1, self.func_res*max_time, 1).reshape(-1, max_track))\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, self.func_res*max_time, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(self.func_res*max_time).to(func.device),\n tgt_key_padding_mask=tm_mask.unsqueeze(1).repeat(1, max_track, 1).reshape(-1, max_time).repeat_interleave(self.func_res, dim=-1),\n memory=mix) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, 8*max_time, max_track, d_model)\n\n function_recon = self.func_out_linear(func)\n\n return function_recon, function\n\n \n\n def loss_function(self, function_recon, function_gt, tm_mask, tk_mask):\n\n mask = torch.logical_or(tm_mask.repeat_interleave(8, dim=-1).unsqueeze(-1), tk_mask.unsqueeze(1)) #(batch, 8*max_time, track) \n unmask = torch.logical_not(mask)\n\n function_loss = self.criterion(function_recon[unmask].reshape(-1, NUM_TIME_CODE), \n function_gt[unmask].reshape(-1))\n return function_loss\n \n\n def loss(self, mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos):\n output = self.run(mix, prog, function, tm_mask, tk_mask, total_len, abs_pos, rel_pos, inference=False)\n return self.loss_function(*output, tm_mask, tk_mask)\n \n\n def forward(self, mode, *input, **kwargs):\n if mode in [\"run\", 0]:\n return self.run(*input, **kwargs)\n elif mode in ['loss', 'train', 1]:\n return self.loss(*input, **kwargs)\n elif mode in ['inference', 'eval', 'val', 2]:\n return self.inference(*input, **kwargs)\n else:\n raise NotImplementedError\n\n\n def run_autoregressive_greedy(self, mix, prog, function, total_len, abs_pos, rel_pos, blur=.5):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #function: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = 
mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n function = function.reshape(-1, 32)\n function = self.function_encoder.get_code_indices(function).reshape(batch, max_track, self.func_res)\n\n\n for idx in range(self.func_res, self.func_res*num_2bar):\n func = self.func_embedding(function) #*batch, max_track, 8, d_model\n func = func.permute(0, 2, 1, 3).reshape(batch, -1, max_track, self.d_model)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3) #(batch, num2bar-1, max_track, d_model)\n\n \n func_pred = self.func_out_linear(func[:, -1,]).max(-1)[1].unsqueeze(-1)\n\n function = torch.cat([function, func_pred], dim=-1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n function = function.reshape(batch, max_track, num_2bar, self.func_res).permute(0, 2, 1, 3)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n\n def run_autoregressive_nucleus(self, mix, prog, func_prompt, total_len, abs_pos, rel_pos, blur=.5, p=.1, t=1):\n #mix: (batch, num2bar, bar_resolution, max_simu_note, 6)\n #prog: (batch, max_track)\n #func_prompt: (batch, 1, max_track, 32)\n #total_len: (batch, num2bar)\n #abs_pos: (batch, num2bar)\n #rel_pos: (batch, num2bar)\n\n batch, num_2bar, time, max_simu_note, _ = mix.shape\n _, max_track = prog.shape\n\n mix = mix.reshape(-1, time, max_simu_note, 6)\n mix = self.mixture_encoder(mix)[0].mean.reshape(batch, num_2bar, -1) #(batch, num_2bar, 256)\n mix_ = (1-blur)*mix.clone() + blur*torch.empty(mix.shape, device=mix.device).normal_(mean=0, std=1) \n \n mix_ = mix_ + self.pe[:, :self.func_res*mix.shape[1], :][:, ::self.func_res]\n mix_ = mix_ + self.total_len_embedding(total_len)\n mix_ = mix_ + self.abs_pos_embedding(abs_pos)\n mix_ = mix_ + self.rel_pos_embedding(rel_pos)\n\n mix_ = self.context_enc(mix_) #(batch, num_bar, 256)\n mix_ = mix_.unsqueeze(1) + self.prog_embedding(prog).unsqueeze(2) #(batch, max_track, num_bar, 256)\n mix_ = mix_.reshape(-1, num_2bar, self.d_model)\n \n start = self.start_embedding[prog].unsqueeze(1) #(batch, 1, max_track, dmodel)\n\n if func_prompt is not None:\n func_prompt = func_prompt.reshape(-1, 32)\n func_prompt = self.function_encoder.get_code_indices(func_prompt).reshape(batch, max_track, self.func_res).permute(0, 2, 1) #(batch, 8, max_track)\n #else:\n function = torch.empty((batch, 0, max_track)).long().to(mix.device)\n\n for idx in range(self.func_res*num_2bar):\n if (idx < self.func_res) and (func_prompt is not None):\n 
start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n function = torch.cat([function, func_prompt[:, idx: idx+1, :]], dim=1) \n continue\n else:\n func = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func = func + self.prog_embedding(prog).unsqueeze(1)\n func = func + self.pe[:, :func.shape[1], :].unsqueeze(2)\n\n func = func + self.total_len_embedding(total_len).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.abs_pos_embedding(abs_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n func = func + self.rel_pos_embedding(rel_pos).repeat_interleave(self.func_res, dim=1)[:, :func.shape[1]].unsqueeze(2)\n\n for layer in range(self.function_dec_layer):\n \n func = func.reshape(-1, max_track, self.d_model)\n func = self.mt_trf[f'track_layer_{layer}'](src=func)\n func = func.reshape(batch, -1, max_track, self.d_model).permute(0, 2, 1, 3).reshape(-1, idx+1, self.d_model)\n func = self.mt_trf[f'time_layer_{layer}'](tgt=func,\n tgt_mask=self.generate_square_subsequent_mask(sz=idx+1).to(func.device),\n memory=mix_) \n func = func.reshape(batch, max_track, -1, self.d_model).permute(0, 2, 1, 3)#(batch, num2bar-1, max_track, d_model)\n \n start = torch.cat([start, self.func_embedding(function[:, idx-1: idx, :])], dim=1)\n\n func_logits = self.func_out_linear(func[:, -1,]) / t\n filtered_func_logits = self.nucleus_filter(func_logits, p)\n func_probability = F.softmax(filtered_func_logits, dim=-1)\n func_pred = torch.multinomial(func_probability.reshape(-1, NUM_TIME_CODE), 1).reshape(func_probability.shape[:-1]).unsqueeze(1)\n\n function = torch.cat([function, func_pred], dim=1)\n if function.shape[1] == self.func_res*num_2bar:\n break\n \n\n \n function = function.reshape(batch, num_2bar, self.func_res, max_track).permute(0, 1, 3, 2)\n z_func = self.function_encoder.infer_by_codes(function)\n return self.QA_model.infer_with_function_codes(mix[0], prog[0].repeat(num_2bar, 1), z_func[0])\n \n def nucleus_filter(self, logits, p):\n #sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n sorted_logits, sorted_indices = torch.sort(logits, dim=-1, descending=True)\n #cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n cum_sum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n # Remove tokens with cumulative probability above the threshold\n #sorted_indices_to_remove = cumulative_probs > p\n nucleus = cum_sum_probs < p\n # Shift the indices to the right to keep also the first token above the threshold\n #sorted_indices_to_remove = torch.cat([sorted_indices_to_remove.new_zeros(sorted_indices_to_remove.shape[:-1] + (1,)), sorted_indices_to_remove[..., :-1]], dim=-1)\n nucleus = torch.cat([nucleus.new_ones(nucleus.shape[:-1] + (1,)), nucleus[..., :-1]], dim=-1)\n nucleus = nucleus.gather(-1, sorted_indices.argsort(-1))\n\n logits[~nucleus] = float('-inf')\n return logits\n \n\n\n @classmethod\n def init_model(cls, pretrain_model_path=None, DEVICE='cuda:0'):\n \"\"\"Fast model initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n if pretrain_model_path is not None:\n vqQaA.load_state_dict(torch.load(pretrain_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(vqQaA.mixture_enc, vqQaA.function_enc, DEVICE=DEVICE).to(DEVICE)\n return model\n \n @classmethod\n def init_inference_model(cls, prior_model_path, QA_model_path, DEVICE='cuda:0'):\n \"\"\"Fast model 
initialization.\"\"\"\n vqQaA = Query_and_reArrange(name='pretrain', trf_layers=2, device=DEVICE)\n vqQaA.load_state_dict(torch.load(QA_model_path, map_location=torch.device('cpu')))\n vqQaA.eval()\n model = cls(inference=True, QA_model=vqQaA, DEVICE=DEVICE).to(DEVICE)\n model.load_state_dict(torch.load(prior_model_path), strict=False)\n return model" }, { "identifier": "SLAKH_CLASS_PROGRAMS", "path": "orchestrator/QA_dataset.py", "snippet": "SLAKH_CLASS_PROGRAMS = dict({\n 0: 'Acoustic Piano', #0\n 4: 'Electric Piano', #1\n 8: 'Chromatic Percussion',#2\n 16: 'Organ', #3\n 24: 'Acoustic Guitar', #4\n 26: 'Clean Electric Guitar', #5\n 29: 'Distorted Electric Guitar', #6\n 32: 'Acoustic Bass', #7\n 33: 'Electric Bass', #8\n 40: 'Violin', #9\n 41: 'Viola', #10\n 42: 'Cello', #11\n 43: 'Contrabass', #12\n 46: 'Orchestral Harp', #13\n 47: 'Timpani', #14\n 48: 'String Ensemble', #15\n 50: 'Synth Strings', #16\n 52: 'Choir and Voice', #17\n 55: 'Orchestral Hit', #18\n 56: 'Trumpet', #19\n 57: 'Trombone', #20\n 58: 'Tuba', #21\n 60: 'French Horn', #22\n 61: 'Brass Section', #23\n 64: 'Soprano/Alto Sax', #24\n 66: 'Tenor Sax', #25\n 67: 'Baritone Sax', #26\n 68: 'Oboe', #27\n 69: 'English Horn', #28\n 70: 'Bassoon', #29\n 71: 'Clarinet', #30\n 72: 'Pipe', #31\n 80: 'Synth Lead', #32\n 88: 'Synth Pad' #33\n})" }, { "identifier": "grid2pr", "path": "orchestrator/utils/format_convert.py", "snippet": "def grid2pr(grid, max_note_count=16, min_pitch=0, pitch_eos_ind=129):\n #grid: (time, max_simu_note, 6)\n if grid.shape[1] == max_note_count:\n grid = grid[:, 1:]\n pr = np.zeros((grid.shape[0], 128), dtype=int)\n for t in range(grid.shape[0]):\n for n in range(grid.shape[1]):\n note = grid[t, n]\n if note[0] == pitch_eos_ind:\n break\n pitch = note[0] + min_pitch\n dur = int(''.join([str(_) for _ in note[1:]]), 2) + 1\n pr[t, pitch] = dur\n return pr" }, { "identifier": "pr2grid", "path": "orchestrator/utils/format_convert.py", "snippet": "def pr2grid(pr_mat, max_note_count=16, max_pitch=127, min_pitch=0,\n pitch_pad_ind=130, dur_pad_ind=2,\n pitch_sos_ind=128, pitch_eos_ind=129):\n pr_mat3d = np.ones((len(pr_mat), max_note_count, 6), dtype=int) * dur_pad_ind\n pr_mat3d[:, :, 0] = pitch_pad_ind\n pr_mat3d[:, 0, 0] = pitch_sos_ind\n cur_idx = np.ones(len(pr_mat), dtype=int)\n for t, p in zip(*np.where(pr_mat != 0)):\n pr_mat3d[t, cur_idx[t], 0] = p - min_pitch\n binary = np.binary_repr(min(int(pr_mat[t, p]), 32) - 1, width=5)\n pr_mat3d[t, cur_idx[t], 1: 6] = \\\n np.fromstring(' '.join(list(binary)), dtype=int, sep=' ')\n if cur_idx[t] == max_note_count-1:\n continue\n cur_idx[t] += 1\n #print(cur_idx)\n pr_mat3d[np.arange(0, len(pr_mat)), cur_idx, 0] = pitch_eos_ind\n return pr_mat3d" }, { "identifier": "matrix2midi", "path": "orchestrator/utils/format_convert.py", "snippet": "def matrix2midi(matrices, programs, init_tempo=120, time_start=0):\n \"\"\"\n Reconstruct a multi-track midi from a 3D matrix of shape (Track. 
Time, 128).\n \"\"\"\n ACC = 16\n tracks = []\n for program in programs:\n track_recon = pyd.Instrument(program=int(program), is_drum=False, name=pyd.program_to_instrument_name(int(program)))\n tracks.append(track_recon)\n\n indices_track, indices_onset, indices_pitch = np.nonzero(matrices)\n alpha = 1 / (ACC // 4) * 60 / init_tempo #timetep between each quntization bin\n for idx in range(len(indices_track)):\n track_id = indices_track[idx]\n onset = indices_onset[idx]\n pitch = indices_pitch[idx]\n\n start = onset * alpha\n duration = matrices[track_id, onset, pitch] * alpha\n velocity = 100\n\n note_recon = pyd.Note(velocity=int(velocity), pitch=int(pitch), start=time_start + start, end=time_start + start + duration)\n tracks[track_id].notes.append(note_recon)\n \n midi_recon = pyd.PrettyMIDI(initial_tempo=init_tempo)\n midi_recon.instruments = tracks\n return midi_recon" }, { "identifier": "midi2matrix", "path": "orchestrator/utils/format_convert.py", "snippet": "def midi2matrix(midi, quaver):\n pr_matrices = []\n programs = []\n for track in midi.instruments:\n programs.append(track.program)\n pr_matrix = np.zeros((len(quaver), 128))\n for note in track.notes:\n note_start = np.argmin(np.abs(quaver - note.start))\n note_end = np.argmin(np.abs(quaver - note.end))\n if note_end == note_start:\n note_end = min(note_start + 1, len(quaver) - 1)\n pr_matrix[note_start, note.pitch] = note_end - note_start\n pr_matrices.append(pr_matrix)\n return np.array(pr_matrices), np.array(programs)" }, { "identifier": "TOTAL_LEN_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "TOTAL_LEN_BIN = np.array([4, 7, 12, 15, 20, 23, 28, 31, 36, 39, 44, 47, 52, 55, 60, 63, 68, 71, 76, 79, 84, 87, 92, 95, 100, 103, 108, 111, 116, 119, 124, 127, 132])" }, { "identifier": "ABS_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "ABS_POS_BIN = np.arange(129)" }, { "identifier": "REL_POS_BIN", "path": "orchestrator/prior_dataset.py", "snippet": "REL_POS_BIN = np.arange(128)" } ]
import os import pretty_midi as pyd import numpy as np import torch import piano_arranger.format_converter as cvt from torch.utils.data import DataLoader from scipy.interpolate import interp1d from tqdm import tqdm from piano_arranger.acc_utils import split_phrases from piano_arranger.models import DisentangleVAE from piano_arranger.AccoMontage import find_by_length, dp_search, re_harmonization, get_texture_filter, ref_spotlight from orchestrator import Slakh2100_Pop909_Dataset, collate_fn, compute_pr_feat, EMBED_PROGRAM_MAPPING, Prior from orchestrator.QA_dataset import SLAKH_CLASS_PROGRAMS from orchestrator.utils import grid2pr, pr2grid, matrix2midi, midi2matrix from orchestrator.prior_dataset import TOTAL_LEN_BIN, ABS_POS_BIN, REL_POS_BIN
18,858
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo)
SLAKH_CLASS_MAPPING = {v: k for k, v in EMBED_PROGRAM_MAPPING.items()} def load_premise(DATA_FILE_ROOT, DEVICE): """Load AccoMontage Search Space""" print('Loading AccoMontage piano texture search space. This may take 1 or 2 minutes ...') data = np.load(os.path.join(DATA_FILE_ROOT, 'phrase_data.npz'), allow_pickle=True) melody = data['melody'] acc = data['acc'] chord = data['chord'] vel = data['velocity'] cc = data['cc'] acc_pool = {} for LEN in tqdm(range(2, 13)): (mel, acc_, chord_, vel_, cc_, song_reference) = find_by_length(melody, acc, chord, vel, cc, LEN) acc_pool[LEN] = (mel, acc_, chord_, vel_, cc_, song_reference) texture_filter = get_texture_filter(acc_pool) edge_weights=np.load(os.path.join(DATA_FILE_ROOT, 'edge_weights.npz'), allow_pickle=True) """Load Q&A Prompt Search Space""" print('loading orchestration prompt search space ...') slakh_dir = os.path.join(DATA_FILE_ROOT, 'Slakh2100_inference_set') dataset = Slakh2100_Pop909_Dataset(slakh_dir=slakh_dir, pop909_dir=None, debug_mode=False, split='validation', mode='train') loader = DataLoader(dataset, batch_size=1, shuffle=True, collate_fn=lambda b:collate_fn(b, DEVICE)) REF = [] REF_PROG = [] REF_MIX = [] for (_, prog, function, _, _, _) in loader: prog = prog[0, :] REF.extend([batch for batch in function]) REF_PROG.extend([prog for _ in range(len(function))]) REF_MIX.append(torch.sum(function, dim=1)) REF_MIX = torch.cat(REF_MIX, dim=0) """Initialize orchestration model (Prior + Q&A)""" print('Initialize model ...') prior_model_path = os.path.join(DATA_FILE_ROOT, 'params_prior.pt') QaA_model_path = os.path.join(DATA_FILE_ROOT, 'params_qa.pt') orchestrator = Prior.init_inference_model(prior_model_path, QaA_model_path, DEVICE=DEVICE) orchestrator.to(DEVICE) orchestrator.eval() piano_arranger = DisentangleVAE.init_model(torch.device('cuda')).cuda() piano_arranger.load_state_dict(torch.load(os.path.join(DATA_FILE_ROOT, 'params_reharmonizer.pt'))) print('Finished.') return piano_arranger, orchestrator, (acc_pool, edge_weights, texture_filter), (REF, REF_PROG, REF_MIX) def read_lead_sheet(DEMO_ROOT, SONG_NAME, SEGMENTATION, NOTE_SHIFT, melody_track_ID=0): melody_roll, chord_roll = cvt.leadsheet2matrix(os.path.join(DEMO_ROOT, SONG_NAME, 'lead sheet.mid'), melody_track_ID) assert(len(melody_roll == len(chord_roll))) if NOTE_SHIFT != 0: melody_roll = melody_roll[int(NOTE_SHIFT*4):, :] chord_roll = chord_roll[int(NOTE_SHIFT*4):, :] if len(melody_roll) % 16 != 0: pad_len = (len(melody_roll)//16+1)*16-len(melody_roll) melody_roll = np.pad(melody_roll, ((0, pad_len), (0, 0))) melody_roll[-pad_len:, -1] = 1 chord_roll = np.pad(chord_roll, ((0, pad_len), (0, 0))) chord_roll[-pad_len:, 0] = -1 chord_roll[-pad_len:, -1] = -1 CHORD_TABLE = np.stack([cvt.expand_chord(chord) for chord in chord_roll[::4]], axis=0) LEADSHEET = np.concatenate((melody_roll, chord_roll[:, 1: -1]), axis=-1) #T*142, quantized at 16th query_phrases = split_phrases(SEGMENTATION) #[('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)] midi_len = len(LEADSHEET)//16 anno_len = sum([item[1] for item in query_phrases]) if midi_len > anno_len: LEADSHEET = LEADSHEET[: anno_len*16] CHORD_TABLE = CHORD_TABLE[: anno_len*4] print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. 
The lead sheet is truncated to {anno_len} bars.') elif midi_len < anno_len: pad_len = (anno_len - midi_len)*16 LEADSHEET = np.pad(LEADSHEET, ((0, pad_len), (0, 0))) LEADSHEET[-pad_len:, 129] = 1 CHORD_TABLE = np.pad(CHORD_TABLE, ((0, pad_len//4), (0, 0))) CHORD_TABLE[-pad_len//4:, 11] = -1 CHORD_TABLE[-pad_len//4:, -1] = -1 print(f'Mismatch warning: Detect {midi_len} bars in the lead sheet (MIDI) and {anno_len} bars in the provided phrase annotation. The lead sheet is padded to {anno_len} bars.') melody_queries = [] for item in query_phrases: start_bar = item[-1] length = item[-2] segment = LEADSHEET[start_bar*16: (start_bar+length)*16] melody_queries.append(segment) #melody queries: list of T16*142, segmented by phrases return (LEADSHEET, CHORD_TABLE, melody_queries, query_phrases) def piano_arrangement(pianoRoll, chord_table, melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, piano_arranger, PREFILTER, tempo=100): print('Phrasal Unit selection begins:\n\t', f'{len(query_phrases)} phrases in the lead sheet;\n\t', f'set note density filter: {PREFILTER}.') phrase_indice, chord_shift = dp_search( melody_queries, query_phrases, acc_pool, edge_weights, texture_filter, filter_id=PREFILTER) path = phrase_indice[0] shift = chord_shift[0] print('Re-harmonization begins ...') midi_recon, acc = re_harmonization(pianoRoll, chord_table, query_phrases, path, shift, acc_pool, model=piano_arranger, get_est=True, tempo=tempo)
acc = np.array([grid2pr(matrix) for matrix in acc])
13
2023-10-23 12:36:57+00:00
24k
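For reference, the inline comment in read_lead_sheet above documents split_phrases as turning a phrase annotation into (label, length_in_bars, start_bar) triples, e.g. [('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)]. A minimal sketch consistent with that comment, assuming segmentation strings of the form 'A8A8B8B8' (hypothetical reimplementation; the actual AccoMontage helper may parse its annotation format differently):

import re

def split_phrases_sketch(segmentation: str):
    """Parse e.g. 'A8A8B8B8' into (label, length_in_bars, start_bar) triples."""
    phrases, start = [], 0
    for label, length in re.findall(r"([A-Za-z])(\d+)", segmentation):
        phrases.append((label, int(length), start))
        start += int(length)
    return phrases

# Matches the expected output shown in the read_lead_sheet comment above.
assert split_phrases_sketch("A8A8B8B8") == [('A', 8, 0), ('A', 8, 8), ('B', 8, 16), ('B', 8, 24)]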
liuqidong07/MOELoRA-peft
src/MLoRA/peft/peft_model.py
[ { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "Gate", "path": "src/MLoRA/peft/shared.py", "snippet": "class Gate(nn.Module):\n \"\"\"Gate\"\"\"\n def __init__(self, peft_config: PeftConfig, adapter_name=\"default\"):\n\n super().__init__()\n\n self.expert_num = peft_config.expert_num\n self.task_num = peft_config.task_num\n self.te_dim = peft_config.task_embedding_dim\n\n #self.lora_task_embedding = nn.Embedding(self.task_num+1, self.te_dim)# 使用embedding来代替线性层\n self.GateL = nn.Linear(self.te_dim, self.expert_num, bias=False)\n self.act = nn.Softmax(dim=1) # 第0维为batch size\n \n def forward(self, task_em):\n\n #task_em = self.lora_task_embedding(x)\n y = self.GateL(task_em)\n y = self.act(y)\n\n return y" }, { "identifier": "GateN", "path": "src/MLoRA/peft/shared.py", "snippet": "class GateN(nn.Module):\n \"\"\"Gate New Function\"\"\"\n def __init__(self, expert_num, task_embedding_dim):\n\n super().__init__()\n\n self.expert_num = expert_num\n self.te_dim = task_embedding_dim\n\n self.GateL = nn.Linear(self.te_dim, self.expert_num, bias=False)\n self.act = nn.Softmax(dim=1) # 第0维为batch size\n \n def forward(self, task_em):\n\n #task_em = self.lora_task_embedding(x)\n y = self.GateL(task_em)\n y = self.act(y)\n\n return y" }, { "identifier": "AdaptionPromptModel", "path": "src/MLoRA/peft/tuners/adaption_prompt.py", "snippet": "class AdaptionPromptModel(nn.Module):\n \"\"\"\n Implements adaption prompts as described in https://arxiv.org/pdf/2303.16199.pdf.\n\n The top L attention modules are replaced with AdaptedAttention modules that wrap the original ones, but insert\n trainable prompts with gates (for zero init).\n\n Notes on the multi-adapter pattern:\n - We store the states of different adapters by keeping a dictionary of AdaptedAttention modules indexed by adapter\n name.\n - Every time we switch adapters, we remove the modules of the currently active adapter from the model, store them\n in the dictionary, and replace them with the modules of the new adapter.\n - To avoid duplicated and potentially inconsistent state, the currently active adapter is always removed from the\n dictionary.\n - Disabling the adapter would also result in the modules being removed from the model.\n \"\"\"\n\n def __init__(self, model, configs: Dict, adapter_name: str):\n super().__init__()\n self.model = model\n # Store adapter configs by name.\n self._configs: Dict[str, AdaptionPromptConfig] = {}\n # Store lists of the parents of the affected attention modules by adapter name.\n # We keep references to the parents so we can swap the adapters in-and-out of the model.\n self._parents: Dict[str, List[nn.Module]] = {}\n # Store lists of 
cached AdaptedAttention modules by name.\n self._cached_adapters: Dict[str, List] = {}\n # The name of the currently active adapter.\n self._active_adapter = None\n # Whether the adapter is enabled.\n self._enabled = True\n self.forward = self.model.forward\n self.add_adapter(adapter_name, configs[adapter_name])\n self._mark_only_adaption_prompts_as_trainable()\n\n def add_adapter(self, adapter_name: str, config: AdaptionPromptConfig) -> None:\n \"\"\"Add an adapter with the given name and config.\"\"\"\n config = prepare_config(config, self.model)\n if adapter_name in self._configs:\n raise ValueError(f\"Adapter with name '{adapter_name}' already exists.\")\n\n parents = []\n for name, _ in self.model.named_modules():\n if name.endswith(config.target_modules):\n par, _, _ = _get_submodules(self.model, name)\n parents.append(par)\n if len(parents) < config.adapter_layers:\n raise ValueError(\n f\"Config specifies more adapter layers '{config.adapter_layers}'\"\n f\" than the model has '{len(parents)}'.\"\n )\n # Note that if the target modules are not in Sequential, ModuleList, or\n # some other PyTorch ordered container, the behavior is undefined as we\n # assume here that the order of the modules is the same as the order of\n # the transformer decoder layers.\n parents = parents[-config.adapter_layers :]\n self._parents[adapter_name] = parents\n\n # It is only None during initialization.\n # If it is disabled, we don't have to remove the modules.\n if self._active_adapter is not None and self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n self._active_adapter = adapter_name\n self._configs[adapter_name] = config\n self._create_adapted_attentions(config, parents)\n if not self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n\n if config.inference_mode:\n _freeze_adapter(self.model, adapter_name)\n\n def set_adapter(self, adapter_name: str) -> None:\n \"\"\"Set the model to use the adapter with the given name.\"\"\"\n if self._active_adapter == adapter_name:\n return\n if adapter_name not in self._configs:\n raise ValueError(f\"Adapter with name '{adapter_name}' does not exist.\")\n\n if self._enabled:\n self._remove_adapted_attentions(self._active_adapter)\n self._set_adapted_attentions(adapter_name)\n\n self._active_adapter = adapter_name\n\n def enable_adapter_layers(self):\n \"\"\"Enable adapter layers by swapping in cached AdaptedAttention modules.\"\"\"\n self._enabled = True\n self._set_adapted_attentions(self._active_adapter)\n\n def disable_adapter_layers(self):\n \"\"\"Disable adapter layers by swapping out AdaptedAttention modules.\"\"\"\n self._enabled = False\n self._remove_adapted_attentions(self._active_adapter)\n\n def _create_adapted_attentions(self, config: AdaptionPromptConfig, parents: List[nn.Module]) -> None:\n \"\"\"Wrap LlamaAttention modules with newly created AdaptedAttention modules.\"\"\"\n for par in parents:\n attn = AdaptedAttention(\n model_type=self.model.config.model_type,\n adapter_len=config.adapter_len,\n model=getattr(par, config.target_modules),\n )\n setattr(par, config.target_modules, attn)\n\n def _set_adapted_attentions(self, adapter_name: str) -> None:\n \"\"\"Replace LlamaAttention modules with cached AdaptedAttention modules.\"\"\"\n cached = self._cached_adapters[adapter_name]\n del self._cached_adapters[adapter_name]\n config = self._configs[adapter_name]\n for i, par in enumerate(self._parents[adapter_name]):\n setattr(par, config.target_modules, cached[i])\n\n def _remove_adapted_attentions(self, 
adapter_name: str) -> None:\n \"\"\"Remove AdaptedAttention modules from the model and store them in the cache.\"\"\"\n config = self._configs[adapter_name]\n adapted_attentions = []\n for par in self._parents[adapter_name]:\n attn = getattr(par, config.target_modules)\n adapted_attentions.append(attn)\n setattr(par, config.target_modules, attn.model)\n self._cached_adapters[adapter_name] = adapted_attentions\n\n def _mark_only_adaption_prompts_as_trainable(self) -> None:\n \"\"\"Freeze all parameters of the model except the adaption prompts.\"\"\"\n for n, p in self.model.named_parameters():\n if not is_adaption_prompt_trainable(n):\n p.requires_grad = False\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n # This is necessary as e.g. causal models have various methods that we\n # don't want to re-implement here.\n return getattr(self.model, name)" }, { "identifier": "LoraModel", "path": "src/MLoRA/peft/tuners/lora.py", "snippet": "class LoraModel(torch.nn.Module):\n \"\"\"\n Creates Low Rank Adapter (Lora) model from a pretrained transformers model.\n\n Args:\n model ([`~transformers.PreTrainedModel`]): The model to be adapted.\n config ([`LoraConfig`]): The configuration of the Lora model.\n\n Returns:\n `torch.nn.Module`: The Lora model.\n\n Example:\n\n ```py\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig\n >>> from peft import LoraModel, LoraConfig\n\n >>> config = LoraConfig(\n ... peft_type=\"LORA\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... r=8,\n ... lora_alpha=32,\n ... target_modules=[\"q\", \"v\"],\n ... lora_dropout=0.01,\n ... )\n\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\")\n >>> lora_model = LoraModel(config, model)\n ```\n\n **Attributes**:\n - **model** ([`~transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`LoraConfig`]): The configuration of the Lora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n super().__init__()\n self.model = model\n self.forward = self.model.forward\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_lora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"LoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias) # freeze all layers except for lora layer\n if self.peft_config[adapter_name].inference_mode: # if inference, also freeze lora layer\n _freeze_adapter(self.model, adapter_name)\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key) # parent: the parent mudle of target (e.g., SelfAttention), target: target module (e.g., nn.Linear()), target name: the name of target module (e.g., query_key_value)\n bias = target.bias is not None\n if isinstance(target, LoraLayer): # if the target is LoraLayer, only need to update the parameters\n target.update_layer(\n adapter_name,\n lora_config.r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else: # if not, get the lora parameter for create.\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n eightbit_kwargs = kwargs.copy()\n eightbit_kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = Linear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **eightbit_kwargs\n )\n else: # create based on the original module type\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = Linear(adapter_name, in_features, out_features, bias=bias, **kwargs) # create the lora module, here is not the raw nn.Linear, but the lora layer\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def _replace_module(self, parent_module, child_name, new_module, old_module):\n \"\"\"substitute the original nn.Linear to new Linear (nn.Linear+LoRA block)\"\"\"\n setattr(parent_module, child_name, new_module)\n new_module.weight = old_module.weight\n if old_module.bias is not None:\n new_module.bias = old_module.bias\n if getattr(old_module, \"state\", None) is not None: # synchronize the state and device\n new_module.state = old_module.state\n new_module.to(old_module.weight.device)\n\n # dispatch to correct device\n for name, module in new_module.named_modules():\n if \"lora_\" in name:\n module.to(old_module.weight.device)\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def get_peft_config_as_dict(self, inference: bool = False):\n config_dict = {}\n for key, value in self.peft_config.items():\n config = {k: v.value if isinstance(v, Enum) else v for k, v in asdict(value).items()}\n if inference:\n config[\"inference_mode\"] = True\n config_dict[key] = config\n return config\n\n def _set_adapter_layers(self, enabled=True):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.disable_adapters = False if enabled else True\n\n def enable_adapter_layers(self):\n self._set_adapter_layers(enabled=True)\n\n def disable_adapter_layers(self):\n self._set_adapter_layers(enabled=False)\n\n def set_adapter(self, adapter_name):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n if module.merged:\n warnings.warn(\"Adapter cannot be set when the model is merged. Unmerging the model first.\")\n module.unmerge()\n module.active_adapter = adapter_name\n\n def merge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.merge()\n\n def unmerge_adapter(self):\n for module in self.model.modules():\n if isinstance(module, LoraLayer):\n module.unmerge()\n\n @staticmethod\n def _prepare_lora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_LORA_TARGET_MODULES_MAPPING[model_config[\"model_type\"]]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config\n\n def merge_and_unload(self):\n r\"\"\"\n This method merges the LoRa layers into the base model. 
This is needed if someone wants to use the base model\n as a standalone model.\n \"\"\"\n if getattr(self.config, \"model_type\", None) == \"gpt2\":\n raise ValueError(\"GPT2 models are not supported for merging LORA layers\")\n\n if getattr(self.model, \"is_loaded_in_8bit\", False):\n raise ValueError(\"Cannot merge LORA layers when the model is loaded in 8-bit mode\")\n\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n try:\n parent, target, target_name = _get_submodules(self.model, key)\n except AttributeError:\n continue\n if isinstance(target, LoraLayer):\n bias = target.bias is not None\n new_module = torch.nn.Linear(target.in_features, target.out_features, bias=bias)\n target.merge()\n self._replace_module(parent, target_name, new_module, target)\n\n # save any additional trainable modules part of `modules_to_save`\n if isinstance(target, ModulesToSaveWrapper):\n setattr(parent, target_name, target.modules_to_save[target.active_adapter])\n\n return self.model\n\n def add_weighted_adapter(self, adapters, weights, adapter_name):\n if len({self.peft_config[adapter].r for adapter in adapters}) != 1:\n raise ValueError(\"All adapters must have the same r value\")\n self.peft_config[adapter_name] = self.peft_config[adapters[0]]\n self.peft_config[adapter_name].lora_alpha = self.peft_config[adapters[0]].r\n self._find_and_replace(adapter_name)\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n _freeze_adapter(self.model, adapter_name)\n key_list = [key for key, _ in self.model.named_modules() if \"lora\" not in key]\n for key in key_list:\n _, target, _ = _get_submodules(self.model, key)\n if isinstance(target, LoraLayer):\n target.lora_A[adapter_name].weight.data = target.lora_A[adapter_name].weight.data * 0.0\n target.lora_B[adapter_name].weight.data = target.lora_B[adapter_name].weight.data * 0.0\n for adapter, weight in zip(adapters, weights):\n if adapter not in target.lora_A:\n continue\n target.lora_A[adapter_name].weight.data += (\n target.lora_A[adapter].weight.data * weight * target.scaling[adapter]\n )\n target.lora_B[adapter_name].weight.data += target.lora_B[adapter].weight.data * weight" }, { "identifier": "AdaLoraModel", "path": "src/MLoRA/peft/tuners/adalora.py", "snippet": "class AdaLoraModel(LoraModel):\n \"\"\"\n Creates AdaLoRA (Adaptive LoRA) model from a pretrained transformers model. 
Paper:\n https://openreview.net/pdf?id=lq62uWRJjiY\n\n Args:\n model ([`transformers.PreTrainedModel`]): The model to be adapted.\n config ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n\n Returns:\n `torch.nn.Module`: The AdaLora model.\n\n Example::\n\n >>> from transformers import AutoModelForSeq2SeqLM, LoraConfig >>> from peft import AdaLoraModel, AdaLoraConfig\n >>> config = AdaLoraConfig(\n peft_type=\"ADALORA\", task_type=\"SEQ_2_SEQ_LM\", r=8, lora_alpha=32, target_modules=[\"q\", \"v\"],\n lora_dropout=0.01,\n )\n >>> model = AutoModelForSeq2SeqLM.from_pretrained(\"t5-base\") >>> model = AdaLoraModel(config, model)\n\n **Attributes**:\n - **model** ([`transformers.PreTrainedModel`]) -- The model to be adapted.\n - **peft_config** ([`AdaLoraConfig`]): The configuration of the AdaLora model.\n \"\"\"\n\n def __init__(self, model, config, adapter_name):\n nn.Module.__init__(self)\n self.model = model\n self.peft_config = config\n self.add_adapter(adapter_name, self.peft_config[adapter_name])\n\n def add_adapter(self, adapter_name, config=None):\n if config is not None:\n model_config = self.model.config.to_dict() if hasattr(self.model.config, \"to_dict\") else self.model.config\n config = self._prepare_adalora_config(config, model_config)\n self.peft_config[adapter_name] = config\n self._find_and_replace(adapter_name)\n if len(self.peft_config) > 1 and self.peft_config[adapter_name].bias != \"none\":\n raise ValueError(\n \"AdaLoraModel supports only 1 adapter with bias. When using multiple adapters, set bias to 'none' for all adapters.\"\n )\n traininable_mode_counter = 0\n for config in self.peft_config.values():\n if not config.inference_mode:\n traininable_mode_counter += 1\n\n if traininable_mode_counter > 1:\n raise ValueError(\n \"AdaLoraModel supports only 1 trainable adapter. \"\n \"When using multiple adapters, set inference_mode to True for all adapters except the one you want to train.\"\n )\n\n mark_only_lora_as_trainable(self.model, self.peft_config[adapter_name].bias)\n if self.peft_config[adapter_name].inference_mode:\n _freeze_adapter(self.model, adapter_name)\n else:\n self.trainable_adapter_name = adapter_name\n self.rankallocator = RankAllocator(self.model, self.peft_config[adapter_name], self.trainable_adapter_name)\n\n def _find_and_replace(self, adapter_name):\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. 
\"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.init_r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n }\n key_list = [key for key, _ in self.model.named_modules()]\n for key in key_list:\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, LoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n kwargs.update(\n {\n \"has_fp16_weights\": target.state.has_fp16_weights,\n \"memory_efficient_backward\": target.state.memory_efficient_backward,\n \"threshold\": target.state.threshold,\n \"index\": target.index,\n }\n )\n new_module = SVDLinear8bitLt(\n adapter_name, target.in_features, target.out_features, bias=bias, **kwargs\n )\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = SVDLinear(adapter_name, in_features, out_features, bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )\n\n def __getattr__(self, name: str):\n \"\"\"Forward missing attributes to the wrapped module.\"\"\"\n try:\n return super().__getattr__(name) # defer to nn.Module's logic\n except AttributeError:\n return getattr(self.model, name)\n\n def forward(self, *args, **kwargs):\n outputs = self.model.forward(*args, **kwargs)\n\n # Calculate the orthogonal regularization\n orth_reg_weight = self.peft_config[self.trainable_adapter_name].orth_reg_weight\n assert orth_reg_weight > 0\n\n if hasattr(outputs, \"loss\"):\n regu_loss = 0\n num_param = 0\n for n, p in self.model.named_parameters():\n if (\"lora_A\" in n or \"lora_B\" in n) and self.trainable_adapter_name in n:\n para_cov = p @ p.T if \"lora_A\" in n else p.T @ p\n I = torch.eye(*para_cov.size(), out=torch.empty_like(para_cov))\n I.requires_grad = False\n num_param += 1\n regu_loss += torch.norm(para_cov - I, p=\"fro\")\n regu_loss = regu_loss / num_param\n outputs.loss += orth_reg_weight * regu_loss\n return outputs\n\n def resize_modules_by_rank_pattern(self, rank_pattern, adapter_name):\n lora_config = self.peft_config[adapter_name]\n for name, rank_idx in rank_pattern.items():\n if isinstance(rank_idx, list):\n rank = sum(rank_idx)\n elif isinstance(rank_idx, torch.Tensor):\n rank_idx = rank_idx.view(-1)\n rank = rank_idx.sum().item()\n else:\n raise ValueError(\"Unexcepted type of rank_idx\")\n key = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n _, target, _ = _get_submodules(self.model, key)\n lora_E_weights = target.lora_E[adapter_name][rank_idx]\n lora_A_weights = target.lora_A[adapter_name][rank_idx]\n lora_B_weights = target.lora_B[adapter_name][:, rank_idx]\n ranknum = target.ranknum[adapter_name]\n target.update_layer(\n adapter_name,\n rank,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n with torch.no_grad():\n if rank > 0:\n target.lora_E[adapter_name].copy_(lora_E_weights)\n target.lora_A[adapter_name].copy_(lora_A_weights)\n target.lora_B[adapter_name].copy_(lora_B_weights)\n # The scaling is exactly as the previous\n target.ranknum[adapter_name].copy_(ranknum)\n\n def resize_state_dict_by_rank_pattern(self, rank_pattern, state_dict, adapter_name):\n for name, rank_idx in rank_pattern.items():\n rank = sum(rank_idx)\n prefix = \".\".join(name.split(\".\")[0:-2]) if adapter_name in name else \".\".join(name.split(\".\")[0:-1])\n for layer in [\"lora_E\", \"lora_A\", \"lora_B\"]:\n key = f\"base_model.model.{prefix}.{layer}.{adapter_name}\"\n if layer != \"lora_B\":\n state_dict[key] = (\n state_dict[key][rank_idx] if rank != state_dict[key].shape[0] else state_dict[key]\n )\n else:\n state_dict[key] = (\n state_dict[key][:, rank_idx] if rank != state_dict[key].shape[1] else state_dict[key]\n )\n return state_dict\n\n def update_and_allocate(self, global_step):\n lora_config = self.peft_config[self.trainable_adapter_name]\n # Update the importance score and allocate the budget\n if global_step < lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step)\n if rank_pattern:\n lora_config.rank_pattern = rank_pattern\n # Finalize the budget allocation\n elif global_step == lora_config.total_step - lora_config.tfinal:\n _, rank_pattern = self.rankallocator.update_and_allocate(self.model, global_step, force_mask=True)\n # for some reason, this freezes the trainable parameters and nothing gets updates\n 
# self.resize_modules_by_rank_pattern(rank_pattern, self.trainable_adapter_name)\n lora_config.rank_pattern = rank_pattern\n self.rankallocator.reset_ipt()\n # Currently using inefficient way to mask the unimportant weights using the rank pattern\n # due to problem mentioned above\n elif global_step > lora_config.total_step - lora_config.tfinal:\n self.rankallocator.mask_using_rank_pattern(self.model, lora_config.rank_pattern)\n # Pass the function and do forward propagation\n else:\n return None\n\n @staticmethod\n def _prepare_adalora_config(peft_config, model_config):\n if peft_config.target_modules is None:\n if model_config[\"model_type\"] not in TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING:\n raise ValueError(\"Please specify `target_modules` in `peft_config`\")\n peft_config.target_modules = TRANSFORMERS_MODELS_TO_ADALORA_TARGET_MODULES_MAPPING[\n model_config[\"model_type\"]\n ]\n if peft_config.inference_mode:\n peft_config.merge_weights = True\n return peft_config" }, { "identifier": "PromptEncoder", "path": "src/MLoRA/peft/tuners/p_tuning.py", "snippet": "class PromptEncoder(torch.nn.Module):\n \"\"\"\n The prompt encoder network that is used to generate the virtual token embeddings for p-tuning.\n\n Args:\n config ([`PromptEncoderConfig`]): The configuration of the prompt encoder.\n\n Example:\n\n ```py\n >>> from peft import PromptEncoder, PromptEncoderConfig\n\n >>> config = PromptEncoderConfig(\n ... peft_type=\"P_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_reparameterization_type=\"MLP\",\n ... encoder_hidden_size=768,\n ... )\n\n >>> prompt_encoder = PromptEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt encoder.\n - **mlp_head** (`torch.nn.Sequential`) -- The MLP head of the prompt encoder if `inference_mode=False`.\n - **lstm_head** (`torch.nn.LSTM`) -- The LSTM head of the prompt encoder if `inference_mode=False` and\n `encoder_reparameterization_type=\"LSTM\"`.\n - **token_dim** (`int`) -- The hidden embedding dimension of the base transformer model.\n - **input_size** (`int`) -- The input size of the prompt encoder.\n - **output_size** (`int`) -- The output size of the prompt encoder.\n - **hidden_size** (`int`) -- The hidden size of the prompt encoder.\n - **total_virtual_tokens** (`int`): The total number of virtual tokens of the\n prompt encoder.\n - **encoder_type** (Union[[`PromptEncoderReparameterizationType`], `str`]): The encoder type of the prompt\n encoder.\n\n\n Input shape: (`batch_size`, `total_virtual_tokens`)\n\n Output shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.token_dim = config.token_dim\n self.input_size = self.token_dim\n self.output_size = self.token_dim\n self.hidden_size = config.encoder_hidden_size\n self.total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.encoder_type = config.encoder_reparameterization_type\n\n # embedding\n self.embedding = torch.nn.Embedding(self.total_virtual_tokens, self.token_dim)\n if not config.inference_mode:\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n lstm_dropout = config.encoder_dropout\n num_layers = config.encoder_num_layers\n # LSTM\n self.lstm_head = torch.nn.LSTM(\n input_size=self.input_size,\n hidden_size=self.hidden_size,\n 
num_layers=num_layers,\n dropout=lstm_dropout,\n bidirectional=True,\n batch_first=True,\n )\n\n self.mlp_head = torch.nn.Sequential(\n torch.nn.Linear(self.hidden_size * 2, self.hidden_size * 2),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size * 2, self.output_size),\n )\n\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n warnings.warn(\n f\"for {self.encoder_type}, the `encoder_num_layers` is ignored. Exactly 2 MLP layers are used.\"\n )\n layers = [\n torch.nn.Linear(self.input_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.hidden_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.hidden_size, self.output_size),\n ]\n self.mlp_head = torch.nn.Sequential(*layers)\n\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n def forward(self, indices):\n input_embeds = self.embedding(indices)\n if self.encoder_type == PromptEncoderReparameterizationType.LSTM:\n output_embeds = self.mlp_head(self.lstm_head(input_embeds)[0])\n elif self.encoder_type == PromptEncoderReparameterizationType.MLP:\n output_embeds = self.mlp_head(input_embeds)\n else:\n raise ValueError(\"Prompt encoder type not recognized. Please use one of MLP (recommended) or LSTM.\")\n\n return output_embeds" }, { "identifier": "PrefixEncoder", "path": "src/MLoRA/peft/tuners/prefix_tuning.py", "snippet": "class PrefixEncoder(torch.nn.Module):\n r\"\"\"\n The `torch.nn` model to encode the prefix.\n\n Args:\n config ([`PrefixTuningConfig`]): The configuration of the prefix encoder.\n\n Example:\n\n ```py\n >>> from peft import PrefixEncoder, PrefixTuningConfig\n\n >>> config = PrefixTuningConfig(\n ... peft_type=\"PREFIX_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... encoder_hidden_size=768,\n ... 
)\n >>> prefix_encoder = PrefixEncoder(config)\n ```\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prefix encoder.\n - **transform** (`torch.nn.Sequential`) -- The two-layer MLP to transform the prefix embeddings if\n `prefix_projection` is `True`.\n - **prefix_projection** (`bool`) -- Whether to project the prefix embeddings.\n\n Input shape: (`batch_size`, `num_virtual_tokens`)\n\n Output shape: (`batch_size`, `num_virtual_tokens`, `2*layers*hidden`)\n \"\"\"\n\n def __init__(self, config):\n super().__init__()\n self.prefix_projection = config.prefix_projection\n token_dim = config.token_dim\n num_layers = config.num_layers\n encoder_hidden_size = config.encoder_hidden_size\n num_virtual_tokens = config.num_virtual_tokens\n if self.prefix_projection and not config.inference_mode:\n # Use a two-layer MLP to encode the prefix\n self.embedding = torch.nn.Embedding(num_virtual_tokens, token_dim)\n self.transform = torch.nn.Sequential(\n torch.nn.Linear(token_dim, encoder_hidden_size),\n torch.nn.Tanh(),\n torch.nn.Linear(encoder_hidden_size, num_layers * 2 * token_dim),\n )\n else:\n self.embedding = torch.nn.Embedding(num_virtual_tokens, num_layers * 2 * token_dim)\n\n def forward(self, prefix: torch.Tensor):\n if self.prefix_projection:\n prefix_tokens = self.embedding(prefix)\n past_key_values = self.transform(prefix_tokens)\n else:\n past_key_values = self.embedding(prefix)\n return past_key_values" }, { "identifier": "PromptEmbedding", "path": "src/MLoRA/peft/tuners/prompt_tuning.py", "snippet": "class PromptEmbedding(torch.nn.Module):\n \"\"\"\n The model to encode virtual tokens into prompt embeddings.\n\n Args:\n config ([`PromptTuningConfig`]): The configuration of the prompt embedding.\n word_embeddings (`torch.nn.Module`): The word embeddings of the base transformer model.\n\n **Attributes**:\n - **embedding** (`torch.nn.Embedding`) -- The embedding layer of the prompt embedding.\n\n Example:\n\n ```py\n >>> from peft import PromptEmbedding, PromptTuningConfig\n\n >>> config = PromptTuningConfig(\n ... peft_type=\"PROMPT_TUNING\",\n ... task_type=\"SEQ_2_SEQ_LM\",\n ... num_virtual_tokens=20,\n ... token_dim=768,\n ... num_transformer_submodules=1,\n ... num_attention_heads=12,\n ... num_layers=12,\n ... prompt_tuning_init=\"TEXT\",\n ... prompt_tuning_init_text=\"Predict if sentiment of this review is positive, negative or neutral\",\n ... tokenizer_name_or_path=\"t5-base\",\n ... 
)\n\n >>> # t5_model.shared is the word embeddings of the base model\n >>> prompt_embedding = PromptEmbedding(config, t5_model.shared)\n ```\n\n Input Shape: (`batch_size`, `total_virtual_tokens`)\n\n Output Shape: (`batch_size`, `total_virtual_tokens`, `token_dim`)\n \"\"\"\n\n def __init__(self, config, word_embeddings):\n super().__init__()\n\n total_virtual_tokens = config.num_virtual_tokens * config.num_transformer_submodules\n self.embedding = torch.nn.Embedding(total_virtual_tokens, config.token_dim)\n if config.prompt_tuning_init == PromptTuningInit.TEXT:\n from transformers import AutoTokenizer\n\n tokenizer = AutoTokenizer.from_pretrained(config.tokenizer_name_or_path)\n init_text = config.prompt_tuning_init_text\n init_token_ids = tokenizer(init_text)[\"input_ids\"]\n # Trim or iterate until num_text_tokens matches total_virtual_tokens\n num_text_tokens = len(init_token_ids)\n if num_text_tokens > total_virtual_tokens:\n init_token_ids = init_token_ids[:total_virtual_tokens]\n elif num_text_tokens < total_virtual_tokens:\n num_reps = math.ceil(total_virtual_tokens / num_text_tokens)\n init_token_ids = init_token_ids * num_reps\n init_token_ids = init_token_ids[:total_virtual_tokens]\n\n word_embedding_weights = word_embeddings(torch.LongTensor(init_token_ids)).detach().clone()\n word_embedding_weights = word_embedding_weights.to(torch.float32)\n self.embedding.weight = torch.nn.Parameter(word_embedding_weights)\n\n def forward(self, indices):\n # Just get embeddings\n prompt_embeddings = self.embedding(indices)\n return prompt_embeddings" }, { "identifier": "MMOELoraModelS", "path": "src/MLoRA/peft/tuners/mmoeloraS.py", "snippet": "class MMOELoraModelS(MMOELoraModel):\n\n def __init__(self, model, config, adapter_name):\n\n super().__init__(model, config, adapter_name)\n\n\n\n def _find_and_replace(self, adapter_name):\n \"\"\"Replace the target `Linear` module with LoRA layer (Linear+LoRA)\"\"\"\n lora_config = self.peft_config[adapter_name]\n loaded_in_8bit = getattr(self.model, \"is_loaded_in_8bit\", False)\n if loaded_in_8bit and not is_bnb_available():\n raise ImportError(\n \"To use Lora with 8-bit quantization, please install the `bitsandbytes` package. \"\n \"You can install it with `pip install bitsandbytes`.\"\n )\n is_target_modules_in_base_model = False\n kwargs = {\n \"r\": lora_config.r,\n \"lora_alpha\": lora_config.lora_alpha,\n \"lora_dropout\": lora_config.lora_dropout,\n \"fan_in_fan_out\": lora_config.fan_in_fan_out,\n \"init_lora_weights\": lora_config.init_lora_weights,\n \"task_num\": lora_config.task_num,\n \"task_embedding_dim\": lora_config.task_embedding_dim,\n \"expert_num\": lora_config.expert_num,\n }\n key_list = [key for key, _ in self.model.named_modules()] # all module in raw model\n for key in key_list:\n # find the corresponding modules. 
target module has been split into list.\n if isinstance(lora_config.target_modules, str):\n target_module_found = re.fullmatch(lora_config.target_modules, key)\n else:\n target_module_found = any(key.endswith(target_key) for target_key in lora_config.target_modules)\n if target_module_found:\n if not is_target_modules_in_base_model:\n is_target_modules_in_base_model = True\n parent, target, target_name = _get_submodules(self.model, key)\n bias = target.bias is not None\n if isinstance(target, MMOELoraLayer):\n target.update_layer(\n adapter_name,\n lora_config.init_r,\n lora_config.lora_alpha,\n lora_config.lora_dropout,\n lora_config.init_lora_weights,\n )\n else:\n if loaded_in_8bit and isinstance(target, bnb.nn.Linear8bitLt):\n raise NotImplementedError\n else:\n if isinstance(target, torch.nn.Linear):\n in_features, out_features = target.in_features, target.out_features\n if kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to True but the target module is `torch.nn.Linear`. \"\n \"Setting fan_in_fan_out to False.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = False\n elif isinstance(target, Conv1D):\n in_features, out_features = (\n target.weight.ds_shape if hasattr(target.weight, \"ds_shape\") else target.weight.shape\n )\n if not kwargs[\"fan_in_fan_out\"]:\n warnings.warn(\n \"fan_in_fan_out is set to False but the target module is `Conv1D`. \"\n \"Setting fan_in_fan_out to True.\"\n )\n kwargs[\"fan_in_fan_out\"] = lora_config.fan_in_fan_out = True\n else:\n raise ValueError(\n f\"Target module {target} is not supported. \"\n f\"Currently, only `torch.nn.Linear` and `Conv1D` are supported.\"\n )\n new_module = MMOELoraLinearS(adapter_name, in_features, out_features, \n bias=bias, **kwargs)\n\n self._replace_module(parent, target_name, new_module, target)\n if not is_target_modules_in_base_model:\n raise ValueError(\n f\"Target modules {lora_config.target_modules} not found in the base model. 
\"\n f\"Please check the target modules and try again.\"\n )" }, { "identifier": "PeftConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftConfig(PeftConfigMixin):\n \"\"\"\n This is the base configuration class to store the configuration of a [`PeftModel`].\n\n Args:\n peft_type (Union[[`~peft.utils.config.PeftType`], `str`]): The type of Peft method to use.\n task_type (Union[[`~peft.utils.config.TaskType`], `str`]): The type of task to perform.\n inference_mode (`bool`, defaults to `False`): Whether to use the Peft model in inference mode.\n \"\"\"\n\n base_model_name_or_path: str = field(default=None, metadata={\"help\": \"The name of the base model to use.\"})\n peft_type: Union[str, PeftType] = field(default=None, metadata={\"help\": \"Peft type\"})\n task_type: Union[str, TaskType] = field(default=None, metadata={\"help\": \"Task type\"})\n inference_mode: bool = field(default=False, metadata={\"help\": \"Whether to use inference mode\"})" }, { "identifier": "PeftType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PeftType(str, enum.Enum):\n PROMPT_TUNING = \"PROMPT_TUNING\"\n P_TUNING = \"P_TUNING\"\n PREFIX_TUNING = \"PREFIX_TUNING\"\n LORA = \"LORA\"\n ADALORA = \"ADALORA\"\n ADAPTION_PROMPT = \"ADAPTION_PROMPT\"\n MMOELORAS = \"MMOELORAS\"" }, { "identifier": "PromptLearningConfig", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class PromptLearningConfig(PeftConfig):\n \"\"\"\n This is the base configuration class to store the configuration of [`PrefixTuning`], [`PromptEncoder`], or\n [`PromptTuning`].\n\n Args:\n num_virtual_tokens (`int`): The number of virtual tokens to use.\n token_dim (`int`): The hidden embedding dimension of the base transformer model.\n num_transformer_submodules (`int`): The number of transformer submodules in the base transformer model.\n num_attention_heads (`int`): The number of attention heads in the base transformer model.\n num_layers (`int`): The number of layers in the base transformer model.\n \"\"\"\n\n num_virtual_tokens: int = field(default=None, metadata={\"help\": \"Number of virtual tokens\"})\n token_dim: int = field(\n default=None, metadata={\"help\": \"The hidden embedding dimension of the base transformer model\"}\n )\n num_transformer_submodules: Optional[int] = field(\n default=None, metadata={\"help\": \"Number of transformer submodules\"}\n )\n num_attention_heads: Optional[int] = field(default=None, metadata={\"help\": \"Number of attention heads\"})\n num_layers: Optional[int] = field(default=None, metadata={\"help\": \"Number of transformer layers\"})" }, { "identifier": "TaskType", "path": "src/MLoRA/peft/utils/config.py", "snippet": "class TaskType(str, enum.Enum):\n SEQ_CLS = \"SEQ_CLS\"\n SEQ_2_SEQ_LM = \"SEQ_2_SEQ_LM\"\n CAUSAL_LM = \"CAUSAL_LM\"\n TOKEN_CLS = \"TOKEN_CLS\"\n CAUSAL_LMS = \"CAUSAL_LMS\"" }, { "identifier": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING", "path": "src/MLoRA/peft/utils/other.py", "snippet": "TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING = {\n \"bloom\": bloom_model_postprocess_past_key_value,\n}" }, { "identifier": "WEIGHTS_NAME", "path": "src/MLoRA/peft/utils/other.py", "snippet": "WEIGHTS_NAME = \"adapter_model.bin\"" }, { "identifier": "_set_trainable", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _set_trainable(model, adapter_name):\n key_list = [key for key, _ in model.named_modules()]\n for key in key_list:\n target_module_found = any(key.endswith(target_key) for target_key in model.modules_to_save)\n if 
target_module_found:\n parent, target, target_name = _get_submodules(model, key)\n if isinstance(target, ModulesToSaveWrapper):\n target.update(adapter_name)\n else:\n for param in target.parameters():\n param.requires_grad = True\n setattr(parent, target_name, ModulesToSaveWrapper(target, adapter_name))" }, { "identifier": "shift_tokens_right", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def shift_tokens_right(input_ids: torch.Tensor, pad_token_id: int, decoder_start_token_id: int):\n \"\"\"\n Shift input ids one token to the right.\n\n Args:\n input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`): input ids\n pad_token_id (`int`): The id of the `padding` token.\n decoder_start_token_id (`int`): The id of the `start` token.\n \"\"\"\n shifted_input_ids = input_ids.new_zeros(input_ids.shape)\n shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()\n shifted_input_ids[:, 0] = decoder_start_token_id\n\n if pad_token_id is None:\n raise ValueError(\"self.model.config.pad_token_id has to be defined.\")\n # replace possible -100 values in labels by `pad_token_id`\n shifted_input_ids.masked_fill_(shifted_input_ids == -100, pad_token_id)\n\n return shifted_input_ids" }, { "identifier": "_set_adapter", "path": "src/MLoRA/peft/utils/other.py", "snippet": "def _set_adapter(model, adapter_name):\n for module in model.modules():\n if isinstance(module, ModulesToSaveWrapper):\n module.active_adapter = adapter_name" }, { "identifier": "get_peft_model_state_dict", "path": "src/MLoRA/peft/utils/save_and_load.py", "snippet": "def get_peft_model_state_dict(model, state_dict=None, adapter_name=\"default\"):\n \"\"\"\n Get the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model. When using torch.nn.DistributedDataParallel, DeepSpeed or FSDP,\n the model should be the underlying model/unwrapped model (i.e. model.module).\n state_dict (`dict`, *optional*, defaults to `None`):\n The state dict of the model. 
If not provided, the state dict of the model\n will be used.\n \"\"\"\n config = model.peft_config[adapter_name]\n if state_dict is None:\n state_dict = model.state_dict()\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA,\n PeftType.MMOELORAS):\n # to_return = lora_state_dict(model, bias=model.peft_config.bias)\n # adapted from `https://github.com/microsoft/LoRA/blob/main/loralib/utils.py`\n # to be used directly with the state dict which is necessary when using DeepSpeed or FSDP\n bias = config.bias\n if bias == \"none\": # filter out all lora parameters\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k}\n elif bias == \"all\":\n to_return = {k: state_dict[k] for k in state_dict if \"lora_\" in k or \"bias\" in k}\n elif bias == \"lora_only\":\n to_return = {}\n for k in state_dict:\n if \"lora_\" in k:\n to_return[k] = state_dict[k]\n bias_name = k.split(\"lora_\")[0] + \"bias\"\n if bias_name in state_dict:\n to_return[bias_name] = state_dict[bias_name]\n else:\n raise NotImplementedError\n to_return = {k: v for k, v in to_return.items() if ((\"lora_\" in k and adapter_name in k) or (\"bias\" in k))}\n\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n rank_pattern = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in rank_pattern.items()}\n config.rank_pattern = rank_pattern\n to_return = model.resize_state_dict_by_rank_pattern(rank_pattern, to_return, adapter_name)\n\n elif config.peft_type == PeftType.ADAPTION_PROMPT:\n to_return = {k: state_dict[k] for k in state_dict if k.split(\".\")[-1].startswith(\"adaption_\")}\n elif isinstance(config, PromptLearningConfig):\n to_return = {}\n if config.inference_mode:\n prompt_embeddings = model.prompt_encoder[adapter_name].embedding.weight\n else:\n prompt_embeddings = model.get_prompt_embedding_to_save(adapter_name)\n to_return[\"prompt_embeddings\"] = prompt_embeddings\n else:\n raise NotImplementedError\n if model.modules_to_save is not None:\n for key, value in state_dict.items():\n if any(f\"{module_name}.modules_to_save.{adapter_name}\" in key for module_name in model.modules_to_save):\n to_return[key.replace(\"modules_to_save.\", \"\")] = value\n\n to_return = {k.replace(f\".{adapter_name}\", \"\"): v for k, v in to_return.items()}\n return to_return" }, { "identifier": "set_peft_model_state_dict", "path": "src/MLoRA/peft/utils/save_and_load.py", "snippet": "def set_peft_model_state_dict(model, peft_model_state_dict, adapter_name=\"default\"):\n \"\"\"\n Set the state dict of the Peft model.\n\n Args:\n model ([`PeftModel`]): The Peft model.\n peft_model_state_dict (`dict`): The state dict of the Peft model.\n \"\"\"\n config = model.peft_config[adapter_name]\n state_dict = {}\n if model.modules_to_save is not None:\n for key, value in peft_model_state_dict.items():\n if any(module_name in key for module_name in model.modules_to_save):\n for module_name in model.modules_to_save:\n if module_name in key:\n key = key.replace(module_name, f\"{module_name}.modules_to_save.{adapter_name}\")\n break\n state_dict[key] = value\n else:\n state_dict = peft_model_state_dict\n\n if config.peft_type in (PeftType.LORA, PeftType.ADALORA,\n PeftType.MMOELORAS):\n peft_model_state_dict = {}\n for k, v in state_dict.items():\n if \"lora_\" in k:\n suffix = k.split(\"lora_\")[1]\n if \".\" in suffix:\n suffix_to_replace = \".\".join(suffix.split(\".\")[1:])\n k = k.replace(suffix_to_replace, f\"{adapter_name}.{suffix_to_replace}\")\n else:\n k = 
f\"{k}.{adapter_name}\"\n peft_model_state_dict[k] = v\n else:\n peft_model_state_dict[k] = v\n if config.peft_type == PeftType.ADALORA:\n rank_pattern = config.rank_pattern\n if rank_pattern is not None:\n model.resize_modules_by_rank_pattern(rank_pattern, adapter_name)\n elif isinstance(config, PromptLearningConfig) or config.peft_type == PeftType.ADAPTION_PROMPT:\n peft_model_state_dict = state_dict\n else:\n raise NotImplementedError\n\n model.load_state_dict(peft_model_state_dict, strict=False)\n if isinstance(config, PromptLearningConfig):\n model.prompt_encoder[adapter_name].embedding.load_state_dict(\n {\"weight\": peft_model_state_dict[\"prompt_embeddings\"]}, strict=True\n )" } ]
import inspect import os import warnings import torch import torch.nn as nn from contextlib import contextmanager from accelerate import dispatch_model, infer_auto_device_map from accelerate.hooks import AlignDevicesHook, add_hook_to_module, remove_hook_from_submodules from accelerate.utils import get_balanced_memory from huggingface_hub import hf_hub_download from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss from transformers import PreTrainedModel from transformers.modeling_outputs import SequenceClassifierOutput, TokenClassifierOutput from transformers.utils import PushToHubMixin from .utils import PeftConfig from .shared import Gate, GateN from .tuners import ( AdaLoraModel, AdaptionPromptModel, LoraModel, PrefixEncoder, PromptEmbedding, PromptEncoder, MMOELoraModelS, ) from .utils import ( TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING, WEIGHTS_NAME, PeftConfig, PeftType, PromptLearningConfig, TaskType, _set_adapter, _set_trainable, get_peft_model_state_dict, set_peft_model_state_dict, shift_tokens_right, ) from .mapping import MODEL_TYPE_TO_PEFT_MODEL_MAPPING, PEFT_TYPE_TO_CONFIG_MAPPING from .mapping import PEFT_TYPE_TO_CONFIG_MAPPING
16973
) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... "target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "merge_weights": False, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. 
Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None:
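In the cropped_code above, prompt-learning methods prepend num_virtual_tokens ones to the (decoder) attention mask, and non-prefix methods prepend the prompt embeddings to inputs_embeds so mask and embeddings stay aligned. A shape-only sketch with illustrative dimensions:

import torch

batch_size, seq_len, num_virtual_tokens, hidden = 2, 5, 4, 16

attention_mask = torch.ones(batch_size, seq_len, dtype=torch.long)
prefix_attention_mask = torch.ones(batch_size, num_virtual_tokens, dtype=torch.long)
attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1)  # (2, 9)

inputs_embeds = torch.randn(batch_size, seq_len, hidden)
prompts = torch.randn(batch_size, num_virtual_tokens, hidden)  # stands in for get_prompt(batch_size)
inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1)     # (2, 9, 16)

assert attention_mask.shape[1] == inputs_embeds.shape[1]  # mask covers virtual + real tokens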
# coding=utf-8 # Copyright 2023-present the HuggingFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. PEFT_TYPE_TO_MODEL_MAPPING = { PeftType.LORA: LoraModel, PeftType.PROMPT_TUNING: PromptEmbedding, PeftType.P_TUNING: PromptEncoder, PeftType.PREFIX_TUNING: PrefixEncoder, PeftType.ADALORA: AdaLoraModel, PeftType.ADAPTION_PROMPT: AdaptionPromptModel, PeftType.MMOELORAS: MMOELoraModelS, } class PeftModel(PushToHubMixin, torch.nn.Module): """ Base model encompassing various Peft methods. Args: model ([`~transformers.PreTrainedModel`]): The base transformer model used for Peft. peft_config ([`PeftConfig`]): The configuration of the Peft model. **Attributes**: - **base_model** ([`~transformers.PreTrainedModel`]) -- The base transformer model used for Peft. - **peft_config** ([`PeftConfig`]) -- The configuration of the Peft model. - **modules_to_save** (`list` of `str`) -- The list of sub-module names to save when saving the model. - **prompt_encoder** ([`PromptEncoder`]) -- The prompt encoder used for Peft if using [`PromptLearningConfig`]. - **prompt_tokens** (`torch.Tensor`) -- The virtual prompt tokens used for Peft if using [`PromptLearningConfig`]. - **transformer_backbone_name** (`str`) -- The name of the transformer backbone in the base model if using [`PromptLearningConfig`]. - **word_embeddings** (`torch.nn.Embedding`) -- The word embeddings of the transformer backbone in the base model if using [`PromptLearningConfig`]. """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__() self.base_model = model self.config = self.base_model.config self.modules_to_save = None self.peft_config = {} self.active_adapter = adapter_name self.peft_type = peft_config.peft_type self.base_model_torch_dtype = getattr(model, "dtype", None) if not isinstance(peft_config, PromptLearningConfig): self.peft_config[adapter_name] = peft_config self.base_model = PEFT_TYPE_TO_MODEL_MAPPING[peft_config.peft_type]( self.base_model, self.peft_config, adapter_name ) self.set_additional_trainable_modules(peft_config, adapter_name) else: self.add_adapter(adapter_name, peft_config) def save_pretrained(self, save_directory, **kwargs): r""" This function saves the adapter model and the adapter configuration files to a directory, so that it can be reloaded using the [`LoraModel.from_pretrained`] class method, and also used by the [`LoraModel.push_to_hub`] method. Args: save_directory (`str`): Directory where the adapter model and configuration files will be saved (will be created if it does not exist). kwargs (additional keyword arguments, *optional*): Additional keyword arguments passed along to the `push_to_hub` method. 
""" if os.path.isfile(save_directory): raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file") os.makedirs(save_directory, exist_ok=True) for adapter_name, peft_config in self.peft_config.items(): # save only the trainable weights output_state_dict = get_peft_model_state_dict( self, state_dict=kwargs.get("state_dict", None), adapter_name=adapter_name ) # save the weights based on the adapter name output_dir = os.path.join(save_directory, adapter_name) if adapter_name != "default" else save_directory os.makedirs(output_dir, exist_ok=True) torch.save(output_state_dict, os.path.join(output_dir, WEIGHTS_NAME)) # save the config and change the inference mode to `True` if peft_config.base_model_name_or_path is None: peft_config.base_model_name_or_path = ( self.base_model.__dict__.get("name_or_path", None) if isinstance(peft_config, PromptLearningConfig) else self.base_model.model.__dict__.get("name_or_path", None) ) inference_mode = peft_config.inference_mode peft_config.inference_mode = True peft_config.save_pretrained(output_dir) # save the config to file peft_config.inference_mode = inference_mode @classmethod def from_pretrained(cls, model, model_id, adapter_name="default", is_trainable=False, **kwargs): r""" Instantiate a [`LoraModel`] from a pretrained Lora configuration and weights. Args: model ([`~transformers.PreTrainedModel`]): The model to be adapted. The model should be initialized with the [`~transformers.PreTrainedModel.from_pretrained`] method from the 🤗 Transformers library. model_id (`str` or `os.PathLike`): The name of the Lora configuration to use. Can be either: - A string, the `model id` of a Lora configuration hosted inside a model repo on the Hugging Face Hub. - A path to a directory containing a Lora configuration file saved using the `save_pretrained` method (`./my_lora_config_directory/`). 
""" # load the config config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig.from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)).peft_type ].from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)) if (getattr(model, "hf_device_map", None) is not None) and len( set(model.hf_device_map.values()).intersection({"cpu", "disk"}) ) > 0: remove_hook_from_submodules(model) if isinstance(config, PromptLearningConfig) and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: config.inference_mode = not is_trainable if config.task_type not in MODEL_TYPE_TO_PEFT_MODEL_MAPPING.keys(): model = cls(model, config, adapter_name) else: model = MODEL_TYPE_TO_PEFT_MODEL_MAPPING[config.task_type](model, config, adapter_name) # New a PeftModel model.load_adapter(model_id, adapter_name, **kwargs) return model def _setup_prompt_encoder(self, adapter_name): config = self.peft_config[adapter_name] self.prompt_encoder = torch.nn.ModuleDict({}) self.prompt_tokens = {} transformer_backbone = None for name, module in self.base_model.named_children(): for param in module.parameters(): param.requires_grad = False if isinstance(module, PreTrainedModel): # Make sure to freeze Tranformers model if transformer_backbone is None: transformer_backbone = module self.transformer_backbone_name = name if config.num_transformer_submodules is None: config.num_transformer_submodules = 2 if config.task_type == TaskType.SEQ_2_SEQ_LM else 1 for named_param, value in list(transformer_backbone.named_parameters()): if value.shape[0] == self.base_model.config.vocab_size: self.word_embeddings = transformer_backbone.get_submodule(named_param.replace(".weight", "")) break if config.peft_type == PeftType.PROMPT_TUNING: prompt_encoder = PromptEmbedding(config, self.word_embeddings) elif config.peft_type == PeftType.P_TUNING: prompt_encoder = PromptEncoder(config) elif config.peft_type == PeftType.PREFIX_TUNING: prompt_encoder = PrefixEncoder(config) else: raise ValueError("Not supported") self.prompt_encoder.update(torch.nn.ModuleDict({adapter_name: prompt_encoder})) self.prompt_tokens[adapter_name] = torch.arange( config.num_virtual_tokens * config.num_transformer_submodules ).long() def get_prompt_embedding_to_save(self, adapter_name): """ Returns the prompt embedding to save when saving the model. Only applicable when `peft_config.peft_type != PeftType.LORA`. """ prompt_tokens = self.prompt_tokens[adapter_name].unsqueeze(0).expand(1, -1).to(self.device) if self.peft_config[adapter_name].peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : self.peft_config[adapter_name].num_virtual_tokens] prompt_embeddings = self.prompt_encoder[adapter_name](prompt_tokens) return prompt_embeddings[0].detach().cpu() def get_prompt(self, batch_size): """ Returns the virtual prompts to use for Peft. Only applicable when `peft_config.peft_type != PeftType.LORA`. 
""" peft_config = self.active_peft_config prompt_encoder = self.prompt_encoder[self.active_adapter] prompt_tokens = self.prompt_tokens[self.active_adapter].unsqueeze(0).expand(batch_size, -1).to(self.device) if peft_config.peft_type == PeftType.PREFIX_TUNING: prompt_tokens = prompt_tokens[:, : peft_config.num_virtual_tokens] if peft_config.inference_mode: past_key_values = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: past_key_values = prompt_encoder(prompt_tokens) past_key_values = past_key_values.view( batch_size, peft_config.num_virtual_tokens, peft_config.num_layers * 2, peft_config.num_attention_heads, peft_config.token_dim // peft_config.num_attention_heads, ) if peft_config.num_transformer_submodules == 2: past_key_values = torch.cat([past_key_values, past_key_values], dim=2) past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split( peft_config.num_transformer_submodules * 2 ) if TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING.get(self.config.model_type, None) is not None: post_process_fn = TRANSFORMERS_MODELS_TO_PREFIX_TUNING_POSTPROCESS_MAPPING[self.config.model_type] past_key_values = post_process_fn(past_key_values) return past_key_values else: if peft_config.inference_mode: prompts = prompt_encoder.embedding.weight.repeat(batch_size, 1, 1) else: prompts = prompt_encoder(prompt_tokens) return prompts def print_trainable_parameters(self): """ Prints the number of trainable parameters in the model. """ trainable_params = 0 all_param = 0 for _, param in self.named_parameters(): num_params = param.numel() # if using DS Zero 3 and the weights are initialized empty if num_params == 0 and hasattr(param, "ds_numel"): num_params = param.ds_numel all_param += num_params if param.requires_grad: trainable_params += num_params print( f"trainable params: {trainable_params} || all params: {all_param} || trainable%: {100 * trainable_params / all_param}" ) def __getattr__(self, name: str): """Forward missing attributes to the wrapped module.""" try: return super().__getattr__(name) # defer to nn.Module's logic except AttributeError: return getattr(self.base_model, name) def forward(self, *args, **kwargs): """ Forward pass of the model. """ return self.get_base_model()(*args, **kwargs) @contextmanager def disable_adapter(self): """ Disables the adapter module. """ try: if isinstance(self.peft_config, PromptLearningConfig): old_forward = self.forward self.forward = self.base_model.forward else: self.base_model.disable_adapter_layers() yield finally: if isinstance(self.peft_config, PromptLearningConfig): self.forward = old_forward else: self.base_model.enable_adapter_layers() def get_base_model(self): """ Returns the base model. """ return self.base_model if isinstance(self.active_peft_config, PromptLearningConfig) else self.base_model.model def add_adapter(self, adapter_name, peft_config): if peft_config.peft_type != self.peft_type: raise ValueError( f"Cannot combine adapters with different peft types. " f"Found {self.peft_type} and {peft_config.peft_type}." 
) self.peft_config[adapter_name] = peft_config if isinstance(peft_config, PromptLearningConfig): self._setup_prompt_encoder(adapter_name) else: self.base_model.add_adapter(adapter_name, peft_config) self.set_additional_trainable_modules(peft_config, adapter_name) def set_additional_trainable_modules(self, peft_config, adapter_name): if getattr(peft_config, "modules_to_save", None) is not None: if self.modules_to_save is None: self.modules_to_save = set(peft_config.modules_to_save) else: self.modules_to_save.update(peft_config.modules_to_save) _set_trainable(self, adapter_name) def load_adapter(self, model_id, adapter_name, is_trainable=False, **kwargs): if adapter_name not in self.peft_config: # load the config peft_config = PEFT_TYPE_TO_CONFIG_MAPPING[ PeftConfig.from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)).peft_type ].from_pretrained(model_id, subfolder=kwargs.get("subfolder", None)) if isinstance(peft_config, PromptLearningConfig) and is_trainable: raise ValueError("Cannot set a prompt learning adapter to trainable when loading pretrained adapter.") else: peft_config.inference_mode = not is_trainable self.add_adapter(adapter_name, peft_config) # load weights if any path = os.path.join(model_id, kwargs["subfolder"]) if kwargs.get("subfolder", None) is not None else model_id if os.path.exists(os.path.join(path, WEIGHTS_NAME)): filename = os.path.join(path, WEIGHTS_NAME) else: try: filename = hf_hub_download(model_id, WEIGHTS_NAME, subfolder=kwargs.get("subfolder", None)) except: # noqa raise ValueError( f"Can't find weights for {model_id} in {model_id} or in the Hugging Face Hub. " f"Please check that the file {WEIGHTS_NAME} is present at {model_id}." ) adapters_weights = torch.load( filename, map_location=torch.device("cuda" if torch.cuda.is_available() else "cpu") ) # load the weights into the model set_peft_model_state_dict(self, adapters_weights, adapter_name=adapter_name) if ( (getattr(self, "hf_device_map", None) is not None) and (len(set(self.hf_device_map.values()).intersection({"cpu", "disk"})) > 0) and len(self.peft_config) == 1 ): device_map = kwargs.get("device_map", "auto") max_memory = kwargs.get("max_memory", None) offload_dir = kwargs.get("offload_folder", None) offload_index = kwargs.get("offload_index", None) dispatch_model_kwargs = {} # Safety checker for previous `accelerate` versions # `offload_index` was introduced in https://github.com/huggingface/accelerate/pull/873/ if "offload_index" in inspect.signature(dispatch_model).parameters: dispatch_model_kwargs["offload_index"] = offload_index no_split_module_classes = self._no_split_modules if device_map != "sequential": max_memory = get_balanced_memory( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes, low_zero=(device_map == "balanced_low_0"), ) if isinstance(device_map, str): device_map = infer_auto_device_map( self, max_memory=max_memory, no_split_module_classes=no_split_module_classes ) dispatch_model( self, device_map=device_map, offload_dir=offload_dir, **dispatch_model_kwargs, ) hook = AlignDevicesHook(io_same_device=True) if isinstance(self.peft_config[adapter_name], PromptLearningConfig): remove_hook_from_submodules(self.prompt_encoder) add_hook_to_module(self.get_base_model(), hook) # Set model in evaluation mode to deactivate Dropout modules by default self.eval() def set_adapter(self, adapter_name): """ Sets the active adapter. 
""" if adapter_name not in self.peft_config: raise ValueError(f"Adapter {adapter_name} not found.") self.active_adapter = adapter_name if not isinstance(self.peft_config[adapter_name], PromptLearningConfig): self.base_model.set_adapter(adapter_name) _set_adapter(self, adapter_name) @property def active_peft_config(self): return self.peft_config[self.active_adapter] class PeftModelForSequenceClassification(PeftModel): """ Peft model for sequence classification tasks. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. **Attributes**: - **config** ([`~transformers.PretrainedConfig`]) -- The configuration object of the base model. - **cls_layer_name** (`str`) -- The name of the classification layer. Example: ```py >>> from transformers import AutoModelForSequenceClassification >>> from peft import PeftModelForSequenceClassification, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "SEQ_CLS", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 768, ... "num_transformer_submodules": 1, ... "num_attention_heads": 12, ... "num_layers": 12, ... "encoder_hidden_size": 768, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased") >>> peft_model = PeftModelForSequenceClassification(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 370178 || all params: 108680450 || trainable%: 0.3406113979101117 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) if self.modules_to_save is None: self.modules_to_save = {"classifier", "score"} else: self.modules_to_save.update({"classifier", "score"}) for name, _ in self.base_model.named_children(): if any(module_name in name for module_name in self.modules_to_save): self.cls_layer_name = name break # to make sure classifier layer is trainable _set_trainable(self, adapter_name) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): return_dict = return_dict if return_dict is not None else self.config.use_return_dict peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. 
Ignoring position ids.") kwargs["position_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: return self._prefix_tuning_forward(input_ids=input_ids, **kwargs) else: if kwargs.get("token_type_ids", None) is not None: kwargs["token_type_ids"] = torch.cat( ( torch.zeros(batch_size, peft_config.num_virtual_tokens).to(self.device), kwargs["token_type_ids"], ), dim=1, ).long() if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def _prefix_tuning_forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): batch_size = input_ids.shape[0] past_key_values = self.get_prompt(batch_size) fwd_params = list(inspect.signature(self.base_model.forward).parameters.keys()) kwargs.update( { "input_ids": input_ids, "attention_mask": attention_mask, "inputs_embeds": inputs_embeds, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, "past_key_values": past_key_values, } ) if "past_key_values" in fwd_params: return self.base_model(labels=labels, **kwargs) else: transformer_backbone_name = self.base_model.get_submodule(self.transformer_backbone_name) fwd_params = list(inspect.signature(transformer_backbone_name.forward).parameters.keys()) if "past_key_values" not in fwd_params: raise ValueError("Model does not support past key values which are required for prefix tuning.") outputs = transformer_backbone_name(**kwargs) pooled_output = outputs[1] if len(outputs) > 1 else outputs[0] if "dropout" in [name for name, _ in list(self.base_model.named_children())]: pooled_output = self.base_model.dropout(pooled_output) logits = self.base_model.get_submodule(self.cls_layer_name)(pooled_output) loss = None if labels is not None: if self.config.problem_type is None: if self.base_model.num_labels == 1: self.config.problem_type = "regression" elif self.base_model.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int): self.config.problem_type = "single_label_classification" else: self.config.problem_type = "multi_label_classification" if self.config.problem_type == "regression": loss_fct = MSELoss() if self.base_model.num_labels == 1: loss = loss_fct(logits.squeeze(), labels.squeeze()) else: loss = loss_fct(logits, labels) elif self.config.problem_type == "single_label_classification": loss_fct = CrossEntropyLoss() loss = loss_fct(logits.view(-1, self.base_model.num_labels), labels.view(-1)) elif self.config.problem_type == "multi_label_classification": loss_fct = BCEWithLogitsLoss() loss = loss_fct(logits, labels) if not return_dict: output = (logits,) + outputs[2:] return ((loss,) + output) if loss is not None else output return SequenceClassifierOutput( loss=loss, logits=logits, hidden_states=outputs.hidden_states, attentions=outputs.attentions, ) class PeftModelForCausalLM(PeftModel): """ Peft model for causal language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. 
Example: ```py >>> from transformers import AutoModelForCausalLM >>> from peft import PeftModelForCausalLM, get_peft_config >>> config = { ... "peft_type": "PREFIX_TUNING", ... "task_type": "CAUSAL_LM", ... "inference_mode": False, ... "num_virtual_tokens": 20, ... "token_dim": 1280, ... "num_transformer_submodules": 1, ... "num_attention_heads": 20, ... "num_layers": 36, ... "encoder_hidden_size": 1280, ... "prefix_projection": False, ... "postprocess_past_key_value_function": None, ... } >>> peft_config = get_peft_config(config) >>> model = AutoModelForCausalLM.from_pretrained("gpt2-large") >>> peft_model = PeftModelForCausalLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 1843200 || all params: 775873280 || trainable%: 0.23756456724479544 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) attention_mask = torch.cat((prefix_attention_mask, attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model(input_ids=input_ids, past_key_values=past_key_values, **kwargs) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) # concat prompt labels if labels is not None: prefix_labels = torch.full((batch_size, peft_config.num_virtual_tokens), -100).to(self.device) kwargs["labels"] = torch.cat((prefix_labels, labels), dim=1) prompts = self.get_prompt(batch_size=batch_size) prompts = prompts.to(inputs_embeds.dtype) inputs_embeds = torch.cat((prompts, inputs_embeds), dim=1) return self.base_model(inputs_embeds=inputs_embeds, **kwargs) def generate(self, **kwargs): peft_config = self.active_peft_config self.base_model.prepare_inputs_for_generation = self.prepare_inputs_for_generation try: if not isinstance(peft_config, PromptLearningConfig): outputs = self.base_model.generate(**kwargs) else: if "input_ids" not in kwargs: raise ValueError("input_ids must be provided for Peft model generation") # For gpt2 models, we construct postion_ids on the fly by using attention mask, and position ids need to match input_shape. 
# for prefix tuning, input shape is determined using `input_ids`. Thus we should not expand 'attention_mask' here # for prompt tuning input_ids is not passed but a concatenated input_embeds is passed. Thus attention_mask needs to be of same size of num_virtual_tokens + input_ids if kwargs.get("attention_mask", None) is not None and peft_config.peft_type in [ PeftType.PROMPT_TUNING, PeftType.P_TUNING, ]: # concat prompt attention mask prefix_attention_mask = torch.ones( kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens ).to(kwargs["input_ids"].device) kwargs["attention_mask"] = torch.cat((prefix_attention_mask, kwargs["attention_mask"]), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn( "Position ids are not supported for parameter efficient tuning. Ignoring position ids." ) kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn( "Token type ids are not supported for parameter efficient tuning. Ignoring token type ids" ) kwargs["token_type_ids"] = None outputs = self.base_model.generate(**kwargs) except: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation raise else: self.base_model.prepare_inputs_for_generation = self.base_model_prepare_inputs_for_generation return outputs def prepare_inputs_for_generation(self, *args, **kwargs): peft_config = self.active_peft_config model_kwargs = self.base_model_prepare_inputs_for_generation(*args, **kwargs) if isinstance(peft_config, PromptLearningConfig): if peft_config.peft_type == PeftType.PREFIX_TUNING: prefix_attention_mask = torch.ones( model_kwargs["input_ids"].shape[0], peft_config.num_virtual_tokens ).to(model_kwargs["input_ids"].device) model_kwargs["attention_mask"] = torch.cat( (prefix_attention_mask, model_kwargs["attention_mask"]), dim=1 ) if model_kwargs["past_key_values"] is None and peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) if self.base_model_torch_dtype is not None: # handle the case for Bloom where it outputs tuple of tuples if isinstance(past_key_values[0], tuple): past_key_values = tuple( tuple( past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_value_tuple ) for past_key_value_tuple in past_key_values ) else: past_key_values = tuple( past_key_value.to(self.base_model_torch_dtype) for past_key_value in past_key_values ) model_kwargs["past_key_values"] = past_key_values else: if model_kwargs["past_key_values"] is None: inputs_embeds = self.word_embeddings(model_kwargs["input_ids"]) prompts = self.get_prompt(batch_size=model_kwargs["input_ids"].shape[0]) prompts = prompts.to(inputs_embeds.dtype) model_kwargs["inputs_embeds"] = torch.cat((prompts, inputs_embeds), dim=1) model_kwargs["input_ids"] = None return model_kwargs class PeftModelForSeq2SeqLM(PeftModel): """ Peft model for sequence-to-sequence language modeling. Args: model ([`~transformers.PreTrainedModel`]): Base transformer model. peft_config ([`PeftConfig`]): Peft config. Example: ```py >>> from transformers import AutoModelForSeq2SeqLM >>> from peft import PeftModelForSeq2SeqLM, get_peft_config >>> config = { ... "peft_type": "LORA", ... "task_type": "SEQ_2_SEQ_LM", ... "inference_mode": False, ... "r": 8, ... "target_modules": ["q", "v"], ... "lora_alpha": 32, ... "lora_dropout": 0.1, ... "merge_weights": False, ... "fan_in_fan_out": False, ... "enable_lora": None, ... "bias": "none", ... 
} >>> peft_config = get_peft_config(config) >>> model = AutoModelForSeq2SeqLM.from_pretrained("t5-base") >>> peft_model = PeftModelForSeq2SeqLM(model, peft_config) >>> peft_model.print_trainable_parameters() trainable params: 884736 || all params: 223843584 || trainable%: 0.3952474242013566 ``` """ def __init__(self, model, peft_config: PeftConfig, adapter_name="default"): super().__init__(model, peft_config, adapter_name) self.base_model_prepare_inputs_for_generation = self.base_model.prepare_inputs_for_generation self.base_model_prepare_encoder_decoder_kwargs_for_generation = ( self.base_model._prepare_encoder_decoder_kwargs_for_generation ) def forward( self, input_ids=None, attention_mask=None, inputs_embeds=None, decoder_input_ids=None, decoder_attention_mask=None, decoder_inputs_embeds=None, labels=None, output_attentions=None, output_hidden_states=None, return_dict=None, **kwargs, ): peft_config = self.active_peft_config if not isinstance(peft_config, PromptLearningConfig): return self.base_model( input_ids=input_ids, attention_mask=attention_mask, inputs_embeds=inputs_embeds, decoder_input_ids=decoder_input_ids, decoder_attention_mask=decoder_attention_mask, decoder_inputs_embeds=decoder_inputs_embeds, labels=labels, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=return_dict, **kwargs, ) batch_size = input_ids.shape[0] if decoder_attention_mask is not None: # concat prompt attention mask prefix_attention_mask = torch.ones(batch_size, peft_config.num_virtual_tokens).to(self.device) decoder_attention_mask = torch.cat((prefix_attention_mask, decoder_attention_mask), dim=1) if kwargs.get("position_ids", None) is not None: warnings.warn("Position ids are not supported for parameter efficient tuning. Ignoring position ids.") kwargs["position_ids"] = None if kwargs.get("token_type_ids", None) is not None: warnings.warn("Token type ids are not supported for parameter efficient tuning. Ignoring token type ids") kwargs["token_type_ids"] = None kwargs.update( { "attention_mask": attention_mask, "decoder_attention_mask": decoder_attention_mask, "labels": labels, "output_attentions": output_attentions, "output_hidden_states": output_hidden_states, "return_dict": return_dict, } ) if peft_config.peft_type == PeftType.PREFIX_TUNING: past_key_values = self.get_prompt(batch_size) return self.base_model( input_ids=input_ids, decoder_input_ids=decoder_input_ids, past_key_values=past_key_values, **kwargs ) else: if inputs_embeds is None: inputs_embeds = self.word_embeddings(input_ids) if decoder_inputs_embeds is None and decoder_input_ids is None:
decoder_input_ids = shift_tokens_right(
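Editor's note: the gold next line above builds decoder inputs from the labels via shift_tokens_right. The sketch below is an illustrative reimplementation of what such a helper conventionally does in seq2seq models, not the exact function this file imports; the name shift_tokens_right_sketch and the example token ids are invented:

import torch

def shift_tokens_right_sketch(
    labels: torch.Tensor, pad_token_id: int, decoder_start_token_id: int
) -> torch.Tensor:
    # Shift labels one position to the right and prepend the decoder start
    # token; replace the -100 ignore-index with the pad token so the decoder
    # never sees it. This mirrors the usual seq2seq convention.
    shifted = labels.new_zeros(labels.shape)
    shifted[:, 1:] = labels[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[5, 6, -100]])
print(shift_tokens_right_sketch(labels, pad_token_id=0, decoder_start_token_id=2))
# tensor([[2, 5, 6]])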
17
2023-10-19 10:55:50+00:00
24k
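Editor's note: to make the get_prompt reshaping in the record above concrete, the sketch below traces the view/permute/split sequence under hypothetical GPT-2-small-like dimensions (all sizes are assumptions, not taken from the record):

import torch

# All dimensions below are hypothetical and exist only to trace the tensor
# shapes through the view/permute/split in get_prompt above.
batch_size, num_virtual_tokens = 2, 20
num_layers, num_attention_heads, token_dim = 12, 12, 768
num_transformer_submodules = 1

# The prompt encoder emits one flat vector of 2 * num_layers * token_dim
# values per virtual token (keys and values for every layer).
flat = torch.randn(batch_size, num_virtual_tokens, num_layers * 2 * token_dim)

past_key_values = flat.view(
    batch_size,
    num_virtual_tokens,
    num_layers * 2,
    num_attention_heads,
    token_dim // num_attention_heads,
)
past_key_values = past_key_values.permute([2, 0, 3, 1, 4]).split(
    num_transformer_submodules * 2
)
# 12 per-layer groups, each shaped (2, 2, 12, 20, 64):
# (num_transformer_submodules * 2, batch, heads, virtual_tokens, head_dim)
print(len(past_key_values), past_key_values[0].shape)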
YuroFR/freqtrade-modded-crypto-trading-bot
freqtrade/exchange/exchange.py
[ { "identifier": "DEFAULT_AMOUNT_RESERVE_PERCENT", "path": "freqtrade/constants.py", "snippet": "DOCS_LINK = \"https://www.freqtrade.io/en/stable\"\nDEFAULT_CONFIG = 'config.json'\nPROCESS_THROTTLE_SECS = 5 # sec\nHYPEROPT_EPOCH = 100 # epochs\nRETRY_TIMEOUT = 30 # sec\nTIMEOUT_UNITS = ['minutes', 'seconds']\nEXPORT_OPTIONS = ['none', 'trades', 'signals']\nDEFAULT_DB_PROD_URL = 'sqlite:///tradesv3.sqlite'\nDEFAULT_DB_DRYRUN_URL = 'sqlite:///tradesv3.dryrun.sqlite'\nUNLIMITED_STAKE_AMOUNT = 'unlimited'\nDEFAULT_AMOUNT_RESERVE_PERCENT = 0.05\nREQUIRED_ORDERTIF = ['entry', 'exit']\nREQUIRED_ORDERTYPES = ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']\nPRICING_SIDES = ['ask', 'bid', 'same', 'other']\nORDERTYPE_POSSIBILITIES = ['limit', 'market']\n_ORDERTIF_POSSIBILITIES = ['GTC', 'FOK', 'IOC', 'PO']\nORDERTIF_POSSIBILITIES = _ORDERTIF_POSSIBILITIES + [t.lower() for t in _ORDERTIF_POSSIBILITIES]\nSTOPLOSS_PRICE_TYPES = [p for p in PriceType]\nHYPEROPT_LOSS_BUILTIN = ['ShortTradeDurHyperOptLoss', 'OnlyProfitHyperOptLoss',\n 'SharpeHyperOptLoss', 'SharpeHyperOptLossDaily',\n 'SortinoHyperOptLoss', 'SortinoHyperOptLossDaily',\n 'CalmarHyperOptLoss',\n 'MaxDrawDownHyperOptLoss', 'MaxDrawDownRelativeHyperOptLoss',\n 'ProfitDrawDownHyperOptLoss']\nAVAILABLE_PAIRLISTS = ['StaticPairList', 'VolumePairList', 'ProducerPairList', 'RemotePairList',\n 'AgeFilter', \"FullTradesFilter\", 'OffsetFilter', 'PerformanceFilter',\n 'PrecisionFilter', 'PriceFilter', 'RangeStabilityFilter',\n 'ShuffleFilter', 'SpreadFilter', 'VolatilityFilter']\nAVAILABLE_PROTECTIONS = ['CooldownPeriod',\n 'LowProfitPairs', 'MaxDrawdown', 'StoplossGuard']\nAVAILABLE_DATAHANDLERS = ['json', 'jsongz', 'hdf5', 'feather', 'parquet']\nBACKTEST_BREAKDOWNS = ['day', 'week', 'month']\nBACKTEST_CACHE_AGE = ['none', 'day', 'week', 'month']\nBACKTEST_CACHE_DEFAULT = 'day'\nDRY_RUN_WALLET = 1000\nDATETIME_PRINT_FORMAT = '%Y-%m-%d %H:%M:%S'\nMATH_CLOSE_PREC = 1e-14 # Precision used for float comparisons\nDEFAULT_DATAFRAME_COLUMNS = ['date', 'open', 'high', 'low', 'close', 'volume']\nDEFAULT_TRADES_COLUMNS = ['timestamp', 'id', 'type', 'side', 'price', 'amount', 'cost']\nTRADES_DTYPES = {\n 'timestamp': 'int64',\n 'id': 'str',\n 'type': 'str',\n 'side': 'str',\n 'price': 'float64',\n 'amount': 'float64',\n 'cost': 'float64',\n}\nTRADING_MODES = ['spot', 'margin', 'futures']\nMARGIN_MODES = ['cross', 'isolated', '']\nLAST_BT_RESULT_FN = '.last_result.json'\nFTHYPT_FILEVERSION = 'fthypt_fileversion'\nUSERPATH_HYPEROPTS = 'hyperopts'\nUSERPATH_STRATEGIES = 'strategies'\nUSERPATH_NOTEBOOKS = 'notebooks'\nUSERPATH_FREQAIMODELS = 'freqaimodels'\nTELEGRAM_SETTING_OPTIONS = ['on', 'off', 'silent']\nWEBHOOK_FORMAT_OPTIONS = ['form', 'json', 'raw']\nFULL_DATAFRAME_THRESHOLD = 100\nCUSTOM_TAG_MAX_LENGTH = 255\nDL_DATA_TIMEFRAMES = ['1m', '5m']\nENV_VAR_PREFIX = 'FREQTRADE__'\nCANCELED_EXCHANGE_STATES = ('cancelled', 'canceled', 'expired')\nNON_OPEN_EXCHANGE_STATES = CANCELED_EXCHANGE_STATES + ('closed',)\nDECIMAL_PER_COIN_FALLBACK = 3 # Should be low to avoid listing all possible FIAT's\nDECIMALS_PER_COIN = {\n 'BTC': 8,\n 'ETH': 5,\n}\nDUST_PER_COIN = {\n 'BTC': 0.0001,\n 'ETH': 0.01\n}\nUSER_DATA_FILES = {\n 'sample_strategy.py': USERPATH_STRATEGIES,\n 'sample_hyperopt_loss.py': USERPATH_HYPEROPTS,\n 'strategy_analysis_example.ipynb': USERPATH_NOTEBOOKS,\n}\nSUPPORTED_FIAT = [\n \"AUD\", \"BRL\", \"CAD\", \"CHF\", \"CLP\", \"CNY\", \"CZK\", \"DKK\",\n \"EUR\", \"GBP\", \"HKD\", \"HUF\", \"IDR\", \"ILS\", \"INR\", \"JPY\",\n \"KRW\", \"MXN\", 
\"MYR\", \"NOK\", \"NZD\", \"PHP\", \"PKR\", \"PLN\",\n \"RUB\", \"UAH\", \"SEK\", \"SGD\", \"THB\", \"TRY\", \"TWD\", \"ZAR\",\n \"USD\", \"BTC\", \"ETH\", \"XRP\", \"LTC\", \"BCH\"\n]\nMINIMAL_CONFIG = {\n \"stake_currency\": \"\",\n \"dry_run\": True,\n \"exchange\": {\n \"name\": \"\",\n \"key\": \"\",\n \"secret\": \"\",\n \"pair_whitelist\": [],\n \"ccxt_async_config\": {\n }\n }\n}\n__MESSAGE_TYPE_DICT: Dict[str, Dict[str, str]] = {x: {'type': 'object'} for x in RPCMessageType}\nCONF_SCHEMA = {\n 'type': 'object',\n 'properties': {\n 'max_open_trades': {'type': ['integer', 'number'], 'minimum': -1},\n 'new_pairs_days': {'type': 'integer', 'default': 30},\n 'timeframe': {'type': 'string'},\n 'stake_currency': {'type': 'string'},\n 'stake_amount': {\n 'type': ['number', 'string'],\n 'minimum': 0.0001,\n 'pattern': UNLIMITED_STAKE_AMOUNT\n },\n 'tradable_balance_ratio': {\n 'type': 'number',\n 'minimum': 0.0,\n 'maximum': 1,\n 'default': 0.99\n },\n 'available_capital': {\n 'type': 'number',\n 'minimum': 0,\n },\n 'amend_last_stake_amount': {'type': 'boolean', 'default': False},\n 'last_stake_amount_min_ratio': {\n 'type': 'number', 'minimum': 0.0, 'maximum': 1.0, 'default': 0.5\n },\n 'fiat_display_currency': {'type': 'string', 'enum': SUPPORTED_FIAT},\n 'dry_run': {'type': 'boolean'},\n 'dry_run_wallet': {'type': 'number', 'default': DRY_RUN_WALLET},\n 'cancel_open_orders_on_exit': {'type': 'boolean', 'default': False},\n 'process_only_new_candles': {'type': 'boolean'},\n 'minimal_roi': {\n 'type': 'object',\n 'patternProperties': {\n '^[0-9.]+$': {'type': 'number'}\n },\n },\n 'amount_reserve_percent': {'type': 'number', 'minimum': 0.0, 'maximum': 0.5},\n 'stoploss': {'type': 'number', 'maximum': 0, 'exclusiveMaximum': True},\n 'trailing_stop': {'type': 'boolean'},\n 'trailing_stop_positive': {'type': 'number', 'minimum': 0, 'maximum': 1},\n 'trailing_stop_positive_offset': {'type': 'number', 'minimum': 0, 'maximum': 1},\n 'trailing_only_offset_is_reached': {'type': 'boolean'},\n 'use_exit_signal': {'type': 'boolean'},\n 'exit_profit_only': {'type': 'boolean'},\n 'exit_profit_offset': {'type': 'number'},\n 'ignore_roi_if_entry_signal': {'type': 'boolean'},\n 'ignore_buying_expired_candle_after': {'type': 'number'},\n 'trading_mode': {'type': 'string', 'enum': TRADING_MODES},\n 'margin_mode': {'type': 'string', 'enum': MARGIN_MODES},\n 'reduce_df_footprint': {'type': 'boolean', 'default': False},\n 'minimum_trade_amount': {'type': 'number', 'default': 10},\n 'targeted_trade_amount': {'type': 'number', 'default': 20},\n 'lookahead_analysis_exportfilename': {'type': 'string'},\n 'startup_candle': {\n 'type': 'array',\n 'uniqueItems': True,\n 'default': [199, 399, 499, 999, 1999],\n },\n 'liquidation_buffer': {'type': 'number', 'minimum': 0.0, 'maximum': 0.99},\n 'backtest_breakdown': {\n 'type': 'array',\n 'items': {'type': 'string', 'enum': BACKTEST_BREAKDOWNS}\n },\n 'bot_name': {'type': 'string'},\n 'unfilledtimeout': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'number', 'minimum': 1},\n 'exit': {'type': 'number', 'minimum': 1},\n 'exit_timeout_count': {'type': 'number', 'minimum': 0, 'default': 0},\n 'unit': {'type': 'string', 'enum': TIMEOUT_UNITS, 'default': 'minutes'}\n }\n },\n 'entry_pricing': {\n 'type': 'object',\n 'properties': {\n 'price_last_balance': {\n 'type': 'number',\n 'minimum': 0,\n 'maximum': 1,\n 'exclusiveMaximum': False,\n },\n 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'},\n 'use_order_book': {'type': 'boolean'},\n 
'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, },\n 'check_depth_of_market': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'bids_to_ask_delta': {'type': 'number', 'minimum': 0},\n }\n },\n },\n 'required': ['price_side']\n },\n 'exit_pricing': {\n 'type': 'object',\n 'properties': {\n 'price_side': {'type': 'string', 'enum': PRICING_SIDES, 'default': 'same'},\n 'price_last_balance': {\n 'type': 'number',\n 'minimum': 0,\n 'maximum': 1,\n 'exclusiveMaximum': False,\n },\n 'use_order_book': {'type': 'boolean'},\n 'order_book_top': {'type': 'integer', 'minimum': 1, 'maximum': 50, },\n },\n 'required': ['price_side']\n },\n 'custom_price_max_distance_ratio': {\n 'type': 'number', 'minimum': 0.0\n },\n 'order_types': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'force_exit': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'force_entry': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'emergency_exit': {\n 'type': 'string',\n 'enum': ORDERTYPE_POSSIBILITIES,\n 'default': 'market'},\n 'stoploss': {'type': 'string', 'enum': ORDERTYPE_POSSIBILITIES},\n 'stoploss_on_exchange': {'type': 'boolean'},\n 'stoploss_price_type': {'type': 'string', 'enum': STOPLOSS_PRICE_TYPES},\n 'stoploss_on_exchange_interval': {'type': 'number'},\n 'stoploss_on_exchange_limit_ratio': {'type': 'number', 'minimum': 0.0,\n 'maximum': 1.0}\n },\n 'required': ['entry', 'exit', 'stoploss', 'stoploss_on_exchange']\n },\n 'order_time_in_force': {\n 'type': 'object',\n 'properties': {\n 'entry': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES},\n 'exit': {'type': 'string', 'enum': ORDERTIF_POSSIBILITIES}\n },\n 'required': REQUIRED_ORDERTIF\n },\n 'exchange': {'$ref': '#/definitions/exchange'},\n 'edge': {'$ref': '#/definitions/edge'},\n 'freqai': {'$ref': '#/definitions/freqai'},\n 'external_message_consumer': {'$ref': '#/definitions/external_message_consumer'},\n 'experimental': {\n 'type': 'object',\n 'properties': {\n 'block_bad_exchanges': {'type': 'boolean'}\n }\n },\n 'pairlists': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'method': {'type': 'string', 'enum': AVAILABLE_PAIRLISTS},\n },\n 'required': ['method'],\n }\n },\n 'protections': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'method': {'type': 'string', 'enum': AVAILABLE_PROTECTIONS},\n 'stop_duration': {'type': 'number', 'minimum': 0.0},\n 'stop_duration_candles': {'type': 'number', 'minimum': 0},\n 'trade_limit': {'type': 'number', 'minimum': 1},\n 'lookback_period': {'type': 'number', 'minimum': 1},\n 'lookback_period_candles': {'type': 'number', 'minimum': 1},\n },\n 'required': ['method'],\n }\n },\n 'telegram': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'token': {'type': 'string'},\n 'chat_id': {'type': 'string'},\n 'allow_custom_messages': {'type': 'boolean', 'default': True},\n 'balance_dust_level': {'type': 'number', 'minimum': 0.0},\n 'notification_settings': {\n 'type': 'object',\n 'default': {},\n 'properties': {\n 'status': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'warning': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'startup': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'entry': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'entry_fill': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'off'\n },\n 
'entry_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS, },\n 'exit': {\n 'type': ['string', 'object'],\n 'additionalProperties': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS\n }\n },\n 'exit_fill': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'exit_cancel': {'type': 'string', 'enum': TELEGRAM_SETTING_OPTIONS},\n 'protection_trigger': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'protection_trigger_global': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n 'show_candle': {\n 'type': 'string',\n 'enum': ['off', 'ohlc'],\n 'default': 'off'\n },\n 'strategy_msg': {\n 'type': 'string',\n 'enum': TELEGRAM_SETTING_OPTIONS,\n 'default': 'on'\n },\n }\n },\n 'reload': {'type': 'boolean'},\n },\n 'required': ['enabled', 'token', 'chat_id'],\n },\n 'webhook': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'url': {'type': 'string'},\n 'format': {'type': 'string', 'enum': WEBHOOK_FORMAT_OPTIONS, 'default': 'form'},\n 'retries': {'type': 'integer', 'minimum': 0},\n 'retry_delay': {'type': 'number', 'minimum': 0},\n **__MESSAGE_TYPE_DICT,\n # **{x: {'type': 'object'} for x in RPCMessageType},\n # Below -> Deprecated\n 'webhookentry': {'type': 'object'},\n 'webhookentrycancel': {'type': 'object'},\n 'webhookentryfill': {'type': 'object'},\n 'webhookexit': {'type': 'object'},\n 'webhookexitcancel': {'type': 'object'},\n 'webhookexitfill': {'type': 'object'},\n 'webhookstatus': {'type': 'object'},\n },\n },\n 'discord': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'webhook_url': {'type': 'string'},\n \"exit_fill\": {\n 'type': 'array', 'items': {'type': 'object'},\n 'default': [\n {\"Trade ID\": \"{trade_id}\"},\n {\"Exchange\": \"{exchange}\"},\n {\"Pair\": \"{pair}\"},\n {\"Direction\": \"{direction}\"},\n {\"Open rate\": \"{open_rate}\"},\n {\"Close rate\": \"{close_rate}\"},\n {\"Amount\": \"{amount}\"},\n {\"Open date\": \"{open_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Close date\": \"{close_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Profit\": \"{profit_amount} {stake_currency}\"},\n {\"Profitability\": \"{profit_ratio:.2%}\"},\n {\"Enter tag\": \"{enter_tag}\"},\n {\"Exit Reason\": \"{exit_reason}\"},\n {\"Strategy\": \"{strategy}\"},\n {\"Timeframe\": \"{timeframe}\"},\n ]\n },\n \"entry_fill\": {\n 'type': 'array', 'items': {'type': 'object'},\n 'default': [\n {\"Trade ID\": \"{trade_id}\"},\n {\"Exchange\": \"{exchange}\"},\n {\"Pair\": \"{pair}\"},\n {\"Direction\": \"{direction}\"},\n {\"Open rate\": \"{open_rate}\"},\n {\"Amount\": \"{amount}\"},\n {\"Open date\": \"{open_date:%Y-%m-%d %H:%M:%S}\"},\n {\"Enter tag\": \"{enter_tag}\"},\n {\"Strategy\": \"{strategy} {timeframe}\"},\n ]\n },\n }\n },\n 'api_server': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'listen_ip_address': {'format': 'ipv4'},\n 'listen_port': {\n 'type': 'integer',\n 'minimum': 1024,\n 'maximum': 65535\n },\n 'username': {'type': 'string'},\n 'password': {'type': 'string'},\n 'ws_token': {'type': ['string', 'array'], 'items': {'type': 'string'}},\n 'jwt_secret_key': {'type': 'string'},\n 'CORS_origins': {'type': 'array', 'items': {'type': 'string'}},\n 'verbosity': {'type': 'string', 'enum': ['error', 'info']},\n },\n 'required': ['enabled', 'listen_ip_address', 'listen_port', 'username', 'password']\n },\n 'db_url': {'type': 'string'},\n 'export': {'type': 'string', 'enum': EXPORT_OPTIONS, 'default': 'trades'},\n 
'disableparamexport': {'type': 'boolean'},\n 'initial_state': {'type': 'string', 'enum': ['running', 'stopped']},\n 'force_entry_enable': {'type': 'boolean'},\n 'disable_dataframe_checks': {'type': 'boolean'},\n 'internals': {\n 'type': 'object',\n 'default': {},\n 'properties': {\n 'process_throttle_secs': {'type': 'integer'},\n 'interval': {'type': 'integer'},\n 'sd_notify': {'type': 'boolean'},\n }\n },\n 'dataformat_ohlcv': {\n 'type': 'string',\n 'enum': AVAILABLE_DATAHANDLERS,\n 'default': 'feather'\n },\n 'dataformat_trades': {\n 'type': 'string',\n 'enum': AVAILABLE_DATAHANDLERS,\n 'default': 'feather'\n },\n 'position_adjustment_enable': {'type': 'boolean'},\n 'max_entry_position_adjustment': {'type': ['integer', 'number'], 'minimum': -1},\n },\n 'definitions': {\n 'exchange': {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'key': {'type': 'string', 'default': ''},\n 'secret': {'type': 'string', 'default': ''},\n 'password': {'type': 'string', 'default': ''},\n 'uid': {'type': 'string'},\n 'pair_whitelist': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n },\n 'uniqueItems': True\n },\n 'pair_blacklist': {\n 'type': 'array',\n 'items': {\n 'type': 'string',\n },\n 'uniqueItems': True\n },\n 'unknown_fee_rate': {'type': 'number'},\n 'outdated_offset': {'type': 'integer', 'minimum': 1},\n 'markets_refresh_interval': {'type': 'integer'},\n 'ccxt_config': {'type': 'object'},\n 'ccxt_async_config': {'type': 'object'}\n },\n 'required': ['name']\n },\n 'edge': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean'},\n 'process_throttle_secs': {'type': 'integer', 'minimum': 600},\n 'calculate_since_number_of_days': {'type': 'integer'},\n 'allowed_risk': {'type': 'number'},\n 'stoploss_range_min': {'type': 'number'},\n 'stoploss_range_max': {'type': 'number'},\n 'stoploss_range_step': {'type': 'number'},\n 'minimum_winrate': {'type': 'number'},\n 'minimum_expectancy': {'type': 'number'},\n 'min_trade_number': {'type': 'number'},\n 'max_trade_duration_minute': {'type': 'integer'},\n 'remove_pumps': {'type': 'boolean'}\n },\n 'required': ['process_throttle_secs', 'allowed_risk']\n },\n 'external_message_consumer': {\n 'type': 'object',\n 'properties': {\n 'enabled': {'type': 'boolean', 'default': False},\n 'producers': {\n 'type': 'array',\n 'items': {\n 'type': 'object',\n 'properties': {\n 'name': {'type': 'string'},\n 'host': {'type': 'string'},\n 'port': {\n 'type': 'integer',\n 'default': 8080,\n 'minimum': 0,\n 'maximum': 65535\n },\n 'secure': {'type': 'boolean', 'default': False},\n 'ws_token': {'type': 'string'},\n },\n 'required': ['name', 'host', 'ws_token']\n }\n },\n 'wait_timeout': {'type': 'integer', 'minimum': 0},\n 'sleep_time': {'type': 'integer', 'minimum': 0},\n 'ping_timeout': {'type': 'integer', 'minimum': 0},\n 'remove_entry_exit_signals': {'type': 'boolean', 'default': False},\n 'initial_candle_limit': {\n 'type': 'integer',\n 'minimum': 0,\n 'maximum': 1500,\n 'default': 1500\n },\n 'message_size_limit': { # In megabytes\n 'type': 'integer',\n 'minimum': 1,\n 'maxmium': 20,\n 'default': 8,\n }\n },\n 'required': ['producers']\n },\n \"freqai\": {\n \"type\": \"object\",\n \"properties\": {\n \"enabled\": {\"type\": \"boolean\", \"default\": False},\n \"keras\": {\"type\": \"boolean\", \"default\": False},\n \"write_metrics_to_disk\": {\"type\": \"boolean\", \"default\": False},\n \"purge_old_models\": {\"type\": [\"boolean\", \"number\"], \"default\": 2},\n \"conv_width\": {\"type\": \"integer\", \"default\": 1},\n 
\"train_period_days\": {\"type\": \"integer\", \"default\": 0},\n \"backtest_period_days\": {\"type\": \"number\", \"default\": 7},\n \"identifier\": {\"type\": \"string\", \"default\": \"example\"},\n \"feature_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"include_corr_pairlist\": {\"type\": \"array\"},\n \"include_timeframes\": {\"type\": \"array\"},\n \"label_period_candles\": {\"type\": \"integer\"},\n \"include_shifted_candles\": {\"type\": \"integer\", \"default\": 0},\n \"DI_threshold\": {\"type\": \"number\", \"default\": 0},\n \"weight_factor\": {\"type\": \"number\", \"default\": 0},\n \"principal_component_analysis\": {\"type\": \"boolean\", \"default\": False},\n \"use_SVM_to_remove_outliers\": {\"type\": \"boolean\", \"default\": False},\n \"plot_feature_importances\": {\"type\": \"integer\", \"default\": 0},\n \"svm_params\": {\"type\": \"object\",\n \"properties\": {\n \"shuffle\": {\"type\": \"boolean\", \"default\": False},\n \"nu\": {\"type\": \"number\", \"default\": 0.1}\n },\n },\n \"shuffle_after_split\": {\"type\": \"boolean\", \"default\": False},\n \"buffer_train_data_candles\": {\"type\": \"integer\", \"default\": 0}\n },\n \"required\": [\"include_timeframes\", \"include_corr_pairlist\", ]\n },\n \"data_split_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"test_size\": {\"type\": \"number\"},\n \"random_state\": {\"type\": \"integer\"},\n \"shuffle\": {\"type\": \"boolean\", \"default\": False}\n },\n },\n \"model_training_parameters\": {\n \"type\": \"object\"\n },\n \"rl_config\": {\n \"type\": \"object\",\n \"properties\": {\n \"drop_ohlc_from_features\": {\"type\": \"boolean\", \"default\": False},\n \"train_cycles\": {\"type\": \"integer\"},\n \"max_trade_duration_candles\": {\"type\": \"integer\"},\n \"add_state_info\": {\"type\": \"boolean\", \"default\": False},\n \"max_training_drawdown_pct\": {\"type\": \"number\", \"default\": 0.02},\n \"cpu_count\": {\"type\": \"integer\", \"default\": 1},\n \"model_type\": {\"type\": \"string\", \"default\": \"PPO\"},\n \"policy_type\": {\"type\": \"string\", \"default\": \"MlpPolicy\"},\n \"net_arch\": {\"type\": \"array\", \"default\": [128, 128]},\n \"randomize_starting_position\": {\"type\": \"boolean\", \"default\": False},\n \"progress_bar\": {\"type\": \"boolean\", \"default\": True},\n \"model_reward_parameters\": {\n \"type\": \"object\",\n \"properties\": {\n \"rr\": {\"type\": \"number\", \"default\": 1},\n \"profit_aim\": {\"type\": \"number\", \"default\": 0.025}\n }\n }\n },\n },\n },\n \"required\": [\n \"enabled\",\n \"train_period_days\",\n \"backtest_period_days\",\n \"identifier\",\n \"feature_parameters\",\n \"data_split_parameters\"\n ]\n },\n },\n}\nSCHEMA_TRADE_REQUIRED = [\n 'exchange',\n 'timeframe',\n 'max_open_trades',\n 'stake_currency',\n 'stake_amount',\n 'tradable_balance_ratio',\n 'last_stake_amount_min_ratio',\n 'dry_run',\n 'dry_run_wallet',\n 'exit_pricing',\n 'entry_pricing',\n 'stoploss',\n 'minimal_roi',\n 'internals',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_BACKTEST_REQUIRED = [\n 'exchange',\n 'stake_currency',\n 'stake_amount',\n 'dry_run_wallet',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_BACKTEST_REQUIRED_FINAL = SCHEMA_BACKTEST_REQUIRED + [\n 'stoploss',\n 'minimal_roi',\n 'max_open_trades'\n]\nSCHEMA_MINIMAL_REQUIRED = [\n 'exchange',\n 'dry_run',\n 'dataformat_ohlcv',\n 'dataformat_trades',\n]\nSCHEMA_MINIMAL_WEBSERVER = SCHEMA_MINIMAL_REQUIRED + [\n 'api_server',\n]\nCANCEL_REASON = {\n \"TIMEOUT\": 
\"cancelled due to timeout\",\n \"PARTIALLY_FILLED_KEEP_OPEN\": \"partially filled - keeping order open\",\n \"PARTIALLY_FILLED\": \"partially filled\",\n \"FULLY_CANCELLED\": \"fully cancelled\",\n \"ALL_CANCELLED\": \"cancelled (all unfilled and partially filled open orders cancelled)\",\n \"CANCELLED_ON_EXCHANGE\": \"cancelled on exchange\",\n \"FORCE_EXIT\": \"forcesold\",\n \"REPLACE\": \"cancelled to be replaced by new limit order\",\n \"REPLACE_FAILED\": \"failed to replace order, deleting Trade\",\n \"USER_CANCEL\": \"user requested order cancel\"\n}" }, { "identifier": "clean_ohlcv_dataframe", "path": "freqtrade/data/converter/converter.py", "snippet": "def clean_ohlcv_dataframe(data: DataFrame, timeframe: str, pair: str, *,\n fill_missing: bool, drop_incomplete: bool) -> DataFrame:\n \"\"\"\n Cleanse a OHLCV dataframe by\n * Grouping it by date (removes duplicate tics)\n * dropping last candles if requested\n * Filling up missing data (if requested)\n :param data: DataFrame containing candle (OHLCV) data.\n :param timeframe: timeframe (e.g. 5m). Used to fill up eventual missing data\n :param pair: Pair this data is for (used to warn if fillup was necessary)\n :param fill_missing: fill up missing candles with 0 candles\n (see ohlcv_fill_up_missing_data for details)\n :param drop_incomplete: Drop the last candle of the dataframe, assuming it's incomplete\n :return: DataFrame\n \"\"\"\n # group by index and aggregate results to eliminate duplicate ticks\n data = data.groupby(by='date', as_index=False, sort=True).agg({\n 'open': 'first',\n 'high': 'max',\n 'low': 'min',\n 'close': 'last',\n 'volume': 'max',\n })\n # eliminate partial candle\n if drop_incomplete:\n data.drop(data.tail(1).index, inplace=True)\n logger.debug('Dropping last candle')\n\n if fill_missing:\n return ohlcv_fill_up_missing_data(data, timeframe, pair)\n else:\n return data" }, { "identifier": "ohlcv_to_dataframe", "path": "freqtrade/data/converter/converter.py", "snippet": "def ohlcv_to_dataframe(ohlcv: list, timeframe: str, pair: str, *,\n fill_missing: bool = True, drop_incomplete: bool = True) -> DataFrame:\n \"\"\"\n Converts a list with candle (OHLCV) data (in format returned by ccxt.fetch_ohlcv)\n to a Dataframe\n :param ohlcv: list with candle (OHLCV) data, as returned by exchange.async_get_candle_history\n :param timeframe: timeframe (e.g. 5m). 
Used to fill up eventual missing data\n :param pair: Pair this data is for (used to warn if fillup was necessary)\n :param fill_missing: fill up missing candles with 0 candles\n (see ohlcv_fill_up_missing_data for details)\n :param drop_incomplete: Drop the last candle of the dataframe, assuming it's incomplete\n :return: DataFrame\n \"\"\"\n logger.debug(f\"Converting candle (OHLCV) data to dataframe for pair {pair}.\")\n cols = DEFAULT_DATAFRAME_COLUMNS\n df = DataFrame(ohlcv, columns=cols)\n\n df['date'] = to_datetime(df['date'], unit='ms', utc=True)\n\n # Some exchanges return int values for Volume and even for OHLC.\n # Convert them since TA-LIB indicators used in the strategy assume floats\n # and fail with exception...\n df = df.astype(dtype={'open': 'float', 'high': 'float', 'low': 'float', 'close': 'float',\n 'volume': 'float'})\n return clean_ohlcv_dataframe(df, timeframe, pair,\n fill_missing=fill_missing,\n drop_incomplete=drop_incomplete)" }, { "identifier": "trades_dict_to_list", "path": "freqtrade/data/converter/trade_converter.py", "snippet": "def trades_dict_to_list(trades: List[Dict]) -> TradeList:\n \"\"\"\n Convert fetch_trades result into a List (to be more memory efficient).\n :param trades: List of trades, as returned by ccxt.fetch_trades.\n :return: List of Lists, with constants.DEFAULT_TRADES_COLUMNS as columns\n \"\"\"\n return [[t[col] for col in DEFAULT_TRADES_COLUMNS] for t in trades]" }, { "identifier": "CandleType", "path": "freqtrade/enums/candletype.py", "snippet": "class CandleType(str, Enum):\n \"\"\"Enum to distinguish candle types\"\"\"\n SPOT = \"spot\"\n FUTURES = \"futures\"\n MARK = \"mark\"\n INDEX = \"index\"\n PREMIUMINDEX = \"premiumIndex\"\n\n # TODO: Could take up less memory if these weren't a CandleType\n FUNDING_RATE = \"funding_rate\"\n # BORROW_RATE = \"borrow_rate\" # * unimplemented\n\n def __str__(self):\n return f\"{self.name.lower()}\"\n\n @staticmethod\n def from_string(value: str) -> 'CandleType':\n if not value:\n # Default to spot\n return CandleType.SPOT\n return CandleType(value)\n\n @staticmethod\n def get_default(trading_mode: str) -> 'CandleType':\n if trading_mode == 'futures':\n return CandleType.FUTURES\n return CandleType.SPOT" }, { "identifier": "MarginMode", "path": "freqtrade/enums/marginmode.py", "snippet": "class MarginMode(str, Enum):\n \"\"\"\n Enum to distinguish between\n cross margin/futures margin_mode and\n isolated margin/futures margin_mode\n \"\"\"\n CROSS = \"cross\"\n ISOLATED = \"isolated\"\n NONE = ''" }, { "identifier": "PriceType", "path": "freqtrade/enums/pricetype.py", "snippet": "class PriceType(str, Enum):\n \"\"\"Enum to distinguish possible trigger prices for stoplosses\"\"\"\n LAST = \"last\"\n MARK = \"mark\"\n INDEX = \"index\"" }, { "identifier": "OPTIMIZE_MODES", "path": "freqtrade/enums/runmode.py", "snippet": "OPTIMIZE_MODES = [RunMode.BACKTEST, RunMode.EDGE, RunMode.HYPEROPT]" }, { "identifier": "TradingMode", "path": "freqtrade/enums/tradingmode.py", "snippet": "class TradingMode(str, Enum):\n \"\"\"\n Enum to distinguish between\n spot, margin, futures or any other trading method\n \"\"\"\n SPOT = \"spot\"\n MARGIN = \"margin\"\n FUTURES = \"futures\"" }, { "identifier": "DDosProtection", "path": "freqtrade/exceptions.py", "snippet": "class DDosProtection(TemporaryError):\n \"\"\"\n Temporary error caused by DDOS protection.\n Bot will wait for a second and then retry.\n \"\"\"" }, { "identifier": "ExchangeError", "path": "freqtrade/exceptions.py", "snippet": "class 
ExchangeError(DependencyException):\n \"\"\"\n Error raised out of the exchange.\n Has multiple Errors to determine the appropriate error.\n \"\"\"" }, { "identifier": "InsufficientFundsError", "path": "freqtrade/exceptions.py", "snippet": "class InsufficientFundsError(InvalidOrderException):\n \"\"\"\n This error is used when there are not enough funds available on the exchange\n to create an order.\n \"\"\"" }, { "identifier": "InvalidOrderException", "path": "freqtrade/exceptions.py", "snippet": "class InvalidOrderException(ExchangeError):\n \"\"\"\n This is returned when the order is not valid. Example:\n If stoploss on exchange order is hit, then trying to cancel the order\n should return this exception.\n \"\"\"" }, { "identifier": "OperationalException", "path": "freqtrade/exceptions.py", "snippet": "class OperationalException(FreqtradeException):\n \"\"\"\n Requires manual intervention and will stop the bot.\n Most of the time, this is caused by an invalid Configuration.\n \"\"\"" }, { "identifier": "PricingError", "path": "freqtrade/exceptions.py", "snippet": "class PricingError(DependencyException):\n \"\"\"\n Subclass of DependencyException.\n Indicates that the price could not be determined.\n Implicitly a buy / sell operation.\n \"\"\"" }, { "identifier": "RetryableOrderError", "path": "freqtrade/exceptions.py", "snippet": "class RetryableOrderError(InvalidOrderException):\n \"\"\"\n This is returned when the order is not found.\n This Error will be repeated with increasing backoff (in line with DDosError).\n \"\"\"" }, { "identifier": "TemporaryError", "path": "freqtrade/exceptions.py", "snippet": "class TemporaryError(ExchangeError):\n \"\"\"\n Temporary network or exchange related error.\n This could happen when an exchange is congested, unavailable, or the user\n has networking problems. Usually resolves itself after a time.\n \"\"\"" }, { "identifier": "API_FETCH_ORDER_RETRY_COUNT", "path": "freqtrade/exchange/common.py", "snippet": "API_FETCH_ORDER_RETRY_COUNT = 5" }, { "identifier": "remove_exchange_credentials", "path": "freqtrade/exchange/common.py", "snippet": "def remove_exchange_credentials(exchange_config: ExchangeConfig, dry_run: bool) -> None:\n \"\"\"\n Removes exchange keys from the configuration and specifies dry-run\n Used for backtesting / hyperopt / edge and utils.\n Modifies the input dict!\n \"\"\"\n if dry_run:\n exchange_config['key'] = ''\n exchange_config['apiKey'] = ''\n exchange_config['secret'] = ''\n exchange_config['password'] = ''\n exchange_config['uid'] = ''" }, { "identifier": "retrier", "path": "freqtrade/exchange/common.py", "snippet": "@overload\ndef retrier(_func: F) -> F:\n ..." }, { "identifier": "retrier_async", "path": "freqtrade/exchange/common.py", "snippet": "def retrier_async(f):\n async def wrapper(*args, **kwargs):\n count = kwargs.pop('count', API_RETRY_COUNT)\n kucoin = args[0].name == \"KuCoin\" # Check if the exchange is KuCoin.\n try:\n return await f(*args, **kwargs)\n except TemporaryError as ex:\n msg = f'{f.__name__}() returned exception: \"{ex}\". '\n if count > 0:\n msg += f'Retrying still for {count} times.'\n count -= 1\n kwargs['count'] = count\n if isinstance(ex, DDosProtection):\n if kucoin and \"429000\" in str(ex):\n # Temporary fix for 429000 error on kucoin\n # see https://github.com/freqtrade/freqtrade/issues/5700 for details.\n _get_logging_mixin().log_once(\n f\"Kucoin 429 error, avoid triggering DDosProtection backoff delay. 
\"\n f\"{count} tries left before giving up\", logmethod=logger.warning)\n # Reset msg to avoid logging too many times.\n msg = ''\n else:\n backoff_delay = calculate_backoff(count + 1, API_RETRY_COUNT)\n logger.info(f\"Applying DDosProtection backoff delay: {backoff_delay}\")\n await asyncio.sleep(backoff_delay)\n if msg:\n logger.warning(msg)\n return await wrapper(*args, **kwargs)\n else:\n logger.warning(msg + 'Giving up.')\n raise ex\n return wrapper" }, { "identifier": "ROUND", "path": "freqtrade/exchange/exchange_utils.py", "snippet": "def is_exchange_known_ccxt(\n exchange_name: str, ccxt_module: Optional[CcxtModuleType] = None) -> bool:\ndef ccxt_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> List[str]:\ndef available_exchanges(ccxt_module: Optional[CcxtModuleType] = None) -> List[str]:\ndef validate_exchange(exchange: str) -> Tuple[bool, str]:\ndef _build_exchange_list_entry(\n exchange_name: str, exchangeClasses: Dict[str, Any]) -> ValidExchangesType:\ndef list_available_exchanges(all_exchanges: bool) -> List[ValidExchangesType]:\ndef timeframe_to_seconds(timeframe: str) -> int:\ndef timeframe_to_minutes(timeframe: str) -> int:\ndef timeframe_to_msecs(timeframe: str) -> int:\ndef timeframe_to_prev_date(timeframe: str, date: Optional[datetime] = None) -> datetime:\ndef timeframe_to_next_date(timeframe: str, date: Optional[datetime] = None) -> datetime:\ndef date_minus_candles(\n timeframe: str, candle_count: int, date: Optional[datetime] = None) -> datetime:\ndef market_is_active(market: Dict) -> bool:\ndef amount_to_contracts(amount: float, contract_size: Optional[float]) -> float:\ndef contracts_to_amount(num_contracts: float, contract_size: Optional[float]) -> float:\ndef amount_to_precision(amount: float, amount_precision: Optional[float],\n precisionMode: Optional[int]) -> float:\ndef amount_to_contract_precision(\n amount, amount_precision: Optional[float], precisionMode: Optional[int],\n contract_size: Optional[float]) -> float:\ndef __price_to_precision_significant_digits(\n price: float,\n price_precision: float,\n *,\n rounding_mode: int = ROUND,\n) -> float:\ndef price_to_precision(\n price: float,\n price_precision: Optional[float],\n precisionMode: Optional[int],\n *,\n rounding_mode: int = ROUND,\n) -> float:" }, { "identifier": "OHLCVResponse", "path": "freqtrade/exchange/types.py", "snippet": "class Ticker(TypedDict):\nclass OrderBook(TypedDict):" }, { "identifier": "chunks", "path": "freqtrade/misc.py", "snippet": "def chunks(lst: List[Any], n: int) -> Iterator[List[Any]]:\n \"\"\"\n Split lst into chunks of the size n.\n :param lst: list to split into chunks\n :param n: number of max elements per chunk\n :return: None\n \"\"\"\n for chunk in range(0, len(lst), n):\n yield (lst[chunk:chunk + n])" }, { "identifier": "deep_merge_dicts", "path": "freqtrade/misc.py", "snippet": "def deep_merge_dicts(source, destination, allow_null_overrides: bool = True):\n \"\"\"\n Values from Source override destination, destination is returned (and modified!!)\n Sample:\n >>> a = { 'first' : { 'rows' : { 'pass' : 'dog', 'number' : '1' } } }\n >>> b = { 'first' : { 'rows' : { 'fail' : 'cat', 'number' : '5' } } }\n >>> merge(b, a) == { 'first' : { 'rows' : { 'pass' : 'dog', 'fail' : 'cat', 'number' : '5' } } }\n True\n \"\"\"\n for key, value in source.items():\n if isinstance(value, dict):\n # get node or create one\n node = destination.setdefault(key, {})\n deep_merge_dicts(value, node, allow_null_overrides)\n elif value is not None or allow_null_overrides:\n 
destination[key] = value\n\n return destination" }, { "identifier": "file_dump_json", "path": "freqtrade/misc.py", "snippet": "def file_dump_json(filename: Path, data: Any, is_zip: bool = False, log: bool = True) -> None:\n \"\"\"\n Dump JSON data into a file\n :param filename: file to create\n :param is_zip: if file should be zip\n :param data: JSON Data to save\n :return:\n \"\"\"\n\n if is_zip:\n if filename.suffix != '.gz':\n filename = filename.with_suffix('.gz')\n if log:\n logger.info(f'dumping json to \"{filename}\"')\n\n with gzip.open(filename, 'w') as fpz:\n rapidjson.dump(data, fpz, default=str, number_mode=rapidjson.NM_NATIVE)\n else:\n if log:\n logger.info(f'dumping json to \"{filename}\"')\n with filename.open('w') as fp:\n rapidjson.dump(data, fp, default=str, number_mode=rapidjson.NM_NATIVE)\n\n logger.debug(f'done json to \"{filename}\"')" }, { "identifier": "file_load_json", "path": "freqtrade/misc.py", "snippet": "def file_load_json(file: Path):\n\n if file.suffix != \".gz\":\n gzipfile = file.with_suffix(file.suffix + '.gz')\n else:\n gzipfile = file\n # Try gzip file first, otherwise regular json file.\n if gzipfile.is_file():\n logger.debug(f\"Loading historical data from file {gzipfile}\")\n with gzip.open(gzipfile) as datafile:\n pairdata = json_load(datafile)\n elif file.is_file():\n logger.debug(f\"Loading historical data from file {file}\")\n with file.open() as datafile:\n pairdata = json_load(datafile)\n else:\n return None\n return pairdata" }, { "identifier": "safe_value_fallback2", "path": "freqtrade/misc.py", "snippet": "def safe_value_fallback2(dict1: dictMap, dict2: dictMap, key1: str, key2: str, default_value=None):\n \"\"\"\n Search a value in dict1, return this if it's not None.\n Fall back to dict2 - return key2 from dict2 if it's not None.\n Else falls back to default_value (None if not given).\n\n \"\"\"\n if key1 in dict1 and dict1[key1] is not None:\n return dict1[key1]\n else:\n if key2 in dict2 and dict2[key2] is not None:\n return dict2[key2]\n return default_value" }, { "identifier": "expand_pairlist", "path": "freqtrade/plugins/pairlist/pairlist_helpers.py", "snippet": "def expand_pairlist(wildcardpl: List[str], available_pairs: List[str],\n keep_invalid: bool = False) -> List[str]:\n \"\"\"\n Expand pairlist potentially containing wildcards based on available markets.\n This will implicitly filter all pairs in the wildcard-list which are not in available_pairs.\n :param wildcardpl: List of Pairlists, which may contain regex\n :param available_pairs: List of all available pairs (`exchange.get_markets().keys()`)\n :param keep_invalid: If set to True, drops invalid pairs silently while expanding regexes\n :return: expanded pairlist, with Regexes from wildcardpl applied to match all available pairs.\n :raises: ValueError if a wildcard is invalid (like '*/BTC' - which should be `.*/BTC`)\n \"\"\"\n result = []\n if keep_invalid:\n for pair_wc in wildcardpl:\n try:\n comp = re.compile(pair_wc, re.IGNORECASE)\n result_partial = [\n pair for pair in available_pairs if re.fullmatch(comp, pair)\n ]\n # Add all matching pairs.\n # If there are no matching pairs (Pair not on exchange) keep it.\n result += result_partial or [pair_wc]\n except re.error as err:\n raise ValueError(f\"Wildcard error in {pair_wc}, {err}\")\n\n result = [element for element in result if re.fullmatch(r'^[A-Za-z0-9:/-]+$', element)]\n\n else:\n for pair_wc in wildcardpl:\n try:\n comp = re.compile(pair_wc, re.IGNORECASE)\n result += [\n pair for pair in available_pairs if re.fullmatch(comp, pair)\n ]\n 
except re.error as err:\n raise ValueError(f\"Wildcard error in {pair_wc}, {err}\")\n return result" }, { "identifier": "dt_from_ts", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_from_ts(timestamp: float) -> datetime:\n \"\"\"\n Return a datetime from a timestamp.\n :param timestamp: timestamp in seconds or milliseconds\n \"\"\"\n if timestamp > 1e10:\n # Timestamp is in ms - convert to seconds\n timestamp /= 1000\n return datetime.fromtimestamp(timestamp, tz=timezone.utc)" }, { "identifier": "dt_now", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_now() -> datetime:\n \"\"\"Return the current datetime in UTC.\"\"\"\n return datetime.now(timezone.utc)" }, { "identifier": "dt_humanize", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_humanize(dt: datetime, **kwargs) -> str:\n \"\"\"\n Return a humanized string for the given datetime.\n :param dt: datetime to humanize\n :param kwargs: kwargs to pass to arrow's humanize()\n \"\"\"\n return arrow.get(dt).humanize(**kwargs)" }, { "identifier": "dt_ts", "path": "freqtrade/util/datetime_helpers.py", "snippet": "def dt_ts(dt: Optional[datetime] = None) -> int:\n \"\"\"\n Return dt in ms as a timestamp in UTC.\n If dt is None, return the current datetime in UTC.\n \"\"\"\n if dt:\n return int(dt.timestamp() * 1000)\n return int(dt_now().timestamp() * 1000)" } ]
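A quick usage sketch of the misc/datetime helpers quoted in the context above (illustrative only; the values are made up, and it assumes the snippets are importable from the paths shown):

from freqtrade.misc import chunks, deep_merge_dicts
from freqtrade.util.datetime_helpers import dt_from_ts, dt_now, dt_ts

assert list(chunks([1, 2, 3, 4, 5], 2)) == [[1, 2], [3, 4], [5]]
# Source values win; the destination dict is modified in place and returned.
assert deep_merge_dicts({'a': {'b': 1}}, {'a': {'c': 2}}) == {'a': {'b': 1, 'c': 2}}
ts = dt_ts(dt_now())                      # aware UTC datetime -> millisecond timestamp
assert dt_from_ts(ts).tzinfo is not None  # ... and back, still timezone-aware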
import asyncio import inspect import logging import signal import ccxt import ccxt.async_support as ccxt_async from copy import deepcopy from datetime import datetime, timedelta, timezone from math import floor from threading import Lock from typing import Any, Coroutine, Dict, List, Literal, Optional, Tuple, Union from cachetools import TTLCache from ccxt import TICK_SIZE from dateutil import parser from pandas import DataFrame, concat from freqtrade.constants import (DEFAULT_AMOUNT_RESERVE_PERCENT, NON_OPEN_EXCHANGE_STATES, BidAsk, BuySell, Config, EntryExit, ExchangeConfig, ListPairsWithTimeframes, MakerTaker, OBLiteral, PairWithTimeframe) from freqtrade.data.converter import clean_ohlcv_dataframe, ohlcv_to_dataframe, trades_dict_to_list from freqtrade.enums import OPTIMIZE_MODES, CandleType, MarginMode, PriceType, TradingMode from freqtrade.exceptions import (DDosProtection, ExchangeError, InsufficientFundsError, InvalidOrderException, OperationalException, PricingError, RetryableOrderError, TemporaryError) from freqtrade.exchange.common import (API_FETCH_ORDER_RETRY_COUNT, remove_exchange_credentials, retrier, retrier_async) from freqtrade.exchange.exchange_utils import (ROUND, ROUND_DOWN, ROUND_UP, CcxtModuleType, amount_to_contract_precision, amount_to_contracts, amount_to_precision, contracts_to_amount, date_minus_candles, is_exchange_known_ccxt, market_is_active, price_to_precision, timeframe_to_minutes, timeframe_to_msecs, timeframe_to_next_date, timeframe_to_prev_date, timeframe_to_seconds) from freqtrade.exchange.types import OHLCVResponse, OrderBook, Ticker, Tickers from freqtrade.misc import (chunks, deep_merge_dicts, file_dump_json, file_load_json, safe_value_fallback2) from freqtrade.plugins.pairlist.pairlist_helpers import expand_pairlist from freqtrade.util import dt_from_ts, dt_now from freqtrade.util.datetime_helpers import dt_humanize, dt_ts from freqtrade.persistence import Order
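A minimal sketch of the credential stripping done for dry-run setups (hypothetical config values; remove_exchange_credentials is the helper quoted in the context above and imported here):

from freqtrade.exchange.common import remove_exchange_credentials

exchange_conf = {'key': 'k', 'apiKey': 'k', 'secret': 's', 'password': 'p', 'uid': 'u'}
remove_exchange_credentials(exchange_conf, dry_run=True)
assert exchange_conf['key'] == '' and exchange_conf['secret'] == ''  # blanked in place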
15,462
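# Reading aid for the cropped snippet below (assumption: `one_call` is computed just
# above the crop, roughly timeframe_to_msecs(timeframe) * the exchange candle limit).
# Worked example with hypothetical numbers: a 5m timeframe (300_000 ms) with a
# 500-candle limit and required_candle_call_count == 2 gives
# move_to = 300_000 * 500 * 2 = 300_000_000 ms, so since_ms lands about
# 300_000 s (roughly 3.5 days) before the next candle boundary.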
timeframe, candle_type, since_ms) move_to = one_call * self.required_candle_call_count now = timeframe_to_next_date(timeframe) since_ms = int((now - timedelta(seconds=move_to // 1000)).timestamp() * 1000) if since_ms: return self._async_get_historic_ohlcv( pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type) else: # One call ... "regular" refresh return self._async_get_candle_history( pair, timeframe, since_ms=since_ms, candle_type=candle_type) def _build_ohlcv_dl_jobs( self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: """ Build Coroutines to execute as part of refresh_latest_ohlcv """ input_coroutines: List[Coroutine[Any, Any, OHLCVResponse]] = [] cached_pairs = [] for pair, timeframe, candle_type in set(pair_list): if (timeframe not in self.timeframes and candle_type in (CandleType.SPOT, CandleType.FUTURES)): logger.warning( f"Cannot download ({pair}, {timeframe}) combination as this timeframe is " f"not available on {self.name}. Available timeframes are " f"{', '.join(self.timeframes)}.") continue if ((pair, timeframe, candle_type) not in self._klines or not cache or self._now_is_time_to_refresh(pair, timeframe, candle_type)): input_coroutines.append( self._build_coroutine(pair, timeframe, candle_type, since_ms, cache)) else: logger.debug( f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..." ) cached_pairs.append((pair, timeframe, candle_type)) return input_coroutines, cached_pairs def _process_ohlcv_df(self, pair: str, timeframe: str, c_type: CandleType, ticks: List[List], cache: bool, drop_incomplete: bool) -> DataFrame: # keeping last candle time as last refreshed time of the pair if ticks and cache: idx = -2 if drop_incomplete and len(ticks) > 1 else -1 self._pairs_last_refresh_time[(pair, timeframe, c_type)] = ticks[idx][0] // 1000 # keeping parsed dataframe in cache ohlcv_df = ohlcv_to_dataframe(ticks, timeframe, pair=pair, fill_missing=True, drop_incomplete=drop_incomplete) if cache: if (pair, timeframe, c_type) in self._klines: old = self._klines[(pair, timeframe, c_type)] # Reassign so we return the updated, combined df ohlcv_df = clean_ohlcv_dataframe(concat([old, ohlcv_df], axis=0), timeframe, pair, fill_missing=True, drop_incomplete=False) candle_limit = self.ohlcv_candle_limit(timeframe, self._config['candle_type_def']) # Age out old candles ohlcv_df = ohlcv_df.tail(candle_limit + self._startup_candle_count) ohlcv_df = ohlcv_df.reset_index(drop=True) self._klines[(pair, timeframe, c_type)] = ohlcv_df else: self._klines[(pair, timeframe, c_type)] = ohlcv_df return ohlcv_df def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *, since_ms: Optional[int] = None, cache: bool = True, drop_incomplete: Optional[bool] = None ) -> Dict[PairWithTimeframe, DataFrame]: """ Refresh in-memory OHLCV asynchronously and set `_klines` with the result Loops asynchronously over pair_list and downloads all pairs async (semi-parallel). Only used in the dataprovider.refresh() method. :param pair_list: List of (pair, timeframe, candle_type) tuples to refresh :param since_ms: time since when to download, in milliseconds :param cache: Assign result to _klines. Useful for one-off downloads like for pairlists :param drop_incomplete: Control candle dropping. 
Specifying None defaults to _ohlcv_partial_candle :return: Dict of [{(pair, timeframe): Dataframe}] """ logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list)) # Gather coroutines to run input_coroutines, cached_pairs = self._build_ohlcv_dl_jobs(pair_list, since_ms, cache) results_df = {} # Chunk requests into batches of 100 to avoid overwhelming ccxt throttling for input_coro in chunks(input_coroutines, 100): async def gather_stuff(): return await asyncio.gather(*input_coro, return_exceptions=True) with self._loop_lock: results = self.loop.run_until_complete(gather_stuff()) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") continue # Deconstruct tuple (has 5 elements) pair, timeframe, c_type, ticks, drop_hint = res drop_incomplete_ = drop_hint if drop_incomplete is None else drop_incomplete ohlcv_df = self._process_ohlcv_df( pair, timeframe, c_type, ticks, cache, drop_incomplete_) results_df[(pair, timeframe, c_type)] = ohlcv_df # Return cached klines for pair, timeframe, c_type in cached_pairs: results_df[(pair, timeframe, c_type)] = self.klines( (pair, timeframe, c_type), copy=False ) return results_df def _now_is_time_to_refresh(self, pair: str, timeframe: str, candle_type: CandleType) -> bool: # Timeframe in seconds
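# Hedged usage sketch for refresh_latest_ohlcv as documented above (pair names are
# illustrative): it takes (pair, timeframe, candle_type) tuples and returns a dict
# of DataFrames keyed the same way.
#
#   pairs = [('ETH/BTC', '5m', CandleType.SPOT), ('XRP/BTC', '5m', CandleType.SPOT)]
#   ohlcv = exchange.refresh_latest_ohlcv(pairs)
#   df = ohlcv[('ETH/BTC', '5m', CandleType.SPOT)]  # OHLCV candles as a DataFrame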
# pragma pylint: disable=W0603 """ Cryptocurrency Exchanges support """ logger = logging.getLogger(__name__) class Exchange: # Parameters to add directly to buy/sell calls (like agreeing to trading agreement) _params: Dict = {} # Additional parameters - added to the ccxt object _ccxt_params: Dict = {} # Dict to specify which options each exchange implements # This defines defaults, which can be selectively overridden by subclasses using _ft_has # or by specifying them in the configuration. _ft_has_default: Dict = { "stoploss_on_exchange": False, "stop_price_param": "stopLossPrice", # Used for stoploss_on_exchange request "stop_price_prop": "stopLossPrice", # Used for stoploss_on_exchange response parsing "order_time_in_force": ["GTC"], "ohlcv_params": {}, "ohlcv_candle_limit": 500, "ohlcv_has_history": True, # Some exchanges (Kraken) don't provide history via ohlcv "ohlcv_partial_candle": True, "ohlcv_require_since": False, # Check https://github.com/ccxt/ccxt/issues/10767 for removal of ohlcv_volume_currency "ohlcv_volume_currency": "base", # "base" or "quote" "tickers_have_quoteVolume": True, "tickers_have_bid_ask": True, # bid / ask empty for fetch_tickers "tickers_have_price": True, "trades_pagination": "time", # Possible are "time" or "id" "trades_pagination_arg": "since", "l2_limit_range": None, "l2_limit_range_required": True, # Allow Empty L2 limit (kucoin) "mark_ohlcv_price": "mark", "mark_ohlcv_timeframe": "8h", "ccxt_futures_name": "swap", "needs_trading_fees": False, # use fetch_trading_fees to cache fees "order_props_in_contracts": ['amount', 'filled', 'remaining'], # Override createMarketBuyOrderRequiresPrice where ccxt has it wrong "marketOrderRequiresPrice": False, } _ft_has: Dict = {} _ft_has_futures: Dict = {} _supported_trading_mode_margin_pairs: List[Tuple[TradingMode, MarginMode]] = [ # TradingMode.SPOT always supported and not required in this list ] def __init__(self, config: Config, *, exchange_config: Optional[ExchangeConfig] = None, validate: bool = True, load_leverage_tiers: bool = False) -> None: """ Initializes this module with the given config and does basic validation to check whether the specified exchange and pairs are valid. :return: None """ self._api: ccxt.Exchange self._api_async: ccxt_async.Exchange = None self._markets: Dict = {} self._trading_fees: Dict[str, Any] = {} self._leverage_tiers: Dict[str, List[Dict]] = {} # Lock event loop. This is necessary to avoid race-conditions when using force* commands # Due to funding fee fetching. self._loop_lock = Lock() self.loop = self._init_async_loop() self._config: Config = {} self._config.update(config) # Holds last candle refreshed time of each pair self._pairs_last_refresh_time: Dict[PairWithTimeframe, int] = {} # Timestamp of last markets refresh self._last_markets_refresh: int = 0 # Cache for 10 minutes ... self._cache_lock = Lock() self._fetch_tickers_cache: TTLCache = TTLCache(maxsize=2, ttl=60 * 10) # Cache values for 1800 seconds to avoid frequent polling of the exchange for prices # Caching only applies to RPC methods, so prices for open trades are still # refreshed once every iteration. 
self._exit_rate_cache: TTLCache = TTLCache(maxsize=100, ttl=1800) self._entry_rate_cache: TTLCache = TTLCache(maxsize=100, ttl=1800) # Holds candles self._klines: Dict[PairWithTimeframe, DataFrame] = {} # Holds all open sell orders for dry_run self._dry_run_open_orders: Dict[str, Any] = {} if config['dry_run']: logger.info('Instance is running with dry_run enabled') logger.info(f"Using CCXT {ccxt.__version__}") exchange_conf: Dict[str, Any] = exchange_config if exchange_config else config['exchange'] remove_exchange_credentials(exchange_conf, config.get('dry_run', False)) self.log_responses = exchange_conf.get('log_responses', False) # Leverage properties self.trading_mode: TradingMode = config.get('trading_mode', TradingMode.SPOT) self.margin_mode: MarginMode = ( MarginMode(config.get('margin_mode')) if config.get('margin_mode') else MarginMode.NONE ) self.liquidation_buffer = config.get('liquidation_buffer', 0.05) # Deep merge ft_has with default ft_has options self._ft_has = deep_merge_dicts(self._ft_has, deepcopy(self._ft_has_default)) if self.trading_mode == TradingMode.FUTURES: self._ft_has = deep_merge_dicts(self._ft_has_futures, self._ft_has) if exchange_conf.get('_ft_has_params'): self._ft_has = deep_merge_dicts(exchange_conf.get('_ft_has_params'), self._ft_has) logger.info("Overriding exchange._ft_has with config params, result: %s", self._ft_has) # Assign this directly for easy access self._ohlcv_partial_candle = self._ft_has['ohlcv_partial_candle'] self._trades_pagination = self._ft_has['trades_pagination'] self._trades_pagination_arg = self._ft_has['trades_pagination_arg'] # Initialize ccxt objects ccxt_config = self._ccxt_config ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_config) ccxt_config = deep_merge_dicts(exchange_conf.get('ccxt_sync_config', {}), ccxt_config) self._api = self._init_ccxt(exchange_conf, ccxt_kwargs=ccxt_config) ccxt_async_config = self._ccxt_config ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_config', {}), ccxt_async_config) ccxt_async_config = deep_merge_dicts(exchange_conf.get('ccxt_async_config', {}), ccxt_async_config) self._api_async = self._init_ccxt( exchange_conf, ccxt_async, ccxt_kwargs=ccxt_async_config) logger.info(f'Using Exchange "{self.name}"') self.required_candle_call_count = 1 if validate: # Initial markets load self._load_markets() self.validate_config(config) self._startup_candle_count: int = config.get('startup_candle_count', 0) self.required_candle_call_count = self.validate_required_startup_candles( self._startup_candle_count, config.get('timeframe', '')) # Converts the interval provided in minutes in config to milliseconds self.markets_refresh_interval: int = exchange_conf.get( "markets_refresh_interval", 60) * 60 * 1000 if self.trading_mode != TradingMode.SPOT and load_leverage_tiers: self.fill_leverage_tiers() self.additional_exchange_init() def __del__(self): """ Destructor - clean up async stuff """ self.close() def close(self): logger.debug("Exchange object destroyed, closing async loop") if (self._api_async and inspect.iscoroutinefunction(self._api_async.close) and self._api_async.session): logger.debug("Closing async ccxt session.") self.loop.run_until_complete(self._api_async.close()) if self.loop and not self.loop.is_closed(): self.loop.close() def _init_async_loop(self) -> asyncio.AbstractEventLoop: loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) return loop def validate_config(self, config): # Check if timeframe is available 
self.validate_timeframes(config.get('timeframe')) # Check if all pairs are available self.validate_stakecurrency(config['stake_currency']) if not config['exchange'].get('skip_pair_validation'): self.validate_pairs(config['exchange']['pair_whitelist']) self.validate_ordertypes(config.get('order_types', {})) self.validate_order_time_in_force(config.get('order_time_in_force', {})) self.validate_trading_mode_and_margin_mode(self.trading_mode, self.margin_mode) self.validate_pricing(config['exit_pricing']) self.validate_pricing(config['entry_pricing']) def _init_ccxt(self, exchange_config: Dict[str, Any], ccxt_module: CcxtModuleType = ccxt, ccxt_kwargs: Dict = {}) -> ccxt.Exchange: """ Initialize ccxt with given config and return valid ccxt instance. """ # Find matching class for the given exchange name name = exchange_config['name'] if not is_exchange_known_ccxt(name, ccxt_module): raise OperationalException(f'Exchange {name} is not supported by ccxt') ex_config = { 'apiKey': exchange_config.get('key'), 'secret': exchange_config.get('secret'), 'password': exchange_config.get('password'), 'uid': exchange_config.get('uid', ''), } if ccxt_kwargs: logger.info('Applying additional ccxt config: %s', ccxt_kwargs) if self._ccxt_params: # Inject static options after the above output to not confuse users. ccxt_kwargs = deep_merge_dicts(self._ccxt_params, ccxt_kwargs) if ccxt_kwargs: ex_config.update(ccxt_kwargs) try: api = getattr(ccxt_module, name.lower())(ex_config) except (KeyError, AttributeError) as e: raise OperationalException(f'Exchange {name} is not supported') from e except ccxt.BaseError as e: raise OperationalException(f"Initialization of ccxt failed. Reason: {e}") from e return api @property def _ccxt_config(self) -> Dict: # Parameters to add directly to ccxt sync/async initialization. if self.trading_mode == TradingMode.MARGIN: return { "options": { "defaultType": "margin" } } elif self.trading_mode == TradingMode.FUTURES: return { "options": { "defaultType": self._ft_has["ccxt_futures_name"] } } else: return {} @property def name(self) -> str: """exchange Name (from ccxt)""" return self._api.name @property def id(self) -> str: """exchange ccxt id""" return self._api.id @property def timeframes(self) -> List[str]: return list((self._api.timeframes or {}).keys()) @property def markets(self) -> Dict[str, Any]: """exchange ccxt markets""" if not self._markets: logger.info("Markets were not loaded. Loading them now..") self._load_markets() return self._markets @property def precisionMode(self) -> int: """exchange ccxt precisionMode""" return self._api.precisionMode def additional_exchange_init(self) -> None: """ Additional exchange initialization logic. .api will be available at this point. Must be overridden in child methods if required. """ pass def _log_exchange_response(self, endpoint, response) -> None: """ Log exchange responses """ if self.log_responses: logger.info(f"API {endpoint}: {response}") def ohlcv_candle_limit( self, timeframe: str, candle_type: CandleType, since_ms: Optional[int] = None) -> int: """ Exchange ohlcv candle limit Uses ohlcv_candle_limit_per_timeframe if the exchange has different limits per timeframe (e.g. 
bittrex), otherwise falls back to ohlcv_candle_limit :param timeframe: Timeframe to check :param candle_type: Candle-type :param since_ms: Starting timestamp :return: Candle limit as integer """ return int(self._ft_has.get('ohlcv_candle_limit_per_timeframe', {}).get( timeframe, self._ft_has.get('ohlcv_candle_limit'))) def get_markets(self, base_currencies: List[str] = [], quote_currencies: List[str] = [], spot_only: bool = False, margin_only: bool = False, futures_only: bool = False, tradable_only: bool = True, active_only: bool = False) -> Dict[str, Any]: """ Return exchange ccxt markets, filtered out by base currency and quote currency if this was requested in parameters. """ markets = self.markets if not markets: raise OperationalException("Markets were not loaded.") if base_currencies: markets = {k: v for k, v in markets.items() if v['base'] in base_currencies} if quote_currencies: markets = {k: v for k, v in markets.items() if v['quote'] in quote_currencies} if tradable_only: markets = {k: v for k, v in markets.items() if self.market_is_tradable(v)} if spot_only: markets = {k: v for k, v in markets.items() if self.market_is_spot(v)} if margin_only: markets = {k: v for k, v in markets.items() if self.market_is_margin(v)} if futures_only: markets = {k: v for k, v in markets.items() if self.market_is_future(v)} if active_only: markets = {k: v for k, v in markets.items() if market_is_active(v)} return markets def get_quote_currencies(self) -> List[str]: """ Return a list of supported quote currencies """ markets = self.markets return sorted(set([x['quote'] for _, x in markets.items()])) def get_pair_quote_currency(self, pair: str) -> str: """ Return a pair's quote currency (base/quote:settlement) """ return self.markets.get(pair, {}).get('quote', '') def get_pair_base_currency(self, pair: str) -> str: """ Return a pair's base currency (base/quote:settlement) """ return self.markets.get(pair, {}).get('base', '') def market_is_future(self, market: Dict[str, Any]) -> bool: return ( market.get(self._ft_has["ccxt_futures_name"], False) is True and market.get('linear', False) is True ) def market_is_spot(self, market: Dict[str, Any]) -> bool: return market.get('spot', False) is True def market_is_margin(self, market: Dict[str, Any]) -> bool: return market.get('margin', False) is True def market_is_tradable(self, market: Dict[str, Any]) -> bool: """ Check if the market symbol is tradable by Freqtrade. 
Ensures that the configured trading mode (spot / margin / futures) aligns with the market type. """ return ( market.get('quote', None) is not None and market.get('base', None) is not None and (self.precisionMode != TICK_SIZE # Too low precision will falsify calculations or market.get('precision', {}).get('price') > 1e-11) and ((self.trading_mode == TradingMode.SPOT and self.market_is_spot(market)) or (self.trading_mode == TradingMode.MARGIN and self.market_is_margin(market)) or (self.trading_mode == TradingMode.FUTURES and self.market_is_future(market))) ) def klines(self, pair_interval: PairWithTimeframe, copy: bool = True) -> DataFrame: if pair_interval in self._klines: return self._klines[pair_interval].copy() if copy else self._klines[pair_interval] else: return DataFrame() def get_contract_size(self, pair: str) -> Optional[float]: if self.trading_mode == TradingMode.FUTURES: market = self.markets.get(pair, {}) contract_size: float = 1.0 if not market: return None if market.get('contractSize') is not None: # ccxt has contractSize in markets as string contract_size = float(market['contractSize']) return contract_size else: return 1 def _trades_contracts_to_amount(self, trades: List) -> List: if len(trades) > 0 and 'symbol' in trades[0]: contract_size = self.get_contract_size(trades[0]['symbol']) if contract_size != 1: for trade in trades: trade['amount'] = trade['amount'] * contract_size return trades def _order_contracts_to_amount(self, order: Dict) -> Dict: if 'symbol' in order and order['symbol'] is not None: contract_size = self.get_contract_size(order['symbol']) if contract_size != 1: for prop in self._ft_has.get('order_props_in_contracts', []): if prop in order and order[prop] is not None: order[prop] = order[prop] * contract_size return order def _amount_to_contracts(self, pair: str, amount: float) -> float: contract_size = self.get_contract_size(pair) return amount_to_contracts(amount, contract_size) def _contracts_to_amount(self, pair: str, num_contracts: float) -> float: contract_size = self.get_contract_size(pair) return contracts_to_amount(num_contracts, contract_size) def amount_to_contract_precision(self, pair: str, amount: float) -> float: """ Helper wrapper around amount_to_contract_precision """ contract_size = self.get_contract_size(pair) return amount_to_contract_precision(amount, self.get_precision_amount(pair), self.precisionMode, contract_size) def _load_async_markets(self, reload: bool = False) -> None: try: if self._api_async: self.loop.run_until_complete( self._api_async.load_markets(reload=reload, params={})) except (asyncio.TimeoutError, ccxt.BaseError) as e: logger.warning('Could not load async markets. 
Reason: %s', e) return def _load_markets(self) -> None: """ Initialize markets both sync and async """ try: self._markets = self._api.load_markets(params={}) self._load_async_markets() self._last_markets_refresh = dt_ts() if self._ft_has['needs_trading_fees']: self._trading_fees = self.fetch_trading_fees() except ccxt.BaseError: logger.exception('Unable to initialize markets.') def reload_markets(self) -> None: """Reload markets both sync and async if refresh interval has passed """ # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > dt_ts()): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True, params={}) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = dt_ts() self.fill_leverage_tiers() except ccxt.BaseError: logger.exception("Could not reload markets.") def validate_stakecurrency(self, stake_currency: str) -> None: """ Checks stake-currency against available currencies on the exchange. Only runs on startup. If markets have not been loaded, there's been a problem with the connection to the exchange. :param stake_currency: Stake-currency to validate :raise: OperationalException if stake-currency is not available. """ if not self._markets: raise OperationalException( 'Could not load markets, therefore cannot start. ' 'Please investigate the above error for more details.' ) quote_currencies = self.get_quote_currencies() if stake_currency not in quote_currencies: raise OperationalException( f"{stake_currency} is not available as stake on {self.name}. " f"Available currencies are: {', '.join(quote_currencies)}") def validate_pairs(self, pairs: List[str]) -> None: """ Checks if all given pairs are tradable on the current exchange. :param pairs: list of pairs :raise: OperationalException if one pair is not available :return: None """ if not self.markets: logger.warning('Unable to validate pairs (assuming they are correct).') return extended_pairs = expand_pairlist(pairs, list(self.markets), keep_invalid=True) invalid_pairs = [] for pair in extended_pairs: # Note: ccxt has BaseCurrency/QuoteCurrency format for pairs if self.markets and pair not in self.markets: raise OperationalException( f'Pair {pair} is not available on {self.name} {self.trading_mode.value}. ' f'Please remove {pair} from your whitelist.') # From ccxt Documentation: # markets.info: An associative array of non-common market properties, # including fees, rates, limits and other general market information. # The internal info array is different for each particular market, # its contents depend on the exchange. # It can also be a string or similar ... so we need to verify that first. elif (isinstance(self.markets[pair].get('info'), dict) and self.markets[pair].get('info', {}).get('prohibitedIn', False)): # Warn users about restricted pairs in whitelist. # We cannot determine reliably if Users are affected. logger.warning(f"Pair {pair} is restricted for some users on this exchange. " f"Please check if you are impacted by this restriction " f"on the exchange and, if so, remove {pair} from your whitelist.") if (self._config['stake_currency'] and self.get_pair_quote_currency(pair) != self._config['stake_currency']): invalid_pairs.append(pair) if invalid_pairs: raise OperationalException( f"Stake-currency '{self._config['stake_currency']}' not compatible with " f"pair-whitelist. 
Please remove the following pairs: {invalid_pairs}") def get_valid_pair_combination(self, curr_1: str, curr_2: str) -> str: """ Get valid pair combination of curr_1 and curr_2 by trying both combinations. """ for pair in [f"{curr_1}/{curr_2}", f"{curr_2}/{curr_1}"]: if pair in self.markets and self.markets[pair].get('active'): return pair raise ValueError(f"Could not combine {curr_1} and {curr_2} to get a valid pair.") def validate_timeframes(self, timeframe: Optional[str]) -> None: """ Check if timeframe from config is a supported timeframe on the exchange """ if not hasattr(self._api, "timeframes") or self._api.timeframes is None: # If timeframes attribute is missing (or is None), the exchange probably # has no fetchOHLCV method. # Therefore we also show that. raise OperationalException( f"The ccxt library does not provide the list of timeframes " f"for the exchange {self.name} and this exchange " f"is therefore not supported. ccxt fetchOHLCV: {self.exchange_has('fetchOHLCV')}") if timeframe and (timeframe not in self.timeframes): raise OperationalException( f"Invalid timeframe '{timeframe}'. This exchange supports: {self.timeframes}") if timeframe and timeframe_to_minutes(timeframe) < 1: raise OperationalException("Timeframes < 1m are currently not supported by Freqtrade.") def validate_ordertypes(self, order_types: Dict) -> None: """ Checks if order-types configured in strategy/config are supported """ if any(v == 'market' for k, v in order_types.items()): if not self.exchange_has('createMarketOrder'): raise OperationalException( f'Exchange {self.name} does not support market orders.') self.validate_stop_ordertypes(order_types) def validate_stop_ordertypes(self, order_types: Dict) -> None: """ Validate stoploss order types """ if (order_types.get("stoploss_on_exchange") and not self._ft_has.get("stoploss_on_exchange", False)): raise OperationalException( f'On exchange stoploss is not supported for {self.name}.' ) if self.trading_mode == TradingMode.FUTURES: price_mapping = self._ft_has.get('stop_price_type_value_mapping', {}).keys() if ( order_types.get("stoploss_on_exchange", False) is True and 'stoploss_price_type' in order_types and order_types['stoploss_price_type'] not in price_mapping ): raise OperationalException( f'On exchange stoploss price type is not supported for {self.name}.' ) def validate_pricing(self, pricing: Dict) -> None: if pricing.get('use_order_book', False) and not self.exchange_has('fetchL2OrderBook'): raise OperationalException(f'Orderbook not available for {self.name}.') if (not pricing.get('use_order_book', False) and ( not self.exchange_has('fetchTicker') or not self._ft_has['tickers_have_price'])): raise OperationalException(f'Ticker pricing not available for {self.name}.') def validate_order_time_in_force(self, order_time_in_force: Dict) -> None: """ Checks if order time in force configured in strategy/config are supported """ if any(v.upper() not in self._ft_has["order_time_in_force"] for k, v in order_time_in_force.items()): raise OperationalException( f'Time in force policies are not supported for {self.name} yet.') def validate_required_startup_candles(self, startup_candles: int, timeframe: str) -> int: """ Checks if required startup_candles is more than ohlcv_candle_limit(). Requires a grace-period of 5 candles - so a startup-period up to 494 is allowed by default. 
""" candle_limit = self.ohlcv_candle_limit( timeframe, self._config['candle_type_def'], int(date_minus_candles(timeframe, startup_candles).timestamp() * 1000) if timeframe else None) # Require one more candle - to account for the still open candle. candle_count = startup_candles + 1 # Allow 5 calls to the exchange per pair required_candle_call_count = int( (candle_count / candle_limit) + (0 if candle_count % candle_limit == 0 else 1)) if self._ft_has['ohlcv_has_history']: if required_candle_call_count > 5: # Only allow 5 calls per pair to somewhat limit the impact raise OperationalException( f"This strategy requires {startup_candles} candles to start, " "which is more than 5x " f"the amount of candles {self.name} provides for {timeframe}.") elif required_candle_call_count > 1: raise OperationalException( f"This strategy requires {startup_candles} candles to start, which is more than " f"the amount of candles {self.name} provides for {timeframe}.") if required_candle_call_count > 1: logger.warning(f"Using {required_candle_call_count} calls to get OHLCV. " f"This can result in slower operations for the bot. Please check " f"if you really need {startup_candles} candles for your strategy") return required_candle_call_count def validate_trading_mode_and_margin_mode( self, trading_mode: TradingMode, margin_mode: Optional[MarginMode] # Only None when trading_mode = TradingMode.SPOT ): """ Checks if freqtrade can perform trades using the configured trading mode(Margin, Futures) and MarginMode(Cross, Isolated) Throws OperationalException: If the trading_mode/margin_mode type are not supported by freqtrade on this exchange """ if trading_mode != TradingMode.SPOT and ( (trading_mode, margin_mode) not in self._supported_trading_mode_margin_pairs ): mm_value = margin_mode and margin_mode.value raise OperationalException( f"Freqtrade does not support {mm_value} {trading_mode.value} on {self.name}" ) def get_option(self, param: str, default: Optional[Any] = None) -> Any: """ Get parameter value from _ft_has """ return self._ft_has.get(param, default) def exchange_has(self, endpoint: str) -> bool: """ Checks if exchange implements a specific API endpoint. Wrapper around ccxt 'has' attribute :param endpoint: Name of endpoint (e.g. 'fetchOHLCV', 'fetchTickers') :return: bool """ return endpoint in self._api.has and self._api.has[endpoint] def get_precision_amount(self, pair: str) -> Optional[float]: """ Returns the amount precision of the exchange. :param pair: Pair to get precision for :return: precision for amount or None. Must be used in combination with precisionMode """ return self.markets.get(pair, {}).get('precision', {}).get('amount', None) def get_precision_price(self, pair: str) -> Optional[float]: """ Returns the price precision of the exchange. :param pair: Pair to get precision for :return: precision for price or None. Must be used in combination with precisionMode """ return self.markets.get(pair, {}).get('precision', {}).get('price', None) def amount_to_precision(self, pair: str, amount: float) -> float: """ Returns the amount to buy or sell to a precision the Exchange accepts """ return amount_to_precision(amount, self.get_precision_amount(pair), self.precisionMode) def price_to_precision(self, pair: str, price: float, *, rounding_mode: int = ROUND) -> float: """ Returns the price rounded to the precision the Exchange accepts. The default price_rounding_mode in conf is ROUND. For stoploss calculations, must use ROUND_UP for longs, and ROUND_DOWN for shorts. 
""" return price_to_precision(price, self.get_precision_price(pair), self.precisionMode, rounding_mode=rounding_mode) def price_get_one_pip(self, pair: str, price: float) -> float: """ Get's the "1 pip" value for this pair. Used in PriceFilter to calculate the 1pip movements. """ precision = self.markets[pair]['precision']['price'] if self.precisionMode == TICK_SIZE: return precision else: return 1 / pow(10, precision) def get_min_pair_stake_amount( self, pair: str, price: float, stoploss: float, leverage: Optional[float] = 1.0 ) -> Optional[float]: return self._get_stake_amount_limit(pair, price, stoploss, 'min', leverage) def get_max_pair_stake_amount(self, pair: str, price: float, leverage: float = 1.0) -> float: max_stake_amount = self._get_stake_amount_limit(pair, price, 0.0, 'max', leverage) if max_stake_amount is None: # * Should never be executed raise OperationalException(f'{self.name}.get_max_pair_stake_amount should' 'never set max_stake_amount to None') return max_stake_amount def _get_stake_amount_limit( self, pair: str, price: float, stoploss: float, limit: Literal['min', 'max'], leverage: Optional[float] = 1.0 ) -> Optional[float]: isMin = limit == 'min' try: market = self.markets[pair] except KeyError: raise ValueError(f"Can't get market information for symbol {pair}") if isMin: # reserve some percent defined in config (5% default) + stoploss margin_reserve: float = 1.0 + self._config.get('amount_reserve_percent', DEFAULT_AMOUNT_RESERVE_PERCENT) stoploss_reserve = ( margin_reserve / (1 - abs(stoploss)) if abs(stoploss) != 1 else 1.5 ) # it should not be more than 50% stoploss_reserve = max(min(stoploss_reserve, 1.5), 1) else: margin_reserve = 1.0 stoploss_reserve = 1.0 stake_limits = [] limits = market['limits'] if (limits['cost'][limit] is not None): stake_limits.append( self._contracts_to_amount(pair, limits['cost'][limit]) * stoploss_reserve ) if (limits['amount'][limit] is not None): stake_limits.append( self._contracts_to_amount(pair, limits['amount'][limit]) * price * margin_reserve ) if not stake_limits: return None if isMin else float('inf') # The value returned should satisfy both limits: for amount (base currency) and # for cost (quote, stake currency), so max() is used here. # See also #2575 at github. 
return self._get_stake_amount_considering_leverage( max(stake_limits) if isMin else min(stake_limits), leverage or 1.0 ) def _get_stake_amount_considering_leverage(self, stake_amount: float, leverage: float) -> float: """ Takes the minimum stake amount for a pair with no leverage and returns the minimum stake amount when leverage is considered :param stake_amount: The stake amount for a pair before leverage is considered :param leverage: The amount of leverage being used on the current trade """ return stake_amount / leverage # Dry-run methods def create_dry_run_order(self, pair: str, ordertype: str, side: str, amount: float, rate: float, leverage: float, params: Dict = {}, stop_loss: bool = False) -> Dict[str, Any]: now = dt_now() order_id = f'dry_run_{side}_{pair}_{now.timestamp()}' # Rounding here must respect contract sizes _amount = self._contracts_to_amount( pair, self.amount_to_precision(pair, self._amount_to_contracts(pair, amount))) dry_order: Dict[str, Any] = { 'id': order_id, 'symbol': pair, 'price': rate, 'average': rate, 'amount': _amount, 'cost': _amount * rate, 'type': ordertype, 'side': side, 'filled': 0, 'remaining': _amount, 'datetime': now.strftime('%Y-%m-%dT%H:%M:%S.%fZ'), 'timestamp': dt_ts(now), 'status': "open", 'fee': None, 'info': {}, 'leverage': leverage } if stop_loss: dry_order["info"] = {"stopPrice": dry_order["price"]} dry_order[self._ft_has['stop_price_prop']] = dry_order["price"] # Workaround to avoid filling stoploss orders immediately dry_order["ft_order_type"] = "stoploss" orderbook: Optional[OrderBook] = None if self.exchange_has('fetchL2OrderBook'): orderbook = self.fetch_l2_order_book(pair, 20) if ordertype == "limit" and orderbook: # Allow a 1% price difference allowed_diff = 0.01 if self._dry_is_price_crossed(pair, side, rate, orderbook, allowed_diff): logger.info( f"Converted order {pair} to market order due to price {rate} crossing spread " f"by more than {allowed_diff:.2%}.") dry_order["type"] = "market" if dry_order["type"] == "market" and not dry_order.get("ft_order_type"): # Update market order pricing average = self.get_dry_market_fill_price(pair, side, amount, rate, orderbook) dry_order.update({ 'average': average, 'filled': _amount, 'remaining': 0.0, 'status': "closed", 'cost': (dry_order['amount'] * average) }) # market orders will always incur taker fees dry_order = self.add_dry_order_fee(pair, dry_order, 'taker') dry_order = self.check_dry_limit_order_filled( dry_order, immediate=True, orderbook=orderbook) self._dry_run_open_orders[dry_order["id"]] = dry_order # Copy order and close it - so the returned order is open unless it's a market order return dry_order def add_dry_order_fee( self, pair: str, dry_order: Dict[str, Any], taker_or_maker: MakerTaker, ) -> Dict[str, Any]: fee = self.get_fee(pair, taker_or_maker=taker_or_maker) dry_order.update({ 'fee': { 'currency': self.get_pair_quote_currency(pair), 'cost': dry_order['cost'] * fee, 'rate': fee } }) return dry_order def get_dry_market_fill_price(self, pair: str, side: str, amount: float, rate: float, orderbook: Optional[OrderBook]) -> float: """ Get the market order fill price based on orderbook interpolation """ if self.exchange_has('fetchL2OrderBook'): if not orderbook: orderbook = self.fetch_l2_order_book(pair, 20) ob_type: OBLiteral = 'asks' if side == 'buy' else 'bids' slippage = 0.05 max_slippage_val = rate * ((1 + slippage) if side == 'buy' else (1 - slippage)) remaining_amount = amount filled_value = 0.0 book_entry_price = 0.0 for book_entry in orderbook[ob_type]: 
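                # Each book entry is [price, volume]; walk the book consuming volume
                # until the requested amount is filled (or the book is exhausted).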
book_entry_price = book_entry[0] book_entry_coin_volume = book_entry[1] if remaining_amount > 0: if remaining_amount < book_entry_coin_volume: # Orderbook at this slot bigger than remaining amount filled_value += remaining_amount * book_entry_price break else: filled_value += book_entry_coin_volume * book_entry_price remaining_amount -= book_entry_coin_volume else: break else: # If remaining_amount wasn't consumed completely (break was not called) filled_value += remaining_amount * book_entry_price forecast_avg_filled_price = max(filled_value, 0) / amount # Limit max. slippage to specified value if side == 'buy': forecast_avg_filled_price = min(forecast_avg_filled_price, max_slippage_val) else: forecast_avg_filled_price = max(forecast_avg_filled_price, max_slippage_val) return self.price_to_precision(pair, forecast_avg_filled_price) return rate def _dry_is_price_crossed(self, pair: str, side: str, limit: float, orderbook: Optional[OrderBook] = None, offset: float = 0.0) -> bool: if not self.exchange_has('fetchL2OrderBook'): return True if not orderbook: orderbook = self.fetch_l2_order_book(pair, 1) try: if side == 'buy': price = orderbook['asks'][0][0] if limit * (1 - offset) >= price: return True else: price = orderbook['bids'][0][0] if limit * (1 + offset) <= price: return True except IndexError: # Ignore empty orderbooks when filling - can be filled with the next iteration. pass return False def check_dry_limit_order_filled( self, order: Dict[str, Any], immediate: bool = False, orderbook: Optional[OrderBook] = None) -> Dict[str, Any]: """ Check dry-run limit order fill and update fee (if it filled). """ if (order['status'] != "closed" and order['type'] in ["limit"] and not order.get('ft_order_type')): pair = order['symbol'] if self._dry_is_price_crossed(pair, order['side'], order['price'], orderbook): order.update({ 'status': 'closed', 'filled': order['amount'], 'remaining': 0, }) self.add_dry_order_fee( pair, order, 'taker' if immediate else 'maker', ) return order def fetch_dry_run_order(self, order_id) -> Dict[str, Any]: """ Return dry-run order Only call if running in dry-run mode. """ try: order = self._dry_run_open_orders[order_id] order = self.check_dry_limit_order_filled(order) return order except KeyError as e: order = Order.order_by_id(order_id) if order: ccxt_order = order.to_ccxt_object(self._ft_has['stop_price_prop']) self._dry_run_open_orders[order_id] = ccxt_order return ccxt_order # Gracefully handle errors with dry-run orders. raise InvalidOrderException( f'Tried to get an invalid dry-run-order (id: {order_id}). 
Message: {e}') from e # Order handling def _lev_prep(self, pair: str, leverage: float, side: BuySell, accept_fail: bool = False): if self.trading_mode != TradingMode.SPOT: self.set_margin_mode(pair, self.margin_mode, accept_fail) self._set_leverage(leverage, pair, accept_fail) def _get_params( self, side: BuySell, ordertype: str, leverage: float, reduceOnly: bool, time_in_force: str = 'GTC', ) -> Dict: params = self._params.copy() if time_in_force != 'GTC' and ordertype != 'market': params.update({'timeInForce': time_in_force.upper()}) if reduceOnly: params.update({'reduceOnly': True}) return params def _order_needs_price(self, ordertype: str) -> bool: return ( ordertype != 'market' or self._api.options.get("createMarketBuyOrderRequiresPrice", False) or self._ft_has.get('marketOrderRequiresPrice', False) ) def create_order( self, *, pair: str, ordertype: str, side: BuySell, amount: float, rate: float, leverage: float, reduceOnly: bool = False, time_in_force: str = 'GTC', ) -> Dict: if self._config['dry_run']: dry_order = self.create_dry_run_order( pair, ordertype, side, amount, self.price_to_precision(pair, rate), leverage) return dry_order params = self._get_params(side, ordertype, leverage, reduceOnly, time_in_force) try: # Set the precision for amount and price (rate) as accepted by the exchange amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) needs_price = self._order_needs_price(ordertype) rate_for_order = self.price_to_precision(pair, rate) if needs_price else None if not reduceOnly: self._lev_prep(pair, leverage, side) order = self._api.create_order( pair, ordertype, side, amount, rate_for_order, params, ) if order.get('status') is None: # Map empty status to open. order['status'] = 'open' if order.get('type') is None: order['type'] = ordertype self._log_exchange_response('create_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( f'Insufficient funds to create {ordertype} {side} order on market {pair}. ' f'Tried to {side} amount {amount} at rate {rate}. ' f'Message: {e}') from e except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Could not create {ordertype} {side} order on market {pair}. ' f'Tried to {side} amount {amount} at rate {rate}. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not place {side} order due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def stoploss_adjust(self, stop_loss: float, order: Dict, side: str) -> bool: """ Verify stop_loss against stoploss-order value (limit or price) Returns True if adjustment is necessary. 
""" if not self._ft_has.get('stoploss_on_exchange'): raise OperationalException(f"stoploss is not implemented for {self.name}.") price_param = self._ft_has['stop_price_prop'] return ( order.get(price_param, None) is None or ((side == "sell" and stop_loss > float(order[price_param])) or (side == "buy" and stop_loss < float(order[price_param]))) ) def _get_stop_order_type(self, user_order_type) -> Tuple[str, str]: available_order_Types: Dict[str, str] = self._ft_has["stoploss_order_types"] if user_order_type in available_order_Types.keys(): ordertype = available_order_Types[user_order_type] else: # Otherwise pick only one available ordertype = list(available_order_Types.values())[0] user_order_type = list(available_order_Types.keys())[0] return ordertype, user_order_type def _get_stop_limit_rate(self, stop_price: float, order_types: Dict, side: str) -> float: # Limit price threshold: As limit price should always be below stop-price limit_price_pct = order_types.get('stoploss_on_exchange_limit_ratio', 0.99) if side == "sell": limit_rate = stop_price * limit_price_pct else: limit_rate = stop_price * (2 - limit_price_pct) bad_stop_price = ((stop_price < limit_rate) if side == "sell" else (stop_price > limit_rate)) # Ensure rate is less than stop price if bad_stop_price: # This can for example happen if the stop / liquidation price is set to 0 # Which is possible if a market-order closes right away. # The InvalidOrderException will bubble up to exit_positions, where it will be # handled gracefully. raise InvalidOrderException( "In stoploss limit order, stop price should be more than limit price. " f"Stop price: {stop_price}, Limit price: {limit_rate}, " f"Limit Price pct: {limit_price_pct}" ) return limit_rate def _get_stop_params(self, side: BuySell, ordertype: str, stop_price: float) -> Dict: params = self._params.copy() # Verify if stopPrice works for your exchange, else configure stop_price_param params.update({self._ft_has['stop_price_param']: stop_price}) return params @retrier(retries=0) def create_stoploss(self, pair: str, amount: float, stop_price: float, order_types: Dict, side: BuySell, leverage: float) -> Dict: """ creates a stoploss order. requires `_ft_has['stoploss_order_types']` to be set as a dict mapping limit and market to the corresponding exchange type. The precise ordertype is determined by the order_types dict or exchange default. The exception below should never raise, since we disallow starting the bot in validate_ordertypes() This may work with a limited number of other exchanges, but correct working needs to be tested individually. WARNING: setting `stoploss_on_exchange` to True will NOT auto-enable stoploss on exchange. `stoploss_adjust` must still be implemented for this to work. 
""" if not self._ft_has['stoploss_on_exchange']: raise OperationalException(f"stoploss is not implemented for {self.name}.") user_order_type = order_types.get('stoploss', 'market') ordertype, user_order_type = self._get_stop_order_type(user_order_type) round_mode = ROUND_DOWN if side == 'buy' else ROUND_UP stop_price_norm = self.price_to_precision(pair, stop_price, rounding_mode=round_mode) limit_rate = None if user_order_type == 'limit': limit_rate = self._get_stop_limit_rate(stop_price, order_types, side) limit_rate = self.price_to_precision(pair, limit_rate, rounding_mode=round_mode) if self._config['dry_run']: dry_order = self.create_dry_run_order( pair, ordertype, side, amount, stop_price_norm, stop_loss=True, leverage=leverage, ) return dry_order try: params = self._get_stop_params(side=side, ordertype=ordertype, stop_price=stop_price_norm) if self.trading_mode == TradingMode.FUTURES: params['reduceOnly'] = True if 'stoploss_price_type' in order_types and 'stop_price_type_field' in self._ft_has: price_type = self._ft_has['stop_price_type_value_mapping'][ order_types.get('stoploss_price_type', PriceType.LAST)] params[self._ft_has['stop_price_type_field']] = price_type amount = self.amount_to_precision(pair, self._amount_to_contracts(pair, amount)) self._lev_prep(pair, leverage, side, accept_fail=True) order = self._api.create_order(symbol=pair, type=ordertype, side=side, amount=amount, price=limit_rate, params=params) self._log_exchange_response('create_stoploss_order', order) order = self._order_contracts_to_amount(order) logger.info(f"stoploss {user_order_type} order added for {pair}. " f"stop price: {stop_price}. limit: {limit_rate}") return order except ccxt.InsufficientFunds as e: raise InsufficientFundsError( f'Insufficient funds to create {ordertype} sell order on market {pair}. ' f'Tried to sell amount {amount} at rate {limit_rate}. ' f'Message: {e}') from e except ccxt.InvalidOrder as e: # Errors: # `Order would trigger immediately.` raise InvalidOrderException( f'Could not create {ordertype} sell order on market {pair}. ' f'Tried to sell amount {amount} at rate {limit_rate}. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f"Could not place stoploss order due to {e.__class__.__name__}. " f"Message: {e}") from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier(retries=API_FETCH_ORDER_RETRY_COUNT) def fetch_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: if self._config['dry_run']: return self.fetch_dry_run_order(order_id) try: order = self._api.fetch_order(order_id, pair, params=params) self._log_exchange_response('fetch_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.OrderNotFound as e: raise RetryableOrderError( f'Order not found (pair: {pair} id: {order_id}). Message: {e}') from e except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Tried to get an invalid order (pair: {pair} id: {order_id}). Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get order due to {e.__class__.__name__}. 
Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def fetch_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: return self.fetch_order(order_id, pair, params) def fetch_order_or_stoploss_order(self, order_id: str, pair: str, stoploss_order: bool = False) -> Dict: """ Simple wrapper calling either fetch_order or fetch_stoploss_order depending on the stoploss_order parameter :param order_id: OrderId to fetch order :param pair: Pair corresponding to order_id :param stoploss_order: If true, uses fetch_stoploss_order, otherwise fetch_order. """ if stoploss_order: return self.fetch_stoploss_order(order_id, pair) return self.fetch_order(order_id, pair) def check_order_canceled_empty(self, order: Dict) -> bool: """ Verify if an order has been cancelled without being partially filled :param order: Order dict as returned from fetch_order() :return: True if order has been cancelled without being filled, False otherwise. """ return (order.get('status') in NON_OPEN_EXCHANGE_STATES and order.get('filled') == 0.0) @retrier def cancel_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: if self._config['dry_run']: try: order = self.fetch_dry_run_order(order_id) order.update({'status': 'canceled', 'filled': 0.0, 'remaining': order['amount']}) return order except InvalidOrderException: return {} try: order = self._api.cancel_order(order_id, pair, params=params) self._log_exchange_response('cancel_order', order) order = self._order_contracts_to_amount(order) return order except ccxt.InvalidOrder as e: raise InvalidOrderException( f'Could not cancel order. Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not cancel order due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def cancel_stoploss_order(self, order_id: str, pair: str, params: Dict = {}) -> Dict: return self.cancel_order(order_id, pair, params) def is_cancel_order_result_suitable(self, corder) -> bool: if not isinstance(corder, dict): return False required = ('fee', 'status', 'amount') return all(corder.get(k, None) is not None for k in required) def cancel_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: """ Cancel order returning a result. Creates a fake result if cancel order returns a non-usable result and fetch_order does not work (certain exchanges don't return cancelled orders) :param order_id: Orderid to cancel :param pair: Pair corresponding to order_id :param amount: Amount to use for fake response :return: Result from either cancel_order if usable, or fetch_order """ try: corder = self.cancel_order(order_id, pair) if self.is_cancel_order_result_suitable(corder): return corder except InvalidOrderException: logger.warning(f"Could not cancel order {order_id} for {pair}.") try: order = self.fetch_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled order {order_id}.") order = { 'id': order_id, 'status': 'canceled', 'amount': amount, 'filled': 0.0, 'fee': {}, 'info': {} } return order def cancel_stoploss_order_with_result(self, order_id: str, pair: str, amount: float) -> Dict: """ Cancel stoploss order returning a result. 
Creates a fake result if cancel order returns a non-usable result and fetch_order does not work (certain exchanges don't return cancelled orders) :param order_id: stoploss-order-id to cancel :param pair: Pair corresponding to order_id :param amount: Amount to use for fake response :return: Result from either cancel_order if usable, or fetch_order """ corder = self.cancel_stoploss_order(order_id, pair) if self.is_cancel_order_result_suitable(corder): return corder try: order = self.fetch_stoploss_order(order_id, pair) except InvalidOrderException: logger.warning(f"Could not fetch cancelled stoploss order {order_id}.") order = {'fee': {}, 'status': 'canceled', 'amount': amount, 'info': {}} return order @retrier def get_balances(self) -> dict: try: balances = self._api.fetch_balance() # Remove additional info from ccxt results balances.pop("info", None) balances.pop("free", None) balances.pop("total", None) balances.pop("used", None) return balances except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get balance due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_positions(self, pair: Optional[str] = None) -> List[Dict]: """ Fetch positions from the exchange. If no pair is given, all positions are returned. :param pair: Pair for the query """ if self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES: return [] try: symbols = [] if pair: symbols.append(pair) positions: List[Dict] = self._api.fetch_positions(symbols) self._log_exchange_response('fetch_positions', positions) return positions except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get positions due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _fetch_orders_emulate(self, pair: str, since_ms: int) -> List[Dict]: orders = [] if self.exchange_has('fetchClosedOrders'): orders = self._api.fetch_closed_orders(pair, since=since_ms) if self.exchange_has('fetchOpenOrders'): orders_open = self._api.fetch_open_orders(pair, since=since_ms) orders.extend(orders_open) return orders @retrier(retries=0) def fetch_orders(self, pair: str, since: datetime, params: Optional[Dict] = None) -> List[Dict]: """ Fetch all orders for a pair since the given start time :param pair: Pair for the query :param since: Starting time for the query """ if self._config['dry_run']: return [] try: since_ms = int((since.timestamp() - 10) * 1000) if self.exchange_has('fetchOrders'): if not params: params = {} try: orders: List[Dict] = self._api.fetch_orders(pair, since=since_ms, params=params) except ccxt.NotSupported: # Some exchanges don't support fetchOrders # attempt to fetch open and closed orders separately orders = self._fetch_orders_emulate(pair, since_ms) else: orders = self._fetch_orders_emulate(pair, since_ms) self._log_exchange_response('fetch_orders', orders) orders = [self._order_contracts_to_amount(o) for o in orders] return orders except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not fetch orders due to {e.__class__.__name__}. 
Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_trading_fees(self) -> Dict[str, Any]: """ Fetch user account trading fees Can be cached, should not update often. """ if (self._config['dry_run'] or self.trading_mode != TradingMode.FUTURES or not self.exchange_has('fetchTradingFees')): return {} try: trading_fees: Dict[str, Any] = self._api.fetch_trading_fees() self._log_exchange_response('fetch_trading_fees', trading_fees) return trading_fees except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not fetch trading fees due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def fetch_bids_asks(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Dict: """ :param cached: Allow cached result :return: fetch_tickers result """ if not self.exchange_has('fetchBidsAsks'): return {} if cached: with self._cache_lock: tickers = self._fetch_tickers_cache.get('fetch_bids_asks') if tickers: return tickers try: tickers = self._api.fetch_bids_asks(symbols) with self._cache_lock: self._fetch_tickers_cache['fetch_bids_asks'] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching bids/asks in batch. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load bids/asks due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @retrier def get_tickers(self, symbols: Optional[List[str]] = None, cached: bool = False) -> Tickers: """ :param cached: Allow cached result :return: fetch_tickers result """ tickers: Tickers if not self.exchange_has('fetchTickers'): return {} if cached: with self._cache_lock: tickers = self._fetch_tickers_cache.get('fetch_tickers') # type: ignore if tickers: return tickers try: tickers = self._api.fetch_tickers(symbols) with self._cache_lock: self._fetch_tickers_cache['fetch_tickers'] = tickers return tickers except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching tickers in batch. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load tickers due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e # Pricing info @retrier def fetch_ticker(self, pair: str) -> Ticker: try: if (pair not in self.markets or self.markets[pair].get('active', False) is False): raise ExchangeError(f"Pair {pair} not available") data: Ticker = self._api.fetch_ticker(pair) return data except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not load ticker due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod def get_next_limit_in_list(limit: int, limit_range: Optional[List[int]], range_required: bool = True): """ Get next greater value in the list. 
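Illustrative: with limit_range=[20, 50, 100], a requested limit of 90 resolves to 100; a limit of 200 exceeds the range and resolves to 100 (the maximum) when range_required is True, or to None otherwise.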
Used by fetch_l2_order_book if the api only supports a limited range """ if not limit_range: return limit result = min([x for x in limit_range if limit <= x] + [max(limit_range)]) if not range_required and limit > result: # Range is not required - we can use None as parameter. return None return result @retrier def fetch_l2_order_book(self, pair: str, limit: int = 100) -> OrderBook: """ Get L2 order book from exchange. Can be limited to a certain amount (if supported). Returns a dict in the format {'asks': [price, volume], 'bids': [price, volume]} """ limit1 = self.get_next_limit_in_list(limit, self._ft_has['l2_limit_range'], self._ft_has['l2_limit_range_required']) try: return self._api.fetch_l2_order_book(pair, limit1) except ccxt.NotSupported as e: raise OperationalException( f'Exchange {self._api.name} does not support fetching order book. ' f'Message: {e}') from e except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get order book due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def _get_price_side(self, side: str, is_short: bool, conf_strategy: Dict) -> BidAsk: price_side = conf_strategy['price_side'] if price_side in ('same', 'other'): price_map = { ('entry', 'long', 'same'): 'bid', ('entry', 'long', 'other'): 'ask', ('entry', 'short', 'same'): 'ask', ('entry', 'short', 'other'): 'bid', ('exit', 'long', 'same'): 'ask', ('exit', 'long', 'other'): 'bid', ('exit', 'short', 'same'): 'bid', ('exit', 'short', 'other'): 'ask', } price_side = price_map[(side, 'short' if is_short else 'long', price_side)] return price_side def get_rate(self, pair: str, refresh: bool, side: EntryExit, is_short: bool, order_book: Optional[OrderBook] = None, ticker: Optional[Ticker] = None) -> float: """ Calculates the target rate for the given side: either interpolated from the ticker (between the bid/ask price and the last price) or taken from the first matching order-book level; in any other case the rate remains static, since it's not updating. :param pair: Pair to get rate for :param refresh: allow cached data :param side: "entry" or "exit" :return: float: Price :raises PricingError if orderbook price could not be determined. """ name = side.capitalize() strat_name = 'entry_pricing' if side == "entry" else 'exit_pricing' cache_rate: TTLCache = self._entry_rate_cache if side == "entry" else self._exit_rate_cache if not refresh: with self._cache_lock: rate = cache_rate.get(pair) # Check if cache has been invalidated if rate: logger.debug(f"Using cached {side} rate for {pair}.") return rate conf_strategy = self._config.get(strat_name, {}) price_side = self._get_price_side(side, is_short, conf_strategy) if conf_strategy.get('use_order_book', False): order_book_top = conf_strategy.get('order_book_top', 1) if order_book is None: order_book = self.fetch_l2_order_book(pair, order_book_top) rate = self._get_rate_from_ob(pair, side, order_book, name, price_side, order_book_top) else: logger.debug(f"Using Last {price_side.capitalize()} / Last Price") if ticker is None: ticker = self.fetch_ticker(pair) rate = self._get_rate_from_ticker(side, ticker, conf_strategy, price_side) if rate is None: raise PricingError(f"{name}-Rate for {pair} was empty.") with self._cache_lock: cache_rate[pair] = rate return rate def _get_rate_from_ticker(self, side: EntryExit, ticker: Ticker, conf_strategy: Dict[str, Any], price_side: BidAsk) -> Optional[float]: """ Get rate from ticker. 
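Illustrative interpolation (values assumed): for side='entry' with price_side='ask', ask=101.0, last=100.0 and price_last_balance=0.5, the returned rate is 101.0 + 0.5 * (100.0 - 101.0) = 100.5.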
""" ticker_rate = ticker[price_side] if ticker['last'] and ticker_rate: if side == 'entry' and ticker_rate > ticker['last']: balance = conf_strategy.get('price_last_balance', 0.0) ticker_rate = ticker_rate + balance * (ticker['last'] - ticker_rate) elif side == 'exit' and ticker_rate < ticker['last']: balance = conf_strategy.get('price_last_balance', 0.0) ticker_rate = ticker_rate - balance * (ticker_rate - ticker['last']) rate = ticker_rate return rate def _get_rate_from_ob(self, pair: str, side: EntryExit, order_book: OrderBook, name: str, price_side: BidAsk, order_book_top: int) -> float: """ Get rate from orderbook :raises: PricingError if rate could not be determined. """ logger.debug('order_book %s', order_book) # top 1 = index 0 try: obside: OBLiteral = 'bids' if price_side == 'bid' else 'asks' rate = order_book[obside][order_book_top - 1][0] except (IndexError, KeyError) as e: logger.warning( f"{pair} - {name} Price at location {order_book_top} from orderbook " f"could not be determined. Orderbook: {order_book}" ) raise PricingError from e logger.debug(f"{pair} - {name} price from orderbook {price_side.capitalize()}" f"side - top {order_book_top} order book {side} rate {rate:.8f}") return rate def get_rates(self, pair: str, refresh: bool, is_short: bool) -> Tuple[float, float]: entry_rate = None exit_rate = None if not refresh: with self._cache_lock: entry_rate = self._entry_rate_cache.get(pair) exit_rate = self._exit_rate_cache.get(pair) if entry_rate: logger.debug(f"Using cached buy rate for {pair}.") if exit_rate: logger.debug(f"Using cached sell rate for {pair}.") entry_pricing = self._config.get('entry_pricing', {}) exit_pricing = self._config.get('exit_pricing', {}) order_book = ticker = None if not entry_rate and entry_pricing.get('use_order_book', False): order_book_top = max(entry_pricing.get('order_book_top', 1), exit_pricing.get('order_book_top', 1)) order_book = self.fetch_l2_order_book(pair, order_book_top) entry_rate = self.get_rate(pair, refresh, 'entry', is_short, order_book=order_book) elif not entry_rate: ticker = self.fetch_ticker(pair) entry_rate = self.get_rate(pair, refresh, 'entry', is_short, ticker=ticker) if not exit_rate: exit_rate = self.get_rate(pair, refresh, 'exit', is_short, order_book=order_book, ticker=ticker) return entry_rate, exit_rate # Fee handling @retrier def get_trades_for_order(self, order_id: str, pair: str, since: datetime, params: Optional[Dict] = None) -> List: """ Fetch Orders using the "fetch_my_trades" endpoint and filter them by order-id. The "since" argument passed in is coming from the database and is in UTC, as timezone-native datetime object. From the python documentation: > Naive datetime instances are assumed to represent local time Therefore, calling "since.timestamp()" will get the UTC timestamp, after applying the transformation from local timezone to UTC. This works for timezones UTC+ since then the result will contain trades from a few hours instead of from the last 5 seconds, however fails for UTC- timezones, since we're then asking for trades with a "since" argument in the future. :param order_id order_id: Order-id as given when creating the order :param pair: Pair the order is for :param since: datetime object of the order creation time. Assumes object is in UTC. 
""" if self._config['dry_run']: return [] if not self.exchange_has('fetchMyTrades'): return [] try: # Allow 5s offset to catch slight time offsets (discovered in #1185) # since needs to be int in milliseconds _params = params if params else {} my_trades = self._api.fetch_my_trades( pair, int((since.replace(tzinfo=timezone.utc).timestamp() - 5) * 1000), params=_params) matched_trades = [trade for trade in my_trades if trade['order'] == order_id] self._log_exchange_response('get_trades_for_order', matched_trades) matched_trades = self._trades_contracts_to_amount(matched_trades) return matched_trades except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get trades due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e def get_order_id_conditional(self, order: Dict[str, Any]) -> str: return order['id'] @retrier def get_fee(self, symbol: str, type: str = '', side: str = '', amount: float = 1, price: float = 1, taker_or_maker: MakerTaker = 'maker') -> float: """ Retrieve fee from exchange :param symbol: Pair :param type: Type of order (market, limit, ...) :param side: Side of order (buy, sell) :param amount: Amount of order :param price: Price of order :param taker_or_maker: 'maker' or 'taker' (ignored if "type" is provided) """ if type and type == 'market': taker_or_maker = 'taker' try: if self._config['dry_run'] and self._config.get('fee', None) is not None: return self._config['fee'] # validate that markets are loaded before trying to get fee if self._api.markets is None or len(self._api.markets) == 0: self._api.load_markets(params={}) return self._api.calculate_fee(symbol=symbol, type=type, side=side, amount=amount, price=price, takerOrMaker=taker_or_maker)['rate'] except ccxt.DDoSProtection as e: raise DDosProtection(e) from e except (ccxt.NetworkError, ccxt.ExchangeError) as e: raise TemporaryError( f'Could not get fee info due to {e.__class__.__name__}. Message: {e}') from e except ccxt.BaseError as e: raise OperationalException(e) from e @staticmethod def order_has_fee(order: Dict) -> bool: """ Verifies if the passed in order dict has the needed keys to extract fees, and that these keys (currency, cost) are not empty. :param order: Order or trade (one trade) dict :return: True if the fee substructure contains currency and cost, false otherwise """ if not isinstance(order, dict): return False return ('fee' in order and order['fee'] is not None and (order['fee'].keys() >= {'currency', 'cost'}) and order['fee']['currency'] is not None and order['fee']['cost'] is not None ) def calculate_fee_rate( self, fee: Dict, symbol: str, cost: float, amount: float) -> Optional[float]: """ Calculate fee rate if it's not given by the exchange. 
:param fee: ccxt Fee dict - must contain cost / currency / rate :param symbol: Symbol of the order :param cost: Total cost of the order :param amount: Amount of the order """ if fee.get('rate') is not None: return fee.get('rate') fee_curr = fee.get('currency') if fee_curr is None: return None fee_cost = float(fee['cost']) # Calculate fee based on order details if fee_curr == self.get_pair_base_currency(symbol): # Base currency - divide by amount return round(fee_cost / amount, 8) elif fee_curr == self.get_pair_quote_currency(symbol): # Quote currency - divide by cost return round(fee_cost / cost, 8) if cost else None else: # If Fee currency is a different currency if not cost: # If cost is None or 0.0 -> falsy, return None return None try: comb = self.get_valid_pair_combination(fee_curr, self._config['stake_currency']) tick = self.fetch_ticker(comb) fee_to_quote_rate = safe_value_fallback2(tick, tick, 'last', 'ask') except (ValueError, ExchangeError): fee_to_quote_rate = self._config['exchange'].get('unknown_fee_rate', None) if not fee_to_quote_rate: return None return round((fee_cost * fee_to_quote_rate) / cost, 8) def extract_cost_curr_rate(self, fee: Dict, symbol: str, cost: float, amount: float) -> Tuple[float, str, Optional[float]]: """ Extract tuple of cost, currency, rate. Requires order_has_fee to run first! :param fee: ccxt Fee dict - must contain cost / currency / rate :param symbol: Symbol of the order :param cost: Total cost of the order :param amount: Amount of the order :return: Tuple with cost, currency, rate of the given fee dict """ return (float(fee['cost']), fee['currency'], self.calculate_fee_rate( fee, symbol, cost, amount ) ) # Historic data def get_historic_ohlcv(self, pair: str, timeframe: str, since_ms: int, candle_type: CandleType, is_new_pair: bool = False, until_ms: Optional[int] = None) -> List: """ Get candle history using asyncio and returns the list of candles. Handles all async work for this. Async over one pair, assuming we get `self.ohlcv_candle_limit()` candles per call. :param pair: Pair to download :param timeframe: Timeframe to get data for :param since_ms: Timestamp in milliseconds to get history from :param until_ms: Timestamp in milliseconds to get history up to :param candle_type: '', mark, index, premiumIndex, or funding_rate :return: List with candle (OHLCV) data """ pair, _, _, data, _ = self.loop.run_until_complete( self._async_get_historic_ohlcv(pair=pair, timeframe=timeframe, since_ms=since_ms, until_ms=until_ms, is_new_pair=is_new_pair, candle_type=candle_type)) logger.info(f"Downloaded data for {pair} with length {len(data)}.") return data async def _async_get_historic_ohlcv(self, pair: str, timeframe: str, since_ms: int, candle_type: CandleType, is_new_pair: bool = False, raise_: bool = False, until_ms: Optional[int] = None ) -> OHLCVResponse: """ Download historic ohlcv :param is_new_pair: used by binance subclass to allow "fast" new pair downloading :param candle_type: Any of the enum CandleType (must match trading mode!) 
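Illustrative sizing (numbers assumed): with a 5m timeframe and a 500-candle limit, one call spans 500 * 300,000 ms, roughly 41.7 hours, so a 30-day download is split into about 18 concurrent candle-history requests.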
""" one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( timeframe, candle_type, since_ms) logger.debug( "one_call: %s msecs (%s)", one_call, dt_humanize(dt_now() - timedelta(milliseconds=one_call), only_distance=True) ) input_coroutines = [self._async_get_candle_history( pair, timeframe, candle_type, since) for since in range(since_ms, until_ms or dt_ts(), one_call)] data: List = [] # Chunk requests into batches of 100 to avoid overwelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): results = await asyncio.gather(*input_coro, return_exceptions=True) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") if raise_: raise continue else: # Deconstruct tuple if it's not an exception p, _, c, new_data, _ = res if p == pair and c == candle_type: data.extend(new_data) # Sort data again after extending the result - above calls return in "async order" data = sorted(data, key=lambda x: x[0]) return pair, timeframe, candle_type, data, self._ohlcv_partial_candle def _build_coroutine( self, pair: str, timeframe: str, candle_type: CandleType, since_ms: Optional[int], cache: bool) -> Coroutine[Any, Any, OHLCVResponse]: not_all_data = cache and self.required_candle_call_count > 1 if cache and (pair, timeframe, candle_type) in self._klines: candle_limit = self.ohlcv_candle_limit(timeframe, candle_type) min_date = date_minus_candles(timeframe, candle_limit - 5).timestamp() # Check if 1 call can get us updated candles without hole in the data. if min_date < self._pairs_last_refresh_time.get((pair, timeframe, candle_type), 0): # Cache can be used - do one-off call. not_all_data = False else: # Time jump detected, evict cache logger.info( f"Time jump detected. Evicting cache for {pair}, {timeframe}, {candle_type}") del self._klines[(pair, timeframe, candle_type)] if (not since_ms and (self._ft_has["ohlcv_require_since"] or not_all_data)): # Multiple calls for one pair - to get more history one_call = timeframe_to_msecs(timeframe) * self.ohlcv_candle_limit( timeframe, candle_type, since_ms) move_to = one_call * self.required_candle_call_count now = timeframe_to_next_date(timeframe) since_ms = int((now - timedelta(seconds=move_to // 1000)).timestamp() * 1000) if since_ms: return self._async_get_historic_ohlcv( pair, timeframe, since_ms=since_ms, raise_=True, candle_type=candle_type) else: # One call ... "regular" refresh return self._async_get_candle_history( pair, timeframe, since_ms=since_ms, candle_type=candle_type) def _build_ohlcv_dl_jobs( self, pair_list: ListPairsWithTimeframes, since_ms: Optional[int], cache: bool) -> Tuple[List[Coroutine], List[Tuple[str, str, CandleType]]]: """ Build Coroutines to execute as part of refresh_latest_ohlcv """ input_coroutines: List[Coroutine[Any, Any, OHLCVResponse]] = [] cached_pairs = [] for pair, timeframe, candle_type in set(pair_list): if (timeframe not in self.timeframes and candle_type in (CandleType.SPOT, CandleType.FUTURES)): logger.warning( f"Cannot download ({pair}, {timeframe}) combination as this timeframe is " f"not available on {self.name}. Available timeframes are " f"{', '.join(self.timeframes)}.") continue if ((pair, timeframe, candle_type) not in self._klines or not cache or self._now_is_time_to_refresh(pair, timeframe, candle_type)): input_coroutines.append( self._build_coroutine(pair, timeframe, candle_type, since_ms, cache)) else: logger.debug( f"Using cached candle (OHLCV) data for {pair}, {timeframe}, {candle_type} ..." 
) cached_pairs.append((pair, timeframe, candle_type)) return input_coroutines, cached_pairs def _process_ohlcv_df(self, pair: str, timeframe: str, c_type: CandleType, ticks: List[List], cache: bool, drop_incomplete: bool) -> DataFrame: # keeping last candle time as last refreshed time of the pair if ticks and cache: idx = -2 if drop_incomplete and len(ticks) > 1 else -1 self._pairs_last_refresh_time[(pair, timeframe, c_type)] = ticks[idx][0] // 1000 # keeping parsed dataframe in cache ohlcv_df = ohlcv_to_dataframe(ticks, timeframe, pair=pair, fill_missing=True, drop_incomplete=drop_incomplete) if cache: if (pair, timeframe, c_type) in self._klines: old = self._klines[(pair, timeframe, c_type)] # Reassign so we return the updated, combined df ohlcv_df = clean_ohlcv_dataframe(concat([old, ohlcv_df], axis=0), timeframe, pair, fill_missing=True, drop_incomplete=False) candle_limit = self.ohlcv_candle_limit(timeframe, self._config['candle_type_def']) # Age out old candles ohlcv_df = ohlcv_df.tail(candle_limit + self._startup_candle_count) ohlcv_df = ohlcv_df.reset_index(drop=True) self._klines[(pair, timeframe, c_type)] = ohlcv_df else: self._klines[(pair, timeframe, c_type)] = ohlcv_df return ohlcv_df def refresh_latest_ohlcv(self, pair_list: ListPairsWithTimeframes, *, since_ms: Optional[int] = None, cache: bool = True, drop_incomplete: Optional[bool] = None ) -> Dict[PairWithTimeframe, DataFrame]: """ Refresh in-memory OHLCV asynchronously and set `_klines` with the result Loops asynchronously over pair_list and downloads all pairs async (semi-parallel). Only used in the dataprovider.refresh() method. :param pair_list: List of 3-element tuples containing (pair, timeframe, candle_type) to refresh :param since_ms: time since when to download, in milliseconds :param cache: Assign result to _klines. Useful for one-off downloads like for pairlists :param drop_incomplete: Control candle dropping. Specifying None defaults to _ohlcv_partial_candle :return: Dict of [{(pair, timeframe): Dataframe}] """ logger.debug("Refreshing candle (OHLCV) data for %d pairs", len(pair_list)) # Gather coroutines to run input_coroutines, cached_pairs = self._build_ohlcv_dl_jobs(pair_list, since_ms, cache) results_df = {} # Chunk requests into batches of 100 to avoid overwhelming ccxt Throttling for input_coro in chunks(input_coroutines, 100): async def gather_stuff(): return await asyncio.gather(*input_coro, return_exceptions=True) with self._loop_lock: results = self.loop.run_until_complete(gather_stuff()) for res in results: if isinstance(res, Exception): logger.warning(f"Async code raised an exception: {repr(res)}") continue # Deconstruct tuple (has 5 elements) pair, timeframe, c_type, ticks, drop_hint = res drop_incomplete_ = drop_hint if drop_incomplete is None else drop_incomplete ohlcv_df = self._process_ohlcv_df( pair, timeframe, c_type, ticks, cache, drop_incomplete_) results_df[(pair, timeframe, c_type)] = ohlcv_df # Return cached klines for pair, timeframe, c_type in cached_pairs: results_df[(pair, timeframe, c_type)] = self.klines( (pair, timeframe, c_type), copy=False ) return results_df def _now_is_time_to_refresh(self, pair: str, timeframe: str, candle_type: CandleType) -> bool: # Timeframe in seconds
interval_in_sec = timeframe_to_seconds(timeframe)
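The reference continuation above completes the `# Timeframe in seconds` comment that closes the flattened code. A minimal, self-contained sketch of the refresh check this line sets up; the comparison against the cached refresh timestamp is an assumption inferred from the `_pairs_last_refresh_time` usage visible earlier, not a verbatim continuation of the method:

import time

def now_is_time_to_refresh(last_refresh: int, interval_in_sec: int) -> bool:
    # A (pair, timeframe, candle_type) combination is due for refresh once a
    # full candle interval has elapsed since the cached refresh timestamp;
    # both values are unix timestamps in seconds.
    return (last_refresh + interval_in_sec) < int(time.time())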
21
2023-10-21 10:02:05+00:00
24k
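Each row of this dump pairs a flattened code context with a single reference next line. A minimal sketch of how such a row might be scored, assuming it is loaded as a dict whose `next_line` key holds the reference continuation (the helper name and the exact-match criterion are illustrative, not part of the dataset):

def exact_match(row: dict, completion: str) -> bool:
    # Compare a model's proposed next line against the stored reference,
    # ignoring surrounding whitespace.
    return completion.strip() == row["next_line"].strip()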
yanzhh/HGERE
transformers/src/transformers/modeling_initdistilbert.py
[ { "identifier": "gelu", "path": "transformers/src/transformers/activations.py", "snippet": "def swish(x):\ndef _gelu_python(x):\ndef gelu_new(x):\ndef get_activation(activation_string):\nACT2FN = {\n \"relu\": F.relu,\n \"swish\": swish,\n \"gelu\": gelu,\n \"tanh\": F.tanh,\n \"gelu_new\": gelu_new,\n}" }, { "identifier": "DistilBertConfig", "path": "transformers/src/transformers/configuration_distilbert.py", "snippet": "class DistilBertConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a :class:`~transformers.DistilBertModel`.\n It is used to instantiate a DistilBERT model according to the specified arguments, defining the model\n architecture. Instantiating a configuration with the defaults will yield a similar configuration to that of\n the DistilBERT `distilbert-base-uncased <https://huggingface.co/distilbert-base-uncased>`__ architecture.\n\n Configuration objects inherit from :class:`~transformers.PretrainedConfig` and can be used\n to control the model outputs. Read the documentation from :class:`~transformers.PretrainedConfig`\n for more information.\n\n\n Args:\n vocab_size (:obj:`int`, optional, defaults to 30522):\n Vocabulary size of the DistilBERT model. Defines the different tokens that\n can be represented by the `inputs_ids` passed to the forward method of :class:`~transformers.BertModel`.\n max_position_embeddings (:obj:`int`, optional, defaults to 512):\n The maximum sequence length that this model might ever be used with.\n Typically set this to something large just in case (e.g., 512 or 1024 or 2048).\n sinusoidal_pos_embds (:obj:`boolean`, optional, defaults to :obj:`False`):\n Whether to use sinusoidal positional embeddings.\n n_layers (:obj:`int`, optional, defaults to 6):\n Number of hidden layers in the Transformer encoder.\n n_heads (:obj:`int`, optional, defaults to 12):\n Number of attention heads for each attention layer in the Transformer encoder.\n dim (:obj:`int`, optional, defaults to 768):\n Dimensionality of the encoder layers and the pooler layer.\n hidden_dim (:obj:`int`, optional, defaults to 3072):\n The size of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler.\n attention_dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout ratio for the attention probabilities.\n activation (:obj:`str` or :obj:`function`, optional, defaults to \"gelu\"):\n The non-linear activation function (function or string) in the encoder and pooler.\n If string, \"gelu\", \"relu\", \"swish\" and \"gelu_new\" are supported.\n initializer_range (:obj:`float`, optional, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n qa_dropout (:obj:`float`, optional, defaults to 0.1):\n The dropout probabilities used in the question answering model\n :class:`~tranformers.DistilBertForQuestionAnswering`.\n seq_classif_dropout (:obj:`float`, optional, defaults to 0.2):\n The dropout probabilities used in the sequence classification model\n :class:`~tranformers.DistilBertForSequenceClassification`.\n\n Example::\n\n from transformers import DistilBertModel, DistilBertConfig\n\n # Initializing a DistilBERT configuration\n configuration = DistilBertConfig()\n\n # Initializing a model from the configuration\n model = DistilBertModel(configuration)\n\n # Accessing the model configuration\n 
configuration = model.config\n\n Attributes:\n pretrained_config_archive_map (Dict[str, str]):\n A dictionary containing all the available pre-trained checkpoints.\n \"\"\"\n pretrained_config_archive_map = DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP\n model_type = \"distilbert\"\n\n def __init__(\n self,\n vocab_size=30522,\n max_position_embeddings=512,\n sinusoidal_pos_embds=False,\n n_layers=6,\n n_heads=12,\n dim=768,\n hidden_dim=4 * 768,\n dropout=0.1,\n attention_dropout=0.1,\n activation=\"gelu\",\n initializer_range=0.02,\n qa_dropout=0.1,\n seq_classif_dropout=0.2,\n **kwargs\n ):\n super().__init__(**kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.sinusoidal_pos_embds = sinusoidal_pos_embds\n self.n_layers = n_layers\n self.n_heads = n_heads\n self.dim = dim\n self.hidden_dim = hidden_dim\n self.dropout = dropout\n self.attention_dropout = attention_dropout\n self.activation = activation\n self.initializer_range = initializer_range\n self.qa_dropout = qa_dropout\n self.seq_classif_dropout = seq_classif_dropout\n\n @property\n def hidden_size(self):\n return self.dim\n\n @property\n def num_attention_heads(self):\n return self.n_heads\n\n @property\n def num_hidden_layers(self):\n return self.n_layers" }, { "identifier": "add_start_docstrings", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings(*docstr):\n def docstring_decorator(fn):\n fn.__doc__ = \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "add_start_docstrings_to_callable", "path": "transformers/src/transformers/file_utils.py", "snippet": "def add_start_docstrings_to_callable(*docstr):\n def docstring_decorator(fn):\n class_name = \":class:`~transformers.{}`\".format(fn.__qualname__.split(\".\")[0])\n intro = \" The {} forward method, overrides the :func:`__call__` special method.\".format(class_name)\n note = r\"\"\"\n\n .. 
note::\n Although the recipe for forward pass needs to be defined within\n this function, one should call the :class:`Module` instance afterwards\n instead of this since the former takes care of running the\n pre and post processing steps while the latter silently ignores them.\n \"\"\"\n fn.__doc__ = intro + note + \"\".join(docstr) + (fn.__doc__ if fn.__doc__ is not None else \"\")\n return fn\n\n return docstring_decorator" }, { "identifier": "PreTrainedModel", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "class PreTrainedModel(nn.Module, ModuleUtilsMixin):\n r\"\"\" Base class for all models.\n\n :class:`~transformers.PreTrainedModel` takes care of storing the configuration of the models and handles methods for loading/downloading/saving models\n as well as a few methods common to all models to (i) resize the input embeddings and (ii) prune heads in the self-attention heads.\n\n Class attributes (overridden by derived classes):\n - ``config_class``: a class derived from :class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.\n - ``pretrained_model_archive_map``: a python ``dict`` of with `short-cut-names` (string) as keys and `url` (string) of associated pretrained weights as values.\n - ``load_tf_weights``: a python ``method`` for loading a TensorFlow checkpoint in a PyTorch model, taking as arguments:\n\n - ``model``: an instance of the relevant subclass of :class:`~transformers.PreTrainedModel`,\n - ``config``: an instance of the relevant subclass of :class:`~transformers.PretrainedConfig`,\n - ``path``: a path (string) to the TensorFlow checkpoint.\n\n - ``base_model_prefix``: a string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.\n \"\"\"\n config_class = None\n pretrained_model_archive_map = {}\n base_model_prefix = \"\"\n\n @property\n def dummy_inputs(self):\n \"\"\" Dummy inputs to do a forward pass in the network.\n\n Returns:\n torch.Tensor with dummy inputs\n \"\"\"\n return {\"input_ids\": torch.tensor(DUMMY_INPUTS)}\n\n def __init__(self, config, *inputs, **kwargs):\n super().__init__()\n if not isinstance(config, PretrainedConfig):\n raise ValueError(\n \"Parameter config in `{}(config)` should be an instance of class `PretrainedConfig`. 
\"\n \"To create a model from a pretrained model use \"\n \"`model = {}.from_pretrained(PRETRAINED_MODEL_NAME)`\".format(\n self.__class__.__name__, self.__class__.__name__\n )\n )\n # Save config in model\n self.config = config\n\n @property\n def base_model(self):\n return getattr(self, self.base_model_prefix, self)\n\n def get_input_embeddings(self):\n \"\"\"\n Returns the model's input embeddings.\n\n Returns:\n :obj:`nn.Module`:\n A torch module mapping vocabulary to hidden states.\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self)\n if base_model is not self:\n return base_model.get_input_embeddings()\n else:\n raise NotImplementedError\n\n def set_input_embeddings(self, value):\n \"\"\"\n Set model's input embeddings\n\n Args:\n value (:obj:`nn.Module`):\n A module mapping vocabulary to hidden states.\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self)\n if base_model is not self:\n base_model.set_input_embeddings(value)\n else:\n raise NotImplementedError\n\n def get_output_embeddings(self):\n \"\"\"\n Returns the model's output embeddings.\n\n Returns:\n :obj:`nn.Module`:\n A torch module mapping hidden states to vocabulary.\n \"\"\"\n return None # Overwrite for models with output embeddings\n\n def tie_weights(self):\n \"\"\"\n Tie the weights between the input embeddings and the output embeddings.\n If the `torchscript` flag is set in the configuration, can't handle parameter sharing so we are cloning\n the weights instead.\n \"\"\"\n output_embeddings = self.get_output_embeddings()\n if output_embeddings is not None:\n if isinstance(output_embeddings, list):\n for x in output_embeddings:\n self._tie_or_clone_weights(x, self.get_input_embeddings())\n else:\n self._tie_or_clone_weights(output_embeddings, self.get_input_embeddings())\n\n def _tie_or_clone_weights(self, output_embeddings, input_embeddings):\n \"\"\" Tie or clone module weights depending of weither we are using TorchScript or not\n \"\"\"\n if self.config.torchscript:\n output_embeddings.weight = nn.Parameter(input_embeddings.weight.clone())\n else:\n output_embeddings.weight = input_embeddings.weight\n\n if hasattr(output_embeddings, \"bias\") and output_embeddings.bias is not None:\n output_embeddings.bias.data = torch.nn.functional.pad(\n output_embeddings.bias.data,\n (0, output_embeddings.weight.shape[0] - output_embeddings.bias.shape[0]),\n \"constant\",\n 0,\n )\n if hasattr(output_embeddings, \"out_features\") and hasattr(input_embeddings, \"num_embeddings\"):\n output_embeddings.out_features = input_embeddings.num_embeddings\n\n def resize_token_embeddings(self, new_num_tokens=None):\n \"\"\" Resize input token embeddings matrix of the model if new_num_tokens != config.vocab_size.\n Take care of tying weights embeddings afterwards if the model class has a `tie_weights()` method.\n\n Arguments:\n\n new_num_tokens: (`optional`) int:\n New number of tokens in the embedding matrix. Increasing the size will add newly initialized vectors at the end. 
Reducing the size will remove vectors from the end.\n If not provided or None: does nothing and just returns a pointer to the input tokens ``torch.nn.Embeddings`` Module of the model.\n\n Return: ``torch.nn.Embeddings``\n Pointer to the input tokens Embeddings Module of the model\n \"\"\"\n base_model = getattr(self, self.base_model_prefix, self) # get the base model if needed\n model_embeds = base_model._resize_token_embeddings(new_num_tokens)\n if new_num_tokens is None:\n return model_embeds\n\n # Update base model and current model config\n self.config.vocab_size = new_num_tokens\n base_model.vocab_size = new_num_tokens\n\n # Tie weights again if needed\n self.tie_weights()\n\n return model_embeds\n\n def _resize_token_embeddings(self, new_num_tokens):\n old_embeddings = self.get_input_embeddings()\n new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)\n self.set_input_embeddings(new_embeddings)\n return self.get_input_embeddings()\n\n def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None):\n \"\"\" Build a resized Embedding Module from a provided token Embedding Module.\n Increasing the size will add newly initialized vectors at the end\n Reducing the size will remove vectors from the end\n\n Args:\n new_num_tokens: (`optional`) int\n New number of tokens in the embedding matrix.\n Increasing the size will add newly initialized vectors at the end\n Reducing the size will remove vectors from the end\n If not provided or None: return the provided token Embedding Module.\n Return: ``torch.nn.Embeddings``\n Pointer to the resized Embedding Module or the old Embedding Module if new_num_tokens is None\n \"\"\"\n if new_num_tokens is None:\n return old_embeddings\n\n old_num_tokens, old_embedding_dim = old_embeddings.weight.size()\n if old_num_tokens == new_num_tokens:\n return old_embeddings\n\n # Build new embeddings\n new_embeddings = nn.Embedding(new_num_tokens, old_embedding_dim)\n new_embeddings.to(old_embeddings.weight.device)\n\n # initialize all new embeddings (in particular added tokens)\n self._init_weights(new_embeddings)\n\n # Copy word embeddings from the previous weights\n num_tokens_to_copy = min(old_num_tokens, new_num_tokens)\n new_embeddings.weight.data[:num_tokens_to_copy, :] = old_embeddings.weight.data[:num_tokens_to_copy, :]\n\n return new_embeddings\n\n def init_weights(self):\n \"\"\" Initialize and prunes weights if needed. \"\"\"\n # Initialize weights\n self.apply(self._init_weights)\n\n # Prune heads if needed\n if self.config.pruned_heads:\n self.prune_heads(self.config.pruned_heads)\n\n # Tie weights if needed\n self.tie_weights()\n\n def prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the base model.\n\n Arguments:\n\n heads_to_prune: dict with keys being selected layer indices (`int`) and associated values being the list of heads to prune in said layer (list of `int`).\n E.g. 
{1: [0, 2], 2: [2, 3]} will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.\n \"\"\"\n # save new sets of pruned heads as union of previously stored pruned heads and newly pruned heads\n for layer, heads in heads_to_prune.items():\n union_heads = set(self.config.pruned_heads.get(layer, [])) | set(heads)\n self.config.pruned_heads[layer] = list(union_heads) # Unfortunately we have to store it as list for JSON\n\n self.base_model._prune_heads(heads_to_prune)\n\n def save_pretrained(self, save_directory):\n \"\"\" Save a model and its configuration file to a directory, so that it\n can be re-loaded using the `:func:`~transformers.PreTrainedModel.from_pretrained`` class method.\n \"\"\"\n assert os.path.isdir(\n save_directory\n ), \"Saving path should be a directory where the model and configuration can be saved\"\n\n # Only save the model itself if we are using distributed training\n model_to_save = self.module if hasattr(self, \"module\") else self\n\n # Attach architecture to the config\n model_to_save.config.architectures = [model_to_save.__class__.__name__]\n\n # Save configuration file\n model_to_save.config.save_pretrained(save_directory)\n\n # If we save using the predefined names, we can load using `from_pretrained`\n output_model_file = os.path.join(save_directory, WEIGHTS_NAME)\n torch.save(model_to_save.state_dict(), output_model_file)\n logger.info(\"Model weights saved in {}\".format(output_model_file))\n\n @classmethod\n def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):\n r\"\"\"Instantiate a pretrained pytorch model from a pre-trained model configuration.\n\n The model is set in evaluation mode by default using ``model.eval()`` (Dropout modules are deactivated)\n To train the model, you should first set it back in training mode with ``model.train()``\n\n The warning ``Weights from XXX not initialized from pretrained model`` means that the weights of XXX do not come pre-trained with the rest of the model.\n It is up to you to train those weights with a downstream fine-tuning task.\n\n The warning ``Weights from XXX not used in YYY`` means that the layer XXX is not used by YYY, therefore those weights are discarded.\n\n Parameters:\n pretrained_model_name_or_path: either:\n - a string with the `shortcut name` of a pre-trained model to load from cache or download, e.g.: ``bert-base-uncased``.\n - a string with the `identifier name` of a pre-trained model that was user-uploaded to our S3, e.g.: ``dbmdz/bert-base-german-cased``.\n - a path to a `directory` containing model weights saved using :func:`~transformers.PreTrainedModel.save_pretrained`, e.g.: ``./my_model_directory/``.\n - a path or url to a `tensorflow index checkpoint file` (e.g. `./tf_model/model.ckpt.index`). In this case, ``from_tf`` should be set to True and a configuration object should be provided as ``config`` argument. This loading path is slower than converting the TensorFlow checkpoint in a PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.\n - None if you are both providing the configuration and state dictionary (resp. 
with keyword arguments ``config`` and ``state_dict``)\n\n model_args: (`optional`) Sequence of positional arguments:\n All remaning positional arguments will be passed to the underlying model's ``__init__`` method\n\n config: (`optional`) one of:\n - an instance of a class derived from :class:`~transformers.PretrainedConfig`, or\n - a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained()`\n Configuration for the model to use instead of an automatically loaded configuation. Configuration can be automatically loaded when:\n - the model is a model provided by the library (loaded with the ``shortcut-name`` string of a pretrained model), or\n - the model was saved using :func:`~transformers.PreTrainedModel.save_pretrained` and is reloaded by suppling the save directory.\n - the model is loaded by suppling a local directory as ``pretrained_model_name_or_path`` and a configuration JSON file named `config.json` is found in the directory.\n\n state_dict: (`optional`) dict:\n an optional state dictionnary for the model to use instead of a state dictionary loaded from saved weights file.\n This option can be used if you want to create a model from a pretrained configuration but load your own weights.\n In this case though, you should check if using :func:`~transformers.PreTrainedModel.save_pretrained` and :func:`~transformers.PreTrainedModel.from_pretrained` is not a simpler option.\n\n cache_dir: (`optional`) string:\n Path to a directory in which a downloaded pre-trained model\n configuration should be cached if the standard cache should not be used.\n\n force_download: (`optional`) boolean, default False:\n Force to (re-)download the model weights and configuration files and override the cached versions if they exists.\n\n resume_download: (`optional`) boolean, default False:\n Do not delete incompletely recieved file. Attempt to resume the download if such a file exists.\n\n proxies: (`optional`) dict, default None:\n A dictionary of proxy servers to use by protocol or endpoint, e.g.: {'http': 'foo.bar:3128', 'http://hostname': 'foo.bar:4012'}.\n The proxies are used on each request.\n\n output_loading_info: (`optional`) boolean:\n Set to ``True`` to also return a dictionnary containing missing keys, unexpected keys and error messages.\n\n kwargs: (`optional`) Remaining dictionary of keyword arguments:\n Can be used to update the configuration object (after it being loaded) and initiate the model. (e.g. ``output_attention=True``). Behave differently depending on whether a `config` is provided or automatically loaded:\n\n - If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the underlying model's ``__init__`` method (we assume all relevant updates to the configuration have already been done)\n - If a configuration is not provided, ``kwargs`` will be first passed to the configuration class initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of ``kwargs`` that corresponds to a configuration attribute will be used to override said attribute with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration attribute will be passed to the underlying model's ``__init__`` function.\n\n Examples::\n\n # For example purposes. Not runnable.\n model = BertModel.from_pretrained('bert-base-uncased') # Download model and configuration from S3 and cache.\n model = BertModel.from_pretrained('./test/saved_model/') # E.g. 
model was saved using `save_pretrained('./test/saved_model/')`\n model = BertModel.from_pretrained('bert-base-uncased', output_attention=True) # Update configuration during loading\n assert model.config.output_attention == True\n # Loading from a TF checkpoint file instead of a PyTorch model (slower)\n config = BertConfig.from_json_file('./tf_model/my_tf_model_config.json')\n model = BertModel.from_pretrained('./tf_model/my_tf_checkpoint.ckpt.index', from_tf=True, config=config)\n\n \"\"\"\n config = kwargs.pop(\"config\", None)\n state_dict = kwargs.pop(\"state_dict\", None)\n cache_dir = kwargs.pop(\"cache_dir\", None)\n from_tf = kwargs.pop(\"from_tf\", False)\n force_download = kwargs.pop(\"force_download\", False)\n resume_download = kwargs.pop(\"resume_download\", False)\n proxies = kwargs.pop(\"proxies\", None)\n output_loading_info = kwargs.pop(\"output_loading_info\", False)\n local_files_only = kwargs.pop(\"local_files_only\", False)\n\n # Load config if we don't provide a configuration\n if not isinstance(config, PretrainedConfig):\n config_path = config if config is not None else pretrained_model_name_or_path\n config, model_kwargs = cls.config_class.from_pretrained(\n config_path,\n *model_args,\n cache_dir=cache_dir,\n return_unused_kwargs=True,\n force_download=force_download,\n resume_download=resume_download,\n proxies=proxies,\n local_files_only=local_files_only,\n **kwargs,\n )\n else:\n model_kwargs = kwargs\n\n # Load model\n if pretrained_model_name_or_path is not None:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n archive_file = cls.pretrained_model_archive_map[pretrained_model_name_or_path]\n elif os.path.isdir(pretrained_model_name_or_path):\n if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")):\n # Load from a TF 1.0 checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + \".index\")\n elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):\n # Load from a TF 2.0 checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)\n elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):\n # Load from a PyTorch checkpoint\n archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)\n else:\n raise EnvironmentError(\n \"Error no file named {} found in directory {} or `from_tf` set to False\".format(\n [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + \".index\"], pretrained_model_name_or_path\n )\n )\n elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):\n archive_file = pretrained_model_name_or_path\n elif os.path.isfile(pretrained_model_name_or_path + \".index\"):\n assert (\n from_tf\n ), \"We found a TensorFlow checkpoint at {}, please set from_tf to True to load from this checkpoint\".format(\n pretrained_model_name_or_path + \".index\"\n )\n archive_file = pretrained_model_name_or_path + \".index\"\n else:\n archive_file = hf_bucket_url(\n pretrained_model_name_or_path, postfix=(TF2_WEIGHTS_NAME if from_tf else WEIGHTS_NAME)\n )\n\n # redirect to the cache, if necessary\n try:\n resolved_archive_file = cached_path(\n archive_file,\n cache_dir=cache_dir,\n force_download=force_download,\n proxies=proxies,\n resume_download=resume_download,\n local_files_only=local_files_only,\n )\n except EnvironmentError:\n if pretrained_model_name_or_path in cls.pretrained_model_archive_map:\n msg = \"Couldn't reach 
server at '{}' to download pretrained weights.\".format(archive_file)\n else:\n msg = (\n \"Model name '{}' was not found in model name list ({}). \"\n \"We assumed '{}' was a path or url to model weight files named one of {} but \"\n \"couldn't find any such file at this path or url.\".format(\n pretrained_model_name_or_path,\n \", \".join(cls.pretrained_model_archive_map.keys()),\n archive_file,\n [WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME],\n )\n )\n raise EnvironmentError(msg)\n\n if resolved_archive_file == archive_file:\n logger.info(\"loading weights file {}\".format(archive_file))\n else:\n logger.info(\"loading weights file {} from cache at {}\".format(archive_file, resolved_archive_file))\n else:\n resolved_archive_file = None\n\n # Instantiate model.\n model = cls(config, *model_args, **model_kwargs)\n\n if state_dict is None and not from_tf:\n try:\n state_dict = torch.load(resolved_archive_file, map_location=\"cpu\")\n except Exception:\n raise OSError(\n \"Unable to load weights from pytorch checkpoint file. \"\n \"If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. \"\n )\n\n missing_keys = []\n unexpected_keys = []\n error_msgs = []\n\n if from_tf:\n if resolved_archive_file.endswith(\".index\"):\n # Load from a TensorFlow 1.X checkpoint - provided by original authors\n model = cls.load_tf_weights(model, config, resolved_archive_file[:-6]) # Remove the '.index'\n else:\n # Load from our TensorFlow 2.0 checkpoints\n try:\n from transformers import load_tf2_checkpoint_in_pytorch_model\n\n model = load_tf2_checkpoint_in_pytorch_model(model, resolved_archive_file, allow_missing_keys=True)\n except ImportError:\n logger.error(\n \"Loading a TensorFlow model in PyTorch, requires both PyTorch and TensorFlow to be installed. 
Please see \"\n \"https://pytorch.org/ and https://www.tensorflow.org/install/ for installation instructions.\"\n )\n raise\n else:\n # Convert old format to new format if needed from a PyTorch state_dict\n old_keys = []\n new_keys = []\n for key in state_dict.keys():\n new_key = None\n if \"gamma\" in key:\n new_key = key.replace(\"gamma\", \"weight\")\n if \"beta\" in key:\n new_key = key.replace(\"beta\", \"bias\")\n if new_key:\n old_keys.append(key)\n new_keys.append(new_key)\n for old_key, new_key in zip(old_keys, new_keys):\n state_dict[new_key] = state_dict.pop(old_key)\n\n # copy state_dict so _load_from_state_dict can modify it\n metadata = getattr(state_dict, \"_metadata\", None)\n state_dict = state_dict.copy()\n if metadata is not None:\n state_dict._metadata = metadata\n\n # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants\n # so we need to apply the function recursively.\n def load(module: nn.Module, prefix=\"\"):\n local_metadata = {} if metadata is None else metadata.get(prefix[:-1], {})\n module._load_from_state_dict(\n state_dict, prefix, local_metadata, True, missing_keys, unexpected_keys, error_msgs\n )\n for name, child in module._modules.items():\n if child is not None:\n load(child, prefix + name + \".\")\n\n # Make sure we are able to load base models as well as derived models (with heads)\n start_prefix = \"\"\n model_to_load = model\n if not hasattr(model, cls.base_model_prefix) and any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n start_prefix = cls.base_model_prefix + \".\"\n if hasattr(model, cls.base_model_prefix) and not any(\n s.startswith(cls.base_model_prefix) for s in state_dict.keys()\n ):\n model_to_load = getattr(model, cls.base_model_prefix)\n\n load(model_to_load, prefix=start_prefix)\n if len(missing_keys) > 0:\n logger.info(\n \"Weights of {} not initialized from pretrained model: {}\".format(\n model.__class__.__name__, missing_keys\n )\n )\n if len(unexpected_keys) > 0:\n logger.info(\n \"Weights from pretrained model not used in {}: {}\".format(\n model.__class__.__name__, unexpected_keys\n )\n )\n if len(error_msgs) > 0:\n raise RuntimeError(\n \"Error(s) in loading state_dict for {}:\\n\\t{}\".format(\n model.__class__.__name__, \"\\n\\t\".join(error_msgs)\n )\n )\n\n model.tie_weights() # make sure word embedding weights are still tied if needed\n\n # Set model in evaluation mode to desactivate DropOut modules by default\n model.eval()\n\n if output_loading_info:\n loading_info = {\"missing_keys\": missing_keys, \"unexpected_keys\": unexpected_keys, \"error_msgs\": error_msgs}\n return model, loading_info\n\n return model\n\n def prepare_inputs_for_generation(self, input_ids, **kwargs):\n return {\"input_ids\": input_ids}\n\n def _do_output_past(self, outputs):\n has_output_past = hasattr(self.config, \"output_past\") and self.config.output_past\n has_mem_len = hasattr(self.config, \"mem_len\") and self.config.mem_len\n\n if has_output_past and not has_mem_len and len(outputs) > 1:\n return True\n elif has_mem_len and self.config.mem_len > 0 and len(outputs) > 1:\n return True\n\n return False\n\n @torch.no_grad()\n def generate(\n self,\n input_ids=None,\n max_length=None,\n do_sample=True,\n num_beams=None,\n temperature=None,\n top_k=None,\n top_p=None,\n repetition_penalty=None,\n bos_token_id=None,\n pad_token_id=None,\n eos_token_ids=None,\n length_penalty=None,\n num_return_sequences=None,\n ):\n r\"\"\" Generates sequences for models with a LM head. 
The method currently supports greedy or penalized greedy decoding, sampling with top-k or nucleus sampling\n and beam-search.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between 1 and infinity. Default to 20.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `True`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictely positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n bos_token_id: (`optional`) int\n Beginning of sentence token if no prompt is provided. Default to 0.\n\n eos_token_ids: (`optional`) int or list of int\n End of sequence token or list of tokens to stop the generation. Default to 0.\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. 
Default to 1.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40, bos_token_id=tokenizer.bos_token_id, eos_token_ids=tokenizer.eos_token_id, do_sample=False) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, bos_token_id=tokenizer.bos_token_id, pad_token_id=tokenizer.pad_token_id, eos_token_ids=tokenizer.eos_token_id, num_return_sequences=3) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelWithLMHead.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = torch.tensor(tokenizer.encode(input_context)).unsqueeze(0) # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`)\"\n            )\n\n        max_length = max_length if max_length is not None else self.config.max_length\n        do_sample = do_sample if do_sample is not None else self.config.do_sample\n        num_beams = num_beams if num_beams is not None else self.config.num_beams\n        temperature = temperature if temperature is not None else self.config.temperature\n        top_k = top_k if top_k is not None else self.config.top_k\n        top_p = top_p if top_p is not None else self.config.top_p\n        repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n        bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n        pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n        eos_token_ids = eos_token_ids if eos_token_ids is not None else self.config.eos_token_ids\n        length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n        num_return_sequences = (\n            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n        )\n\n        if input_ids is not None:\n            batch_size = input_ids.shape[0]  # overridden by the input batch_size\n        else:\n            batch_size = 1\n        if isinstance(eos_token_ids, int):\n            eos_token_ids = [eos_token_ids]\n\n        assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n        assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n        assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n        assert temperature > 0, \"`temperature` should be strictly positive.\"\n        assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n        assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n        assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n        assert input_ids is not None or (\n            isinstance(bos_token_id, int) and bos_token_id >= 0\n        ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n        assert pad_token_id is None or (\n            isinstance(pad_token_id, int) and (pad_token_id >= 0)\n        ), \"`pad_token_id` should be a positive integer.\"\n        assert (eos_token_ids is None) or (\n            isinstance(eos_token_ids, (list, tuple)) and all(isinstance(e, int) and e >= 0 for e in eos_token_ids)\n        ), \"`eos_token_ids` should be a positive integer or a list/tuple of positive integers.\"\n        assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n        assert (\n            isinstance(num_return_sequences, int) and num_return_sequences > 0\n        ), \"`num_return_sequences` should be a strictly positive integer.\"\n\n        if input_ids is None:\n            assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n                \"you should either supply a context to complete as `input_ids` input \"\n                \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n            )\n            input_ids = torch.full(\n                (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device\n            )\n        else:\n            assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n        if pad_token_id is None and eos_token_ids is not None:\n            logger.warning(\n                \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_ids[0])\n            )\n            pad_token_id = eos_token_ids[0]\n\n        # current position and vocab size\n        cur_len = input_ids.shape[1]\n        vocab_size = 
self.config.vocab_size\n\n        if num_return_sequences != 1:\n            # Expand input to num return sequences\n            input_ids = input_ids.unsqueeze(1).expand(batch_size, num_return_sequences, cur_len)\n            input_ids = input_ids.contiguous().view(\n                batch_size * num_return_sequences, cur_len\n            )  # (batch_size * num_return_sequences, cur_len)\n            effective_batch_size = batch_size * num_return_sequences\n        else:\n            effective_batch_size = batch_size\n\n        if num_beams > 1:\n            output = self._generate_beam_search(\n                input_ids,\n                cur_len,\n                max_length,\n                do_sample,\n                temperature,\n                top_k,\n                top_p,\n                repetition_penalty,\n                pad_token_id,\n                eos_token_ids,\n                effective_batch_size,\n                length_penalty,\n                num_beams,\n                vocab_size,\n            )\n        else:\n            output = self._generate_no_beam_search(\n                input_ids,\n                cur_len,\n                max_length,\n                do_sample,\n                temperature,\n                top_k,\n                top_p,\n                repetition_penalty,\n                pad_token_id,\n                eos_token_ids,\n                effective_batch_size,\n            )\n\n        return output\n\n    def _generate_no_beam_search(\n        self,\n        input_ids,\n        cur_len,\n        max_length,\n        do_sample,\n        temperature,\n        top_k,\n        top_p,\n        repetition_penalty,\n        pad_token_id,\n        eos_token_ids,\n        batch_size,\n    ):\n        \"\"\" Generate sequences for each example without beam search (num_beams == 1).\n            All returned sequences are generated independently.\n        \"\"\"\n        # current position / max lengths / length of generated sentences / unfinished sentences\n        unfinished_sents = input_ids.new(batch_size).fill_(1)\n        sent_lengths = input_ids.new(batch_size).fill_(max_length)\n\n        past = None\n\n        while cur_len < max_length:\n            model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)\n            outputs = self(**model_inputs)\n            next_token_logits = outputs[0][:, -1, :]\n\n            # if model has past, then set the past variable to speed up decoding\n            if self._do_output_past(outputs):\n                past = outputs[1]\n\n            # repetition penalty from CTRL paper (https://arxiv.org/abs/1909.05858)\n            if repetition_penalty != 1.0:\n                for i in range(batch_size):\n                    for previous_token in set(input_ids[i].tolist()):\n                        # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability\n                        if next_token_logits[i, previous_token] < 0:\n                            next_token_logits[i, previous_token] *= repetition_penalty\n                        else:\n                            next_token_logits[i, previous_token] /= repetition_penalty\n\n            if do_sample:\n                # Temperature (higher temperature => more likely to sample low probability tokens)\n                if temperature != 1.0:\n                    next_token_logits = next_token_logits / temperature\n                # Top-p/top-k filtering\n                next_token_logits = top_k_top_p_filtering(next_token_logits, top_k=top_k, top_p=top_p)\n                # Sample\n                next_token = torch.multinomial(F.softmax(next_token_logits, dim=-1), num_samples=1).squeeze(1)\n            else:\n                # Greedy decoding\n                next_token = torch.argmax(next_token_logits, dim=-1)\n\n            # update generations and finished sentences\n            if eos_token_ids is not None:\n                # pad finished sentences if eos_token_ids exist\n                tokens_to_add = next_token * unfinished_sents + (pad_token_id) * (1 - unfinished_sents)\n            else:\n                tokens_to_add = next_token\n\n            input_ids = torch.cat([input_ids, tokens_to_add.unsqueeze(-1)], dim=-1)\n\n            if eos_token_ids is not None:\n                for eos_token_id in eos_token_ids:\n                    eos_in_sents = tokens_to_add == eos_token_id\n                    # if sentence is unfinished and the token to add is eos, sent_lengths is filled with current length\n                    is_sents_unfinished_and_token_to_add_is_eos = unfinished_sents.mul(eos_in_sents.long()).bool()\n                    sent_lengths.masked_fill_(is_sents_unfinished_and_token_to_add_is_eos, cur_len + 1)\n                    # unfinished_sents is set to zero if eos in 
sentence\n                    unfinished_sents.mul_((~eos_in_sents).long())\n\n            cur_len = cur_len + 1\n\n            # stop when there is a </s> in each sentence, or if we exceed the maximum length\n            if unfinished_sents.max() == 0:\n                break\n\n        # if there are different sentence lengths in the batch, some batches have to be padded\n        if sent_lengths.min().item() != sent_lengths.max().item():\n            assert pad_token_id is not None, \"`pad_token_id` has to be defined if batches have different lengths\"\n            # finished sents are filled with pad_token\n            decoded = input_ids.new(batch_size, sent_lengths.max().item()).fill_(pad_token_id)\n        else:\n            decoded = input_ids\n\n        for hypo_idx, hypo in enumerate(input_ids):\n            decoded[hypo_idx, : sent_lengths[hypo_idx]] = hypo[: sent_lengths[hypo_idx]]\n\n        return decoded\n\n    def _generate_beam_search(\n        self,\n        input_ids,\n        cur_len,\n        max_length,\n        do_sample,\n        temperature,\n        top_k,\n        top_p,\n        repetition_penalty,\n        pad_token_id,\n        eos_token_ids,\n        batch_size,\n        length_penalty,\n        num_beams,\n        vocab_size,\n    ):\n        \"\"\" Generate sequences for each example with beam search.\n        \"\"\"\n        # Expand input to num beams\n        input_ids = input_ids.unsqueeze(1).expand(batch_size, num_beams, cur_len)\n        input_ids = input_ids.contiguous().view(batch_size * num_beams, cur_len)  # (batch_size * num_beams, cur_len)\n\n        # generated hypotheses\n        generated_hyps = [\n            BeamHypotheses(num_beams, max_length, length_penalty, early_stopping=False) for _ in range(batch_size)\n        ]\n\n        # scores for each sentence in the beam\n        beam_scores = torch.zeros((batch_size, num_beams), dtype=torch.float, device=input_ids.device)\n        beam_scores[:, 1:] = -1e9\n        beam_scores = beam_scores.view(-1)  # shape (batch_size * num_beams,)\n\n        # cache compute states\n        past = None\n\n        # done sentences\n        done = [False for _ in range(batch_size)]\n\n        while cur_len < max_length:\n            model_inputs = self.prepare_inputs_for_generation(input_ids, past=past)\n            outputs = self(**model_inputs)  # (batch_size * num_beams, cur_len, vocab_size)\n            scores = outputs[0][:, -1, :]  # (batch_size * num_beams, vocab_size)\n\n            # if model has past, then set the past variable to speed up decoding\n            if self._do_output_past(outputs):\n                past = outputs[1]\n\n            # repetition penalty (from CTRL paper https://arxiv.org/abs/1909.05858)\n            if repetition_penalty != 1.0:\n                for i in range(batch_size * num_beams):\n                    for previous_token in set(input_ids[i].tolist()):\n                        # if score < 0 then repetition penalty has to be multiplied to reduce the previous token probability\n                        if scores[i, previous_token] < 0:\n                            scores[i, previous_token] *= repetition_penalty\n                        else:\n                            scores[i, previous_token] /= repetition_penalty\n\n            if do_sample:\n                # Temperature (higher temperature => more likely to sample low probability tokens)\n                if temperature != 1.0:\n                    scores = scores / temperature\n                # Top-p/top-k filtering\n                scores = top_k_top_p_filtering(\n                    scores, top_k=top_k, top_p=top_p, min_tokens_to_keep=2\n                )  # (batch_size * num_beams, vocab_size)\n                # Sample 2 next words for each beam (so we have some spare tokens and match output of greedy beam search)\n                next_words = torch.multinomial(F.softmax(scores, dim=-1), num_samples=2)  # (batch_size * num_beams, 2)\n                # Compute next scores\n                _scores = F.log_softmax(scores, dim=-1)  # (batch_size * num_beams, vocab_size)\n                _scores = torch.gather(_scores, -1, next_words)  # (batch_size * num_beams, 2)\n                next_scores = _scores + beam_scores[:, None].expand_as(_scores)  # (batch_size * num_beams, 2)\n                # Match shape of greedy beam search\n                next_words = next_words.view(batch_size, 2 * num_beams)  # 
(batch_size, 2 * num_beams)\n next_scores = next_scores.view(batch_size, 2 * num_beams) # (batch_size, 2 * num_beams)\n else:\n # do greedy beam search\n scores = F.log_softmax(scores, dim=-1) # (batch_size * num_beams, vocab_size)\n assert scores.size() == (batch_size * num_beams, vocab_size)\n # Add the log prob of the new beams to the log prob of the beginning of the sequence (sum of logs == log of the product)\n _scores = scores + beam_scores[:, None].expand_as(scores) # (batch_size * num_beams, vocab_size)\n # re-organize to group the beam together (we are keeping top hypothesis accross beams)\n _scores = _scores.view(batch_size, num_beams * vocab_size) # (batch_size, num_beams * vocab_size)\n next_scores, next_words = torch.topk(_scores, 2 * num_beams, dim=1, largest=True, sorted=True)\n\n assert next_scores.size() == next_words.size() == (batch_size, 2 * num_beams)\n\n # next batch beam content\n # list of (batch_size * num_beams) tuple(next hypothesis score, next word, current position in the batch)\n next_batch_beam = []\n\n # for each sentence\n for batch_idx in range(batch_size):\n\n # if we are done with this sentence\n done[batch_idx] = done[batch_idx] or generated_hyps[batch_idx].is_done(\n next_scores[batch_idx].max().item()\n )\n if done[batch_idx]:\n assert (\n len(generated_hyps[batch_idx]) >= num_beams\n ), \"Batch can only be done if at least {} beams have been generated\".format(num_beams)\n assert (\n eos_token_ids is not None and pad_token_id is not None\n ), \"generated beams >= num_beams -> eos_token_id and pad_token have to be defined\"\n next_batch_beam.extend([(0, pad_token_id, 0)] * num_beams) # pad the batch\n continue\n\n # next sentence beam content\n next_sent_beam = []\n\n # next words for this sentence\n for idx, score in zip(next_words[batch_idx], next_scores[batch_idx]):\n\n # get beam and word IDs\n beam_id = idx // vocab_size\n word_id = idx % vocab_size\n\n # add to generated hypotheses if end of sentence or last iteration\n if eos_token_ids is not None and word_id.item() in eos_token_ids:\n generated_hyps[batch_idx].add(\n input_ids[batch_idx * num_beams + beam_id, :cur_len].clone(), score.item()\n )\n else:\n # add next predicted word if it is not eos_token\n next_sent_beam.append((score, word_id, batch_idx * num_beams + beam_id))\n\n # the beam for next step is full\n if len(next_sent_beam) == num_beams:\n break\n\n # update next beam content\n assert len(next_sent_beam) == num_beams, \"Beam should always be full\"\n next_batch_beam.extend(next_sent_beam)\n assert len(next_batch_beam) == num_beams * (batch_idx + 1)\n\n # sanity check / prepare next batch\n assert len(next_batch_beam) == batch_size * num_beams\n beam_scores = beam_scores.new([x[0] for x in next_batch_beam])\n beam_words = input_ids.new([x[1] for x in next_batch_beam])\n beam_idx = input_ids.new([x[2] for x in next_batch_beam])\n\n # re-order batch\n input_ids = input_ids[beam_idx, :]\n input_ids = torch.cat([input_ids, beam_words.unsqueeze(1)], dim=-1)\n\n # re-order internal states\n if past:\n reordered_past = []\n for layer_past in past:\n # get the correct batch idx from layer past batch dim\n # batch dim of `past` and `mems` is at 2nd position\n reordered_layer_past = [layer_past[:, i].unsqueeze(1).clone().detach() for i in beam_idx]\n reordered_layer_past = torch.cat(reordered_layer_past, dim=1)\n # check that shape matches\n assert reordered_layer_past.shape == layer_past.shape\n reordered_past.append(reordered_layer_past)\n past = tuple(reordered_past)\n\n # update current 
length\n cur_len = cur_len + 1\n\n # stop when we are done with each sentence\n if all(done):\n break\n\n for batch_idx in range(batch_size):\n # Add all open beam hypothesis to generated_hyps\n if not done[batch_idx]:\n for idx, score in zip(next_words[batch_idx], next_scores[batch_idx]):\n\n # get beam and word IDs\n beam_id = idx // vocab_size\n word_id = idx % vocab_size\n generated_hyps[batch_idx].add(\n input_ids[batch_idx * num_beams + beam_id, :cur_len].clone(), score.item()\n )\n\n # select the best hypotheses\n sent_lengths = input_ids.new(batch_size)\n best = []\n\n for i, hypotheses in enumerate(generated_hyps):\n best_hyp = max(hypotheses.beams, key=lambda x: x[0])[1]\n sent_lengths[i] = len(best_hyp)\n best.append(best_hyp)\n\n # shorter batches are filled with pad_token\n if sent_lengths.min().item() != sent_lengths.max().item():\n assert pad_token_id is not None, \"`Pad_token_id` has to be defined\"\n sent_max_len = min(sent_lengths.max().item() + 1, max_length)\n decoded = input_ids.new(batch_size, sent_max_len).fill_(pad_token_id)\n\n # fill with hypothesis and eos_token_id if necessary\n for i, hypo in enumerate(best):\n decoded[i, : sent_lengths[i]] = hypo\n if sent_lengths[i] < max_length:\n decoded[i, sent_lengths[i]] = eos_token_ids[0]\n else:\n # none of the hypotheses have an eos_token\n assert (len(hypo) == max_length for hypo in best)\n decoded = torch.stack(best).type(torch.long).to(next(self.parameters()).device)\n\n return decoded" }, { "identifier": "prune_linear_layer", "path": "transformers/src/transformers/modeling_utils.py", "snippet": "def prune_linear_layer(layer, index, dim=0):\n \"\"\" Prune a linear layer (a model parameters) to keep only entries in index.\n Return the pruned layer as a new layer with requires_grad=True.\n Used to remove heads.\n \"\"\"\n index = index.to(layer.weight.device)\n W = layer.weight.index_select(dim, index).clone().detach()\n if layer.bias is not None:\n if dim == 1:\n b = layer.bias.clone().detach()\n else:\n b = layer.bias[index].clone().detach()\n new_size = list(layer.weight.size())\n new_size[dim] = len(index)\n new_layer = nn.Linear(new_size[1], new_size[0], bias=layer.bias is not None).to(layer.weight.device)\n new_layer.weight.requires_grad = False\n new_layer.weight.copy_(W.contiguous())\n new_layer.weight.requires_grad = True\n if layer.bias is not None:\n new_layer.bias.requires_grad = False\n new_layer.bias.copy_(b.contiguous())\n new_layer.bias.requires_grad = True\n return new_layer" }, { "identifier": "DistilBertModel", "path": "transformers/src/transformers/modeling_distilbert.py", "snippet": "class DistilBertModel(DistilBertPreTrainedModel):\n def __init__(self, config):\n super().__init__(config)\n\n self.embeddings = Embeddings(config) # Embeddings\n self.transformer = Transformer(config) # Encoder\n\n self.init_weights()\n\n def get_input_embeddings(self):\n return self.embeddings.word_embeddings\n\n def set_input_embeddings(self, new_embeddings):\n self.embeddings.word_embeddings = new_embeddings\n\n def _prune_heads(self, heads_to_prune):\n \"\"\" Prunes heads of the model.\n heads_to_prune: dict of {layer_num: list of heads to prune in this layer}\n See base class PreTrainedModel\n \"\"\"\n for layer, heads in heads_to_prune.items():\n self.transformer.layer[layer].attention.prune_heads(heads)\n\n @add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)\n def forward(self, input_ids=None, attention_mask=None, head_mask=None, inputs_embeds=None):\n r\"\"\"\n Return:\n 
:obj:`tuple(torch.FloatTensor)` comprising various elements depending on the configuration (:class:`~transformers.DistilBertConfig`) and inputs:\n last_hidden_state (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`):\n Sequence of hidden-states at the output of the last layer of the model.\n hidden_states (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_hidden_states=True``):\n Tuple of :obj:`torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer)\n of shape :obj:`(batch_size, sequence_length, hidden_size)`.\n\n Hidden-states of the model at the output of each layer plus the initial embedding outputs.\n attentions (:obj:`tuple(torch.FloatTensor)`, `optional`, returned when ``config.output_attentions=True``):\n Tuple of :obj:`torch.FloatTensor` (one for each layer) of shape\n :obj:`(batch_size, num_heads, sequence_length, sequence_length)`.\n\n Attentions weights after the attention softmax, used to compute the weighted average in the self-attention\n heads.\n\n Examples::\n\n from transformers import DistilBertTokenizer, DistilBertModel\n import torch\n\n tokenizer = DistilBertTokenizer.from_pretrained('distilbert-base-cased')\n model = DistilBertModel.from_pretrained('distilbert-base-cased')\n\n input_ids = torch.tensor(tokenizer.encode(\"Hello, my dog is cute\", add_special_tokens=True)).unsqueeze(0) # Batch size 1\n outputs = model(input_ids)\n\n last_hidden_states = outputs[0] # The last hidden-state is the first element of the output tuple\n\n \"\"\"\n if input_ids is not None and inputs_embeds is not None:\n raise ValueError(\"You cannot specify both input_ids and inputs_embeds at the same time\")\n elif input_ids is not None:\n input_shape = input_ids.size()\n elif inputs_embeds is not None:\n input_shape = inputs_embeds.size()[:-1]\n else:\n raise ValueError(\"You have to specify either input_ids or inputs_embeds\")\n\n device = input_ids.device if input_ids is not None else inputs_embeds.device\n\n if attention_mask is None:\n attention_mask = torch.ones(input_shape, device=device) # (bs, seq_length)\n\n # Prepare head mask if needed\n # 1.0 in head_mask indicate we keep the head\n # attention_probs has shape bsz x n_heads x N x N\n # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]\n # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]\n if head_mask is not None:\n if head_mask.dim() == 1:\n head_mask = head_mask.unsqueeze(0).unsqueeze(0).unsqueeze(-1).unsqueeze(-1)\n head_mask = head_mask.expand(self.config.num_hidden_layers, -1, -1, -1, -1)\n elif head_mask.dim() == 2:\n head_mask = (\n head_mask.unsqueeze(1).unsqueeze(-1).unsqueeze(-1)\n ) # We can specify head_mask for each layer\n head_mask = head_mask.to(\n dtype=next(self.parameters()).dtype\n ) # switch to fload if need + fp16 compatibility\n else:\n head_mask = [None] * self.config.num_hidden_layers\n\n if inputs_embeds is None:\n inputs_embeds = self.embeddings(input_ids) # (bs, seq_length, dim)\n tfmr_output = self.transformer(x=inputs_embeds, attn_mask=attention_mask, head_mask=head_mask)\n hidden_state = tfmr_output[0]\n output = (hidden_state,) + tfmr_output[1:]\n\n return output # last-layer hidden-state, (all hidden_states), (all attentions)" } ]
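Note: the `generate` / `_generate_no_beam_search` snippets in the context above call `top_k_top_p_filtering` without including its body. Below is a minimal sketch of such a filter, consistent with how it is used at the call sites; the signature, defaults, and 2-D `(batch, vocab)` logits are assumptions inferred from the usage, not code taken from this record:

import torch
import torch.nn.functional as F

def top_k_top_p_filtering(logits, top_k=0, top_p=1.0, filter_value=-float("Inf"), min_tokens_to_keep=1):
    """Mask out logits outside the top-k set and/or the top-p (nucleus) set. Assumes logits of shape (batch, vocab)."""
    if top_k > 0:
        top_k = min(max(top_k, min_tokens_to_keep), logits.size(-1))
        # drop every token whose logit is below the k-th largest logit
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p < 1.0:
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        # drop tokens once the cumulative probability exceeds top_p ...
        sorted_indices_to_remove = cumulative_probs > top_p
        if min_tokens_to_keep > 1:
            sorted_indices_to_remove[..., :min_tokens_to_keep] = 0
        # ... but always keep the first token that crosses the threshold
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices_to_remove.scatter(1, sorted_indices, sorted_indices_to_remove)
        logits[indices_to_remove] = filter_value
    return logits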
import copy
import logging
import math

import numpy as np
import torch
import torch.nn as nn
from torch.nn import CrossEntropyLoss

from .activations import gelu
from .configuration_distilbert import DistilBertConfig
from .file_utils import add_start_docstrings, add_start_docstrings_to_callable
from .modeling_utils import PreTrainedModel, prune_linear_layer
from .modeling_distilbert import DistilBertModel
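The `.activations` import above supplies `gelu`, used by the ranking heads further down; as a reference point, a minimal erf-based stand-in (whether the repo's activations module uses this exact variant is an assumption):

import math
import torch

def gelu(x):
    # Gaussian Error Linear Unit: x * Phi(x), with Phi the standard normal CDF
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))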
18023
            if (i in [1, mid]):
                h1 = gelu(self.linear_1[w](hidden_states))
                h2 = self.linear_2[w](h1).squeeze(-1)
                if question_ends_mask is None:
                    prob = torch.sigmoid(h2)
                    # temp_loss = MSE(prob, rank_prob[:,w,:]) * seq_mask
                    temp_loss = torch.norm((prob - rank_prob[:,w,:]) * seq_mask, p=1)
                    loss += temp_loss / torch.sum(seq_mask)
                else:
                    h2 = h2.view(-1, 4, num_items)
                    h2 = torch.mean(h2, dim=1)
                    prob = torch.sigmoid(h2)
                    temp_loss = torch.norm((prob - rank_prob[:,w,:]) * question_ends_mask, p=1)
                    loss += temp_loss / torch.sum(question_ends_mask)

            layer_outputs = layer_module(
                hidden_states,
                attn_mask,
                head_mask[i],
            )
            hidden_states = layer_outputs[0]

        outputs = (hidden_states, loss)
        return outputs


# INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL #
class DistilBertPreTrainedModel(PreTrainedModel):
    """ An abstract class to handle weights initialization and
        a simple interface for downloading and loading pretrained models.
    """

    config_class = DistilBertConfig
    pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP
    load_tf_weights = None
    base_model_prefix = "distilbert"

    def _init_weights(self, module):
        """ Initialize the weights. """
        if isinstance(module, nn.Embedding):
            if module.weight.requires_grad:
                module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        if isinstance(module, nn.Linear):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


DISTILBERT_START_DOCSTRING = r"""
    This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
    and behavior.

    Parameters:
        config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the configuration.
            Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights.
"""

DISTILBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary.
            Indices can be obtained using :class:`transformers.DistilBertTokenizer`.
            See :func:`transformers.PreTrainedTokenizer.encode` and
            :func:`transformers.PreTrainedTokenizer.encode_plus` for details.

            `What are input IDs? <../glossary.html#input-ids>`__
        attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`):
            Mask to avoid performing attention on padding token indices.
            Mask values selected in ``[0, 1]``:
            ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.

            `What are attention masks? <../glossary.html#attention-mask>`__
        head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`):
            Mask to nullify selected heads of the self-attention modules.
            Mask values selected in ``[0, 1]``:
            :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**.
        inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`):
            Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
            This is useful if you want more control over how to convert `input_ids` indices into associated vectors
            than the model's internal embedding lookup matrix.
"""


@add_start_docstrings(
    "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.",
    DISTILBERT_START_DOCSTRING,
)
class InitDistilBertModel(DistilBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.embeddings = Embeddings(config)  # Embeddings
        self.transformer = Transformer(config)  # Encoder

        self.init_weights()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, new_embeddings):
        self.embeddings.word_embeddings = new_embeddings

    def _prune_heads(self, heads_to_prune):
        """ Prunes heads of the model.
            heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
            See base class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.transformer.layer[layer].attention.prune_heads(heads)
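The intermediate-layer heads in the cropped code above reduce to a masked L1 loss between sigmoid scores and reference rank probabilities. A small sketch isolating that step (the function name `aux_rank_loss` and the shapes are assumptions inferred from the snippet, not part of the original file):

import torch

def aux_rank_loss(h2, target, mask):
    # h2: raw head scores (bs, num_items); target, mask: (bs, num_items)
    prob = torch.sigmoid(h2)
    # L1 distance on unmasked positions, normalized by the number of unmasked positions
    return torch.norm((prob - target) * mask, p=1) / torch.sum(mask)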
# coding=utf-8 # Copyright 2019-present, the HuggingFace Inc. team, The Google AI Language Team and Facebook, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ PyTorch DistilBERT model adapted in part from Facebook, Inc XLM model (https://github.com/facebookresearch/XLM) and in part from HuggingFace PyTorch version of Google AI Bert model (https://github.com/google-research/bert) """ logger = logging.getLogger(__name__) DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP = { "distilbert-base-uncased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-pytorch_model.bin", "distilbert-base-uncased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-distilled-squad-pytorch_model.bin", "distilbert-base-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-pytorch_model.bin", "distilbert-base-cased-distilled-squad": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-cased-distilled-squad-pytorch_model.bin", "distilbert-base-german-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-german-cased-pytorch_model.bin", "distilbert-base-multilingual-cased": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-multilingual-cased-pytorch_model.bin", "distilbert-base-uncased-finetuned-sst-2-english": "https://s3.amazonaws.com/models.huggingface.co/bert/distilbert-base-uncased-finetuned-sst-2-english-pytorch_model.bin", } # UTILS AND BUILDING BLOCKS OF THE ARCHITECTURE # def create_sinusoidal_embeddings(n_pos, dim, out): position_enc = np.array([[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]) out[:, 0::2] = torch.FloatTensor(np.sin(position_enc[:, 0::2])) out[:, 1::2] = torch.FloatTensor(np.cos(position_enc[:, 1::2])) out.detach_() out.requires_grad = False class Embeddings(nn.Module): def __init__(self, config): super().__init__() self.word_embeddings = nn.Embedding(config.vocab_size, config.dim, padding_idx=0) self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.dim) if config.sinusoidal_pos_embds: create_sinusoidal_embeddings( n_pos=config.max_position_embeddings, dim=config.dim, out=self.position_embeddings.weight ) self.LayerNorm = nn.LayerNorm(config.dim, eps=1e-12) self.dropout = nn.Dropout(config.dropout) def forward(self, input_ids): """ Parameters ---------- input_ids: torch.tensor(bs, max_seq_length) The token ids to embed. 
Outputs ------- embeddings: torch.tensor(bs, max_seq_length, dim) The embedded tokens (plus position embeddings, no token_type embeddings) """ seq_length = input_ids.size(1) position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device) # (max_seq_length) position_ids = position_ids.unsqueeze(0).expand_as(input_ids) # (bs, max_seq_length) word_embeddings = self.word_embeddings(input_ids) # (bs, max_seq_length, dim) position_embeddings = self.position_embeddings(position_ids) # (bs, max_seq_length, dim) embeddings = word_embeddings + position_embeddings # (bs, max_seq_length, dim) embeddings = self.LayerNorm(embeddings) # (bs, max_seq_length, dim) embeddings = self.dropout(embeddings) # (bs, max_seq_length, dim) return embeddings class MultiHeadSelfAttention(nn.Module): def __init__(self, config): super().__init__() self.n_heads = config.n_heads self.dim = config.dim self.dropout = nn.Dropout(p=config.attention_dropout) self.output_attentions = config.output_attentions assert self.dim % self.n_heads == 0 self.q_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.k_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.v_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.out_lin = nn.Linear(in_features=config.dim, out_features=config.dim) self.pruned_heads = set() def prune_heads(self, heads): attention_head_size = self.dim // self.n_heads if len(heads) == 0: return mask = torch.ones(self.n_heads, attention_head_size) heads = set(heads) - self.pruned_heads for head in heads: head -= sum(1 if h < head else 0 for h in self.pruned_heads) mask[head] = 0 mask = mask.view(-1).contiguous().eq(1) index = torch.arange(len(mask))[mask].long() # Prune linear layers self.q_lin = prune_linear_layer(self.q_lin, index) self.k_lin = prune_linear_layer(self.k_lin, index) self.v_lin = prune_linear_layer(self.v_lin, index) self.out_lin = prune_linear_layer(self.out_lin, index, dim=1) # Update hyper params self.n_heads = self.n_heads - len(heads) self.dim = attention_head_size * self.n_heads self.pruned_heads = self.pruned_heads.union(heads) def forward(self, query, key, value, mask, head_mask=None): """ Parameters ---------- query: torch.tensor(bs, seq_length, dim) key: torch.tensor(bs, seq_length, dim) value: torch.tensor(bs, seq_length, dim) mask: torch.tensor(bs, seq_length) Outputs ------- weights: torch.tensor(bs, n_heads, seq_length, seq_length) Attention weights context: torch.tensor(bs, seq_length, dim) Contextualized layer. 
Optional: only if `output_attentions=True` """ bs, q_length, dim = query.size() k_length = key.size(1) # assert dim == self.dim, 'Dimensions do not match: %s input vs %s configured' % (dim, self.dim) # assert key.size() == value.size() dim_per_head = self.dim // self.n_heads mask_reshp = (bs, 1, 1, k_length) def shape(x): """ separate heads """ return x.view(bs, -1, self.n_heads, dim_per_head).transpose(1, 2) def unshape(x): """ group heads """ return x.transpose(1, 2).contiguous().view(bs, -1, self.n_heads * dim_per_head) q = shape(self.q_lin(query)) # (bs, n_heads, q_length, dim_per_head) k = shape(self.k_lin(key)) # (bs, n_heads, k_length, dim_per_head) v = shape(self.v_lin(value)) # (bs, n_heads, k_length, dim_per_head) q = q / math.sqrt(dim_per_head) # (bs, n_heads, q_length, dim_per_head) scores = torch.matmul(q, k.transpose(2, 3)) # (bs, n_heads, q_length, k_length) mask = (mask == 0).view(mask_reshp).expand_as(scores) # (bs, n_heads, q_length, k_length) scores.masked_fill_(mask, -float("inf")) # (bs, n_heads, q_length, k_length) weights = nn.Softmax(dim=-1)(scores) # (bs, n_heads, q_length, k_length) weights = self.dropout(weights) # (bs, n_heads, q_length, k_length) # Mask heads if we want to if head_mask is not None: weights = weights * head_mask context = torch.matmul(weights, v) # (bs, n_heads, q_length, dim_per_head) context = unshape(context) # (bs, q_length, dim) context = self.out_lin(context) # (bs, q_length, dim) if self.output_attentions: return (context, weights) else: return (context,) class FFN(nn.Module): def __init__(self, config): super().__init__() self.dropout = nn.Dropout(p=config.dropout) self.lin1 = nn.Linear(in_features=config.dim, out_features=config.hidden_dim) self.lin2 = nn.Linear(in_features=config.hidden_dim, out_features=config.dim) assert config.activation in ["relu", "gelu"], "activation ({}) must be in ['relu', 'gelu']".format( config.activation ) self.activation = gelu if config.activation == "gelu" else nn.ReLU() def forward(self, input): x = self.lin1(input) x = self.activation(x) x = self.lin2(x) x = self.dropout(x) return x class TransformerBlock(nn.Module): def __init__(self, config): super().__init__() self.output_attentions = config.output_attentions assert config.dim % config.n_heads == 0 self.attention = MultiHeadSelfAttention(config) self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) self.ffn = FFN(config) self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12) def forward(self, x, attn_mask=None, head_mask=None): """ Parameters ---------- x: torch.tensor(bs, seq_length, dim) attn_mask: torch.tensor(bs, seq_length) Outputs ------- sa_weights: torch.tensor(bs, n_heads, seq_length, seq_length) The attention weights ffn_output: torch.tensor(bs, seq_length, dim) The output of the transformer block contextualization. 
""" # Self-Attention sa_output = self.attention(query=x, key=x, value=x, mask=attn_mask, head_mask=head_mask) if self.output_attentions: sa_output, sa_weights = sa_output # (bs, seq_length, dim), (bs, n_heads, seq_length, seq_length) else: # To handle these `output_attention` or `output_hidden_states` cases returning tuples assert type(sa_output) == tuple sa_output = sa_output[0] sa_output = self.sa_layer_norm(sa_output + x) # (bs, seq_length, dim) # Feed Forward Network ffn_output = self.ffn(sa_output) # (bs, seq_length, dim) ffn_output = self.output_layer_norm(ffn_output + sa_output) # (bs, seq_length, dim) output = (ffn_output,) if self.output_attentions: output = (sa_weights,) + output return output class Transformer(nn.Module): def __init__(self, config): super().__init__() self.n_layers = config.n_layers self.output_attentions = config.output_attentions self.output_hidden_states = config.output_hidden_states layer = TransformerBlock(config) self.layer = nn.ModuleList([copy.deepcopy(layer) for _ in range(config.n_layers)]) self.linear_size = 32 self.linear_1 = nn.ModuleList([nn.Linear(config.hidden_size, self.linear_size), nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size), # nn.Linear(config.hidden_size, self.linear_size) ]) self.linear_2 = nn.ModuleList([nn.Linear(self.linear_size, 1), nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1), # nn.Linear(self.linear_size, 1) ]) def forward(self, x, attn_mask=None, head_mask=None, rank_prob=None, question_ends_mask=None): all_hidden_states = () all_attentions = () hidden_states = x bsz, ori_num_items, dim = hidden_states.size() seq_mask = attn_mask.to(dtype=next(self.parameters()).dtype) num_items = hidden_states.size(1) tot_zoom = None tot_select_loss = 0 Ls = [] w = 0 mid = len(self.layer)//2 loss = 0 for i, layer_module in enumerate(self.layer): if self.output_hidden_states: all_hidden_states = all_hidden_states + (hidden_states,) if (i in [1, mid]): h1 = gelu(self.linear_1[w](hidden_states)) h2 = self.linear_2[w](h1).squeeze(-1) if question_ends_mask is None: prob = torch.sigmoid(h2) # temp_loss = MSE(prob, rank_prob[:,w,:]) * seq_mask temp_loss = torch.norm((prob - rank_prob[:,w,:]) * seq_mask, p=1) loss += temp_loss / torch.sum(seq_mask) else: h2 = h2.view(-1, 4, num_items) h2 = torch.mean(h2, dim=1) prob = torch.sigmoid(h2) temp_loss = torch.norm((prob - rank_prob[:,w,:]) * question_ends_mask, p=1) loss += temp_loss / torch.sum(question_ends_mask) layer_outputs = layer_module( hidden_states, attn_mask, head_mask[i], ) hidden_states = layer_outputs[0] outputs = (hidden_states, loss) return outputs # INTERFACE FOR ENCODER AND TASK SPECIFIC MODEL # class DistilBertPreTrainedModel(PreTrainedModel): """ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained models. """ config_class = DistilBertConfig pretrained_model_archive_map = DISTILBERT_PRETRAINED_MODEL_ARCHIVE_MAP load_tf_weights = None base_model_prefix = "distilbert" def _init_weights(self, module): """ Initialize the weights. 
""" if isinstance(module, nn.Embedding): if module.weight.requires_grad: module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) if isinstance(module, nn.Linear): module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) elif isinstance(module, nn.LayerNorm): module.bias.data.zero_() module.weight.data.fill_(1.0) if isinstance(module, nn.Linear) and module.bias is not None: module.bias.data.zero_() DISTILBERT_START_DOCSTRING = r""" This model is a PyTorch `torch.nn.Module <https://pytorch.org/docs/stable/nn.html#torch.nn.Module>`_ sub-class. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config (:class:`~transformers.DistilBertConfig`): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model weights. """ DISTILBERT_INPUTS_DOCSTRING = r""" Args: input_ids (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_length)`): Indices of input sequence tokens in the vocabulary. Indices can be obtained using :class:`transformers.DistilBertTokenizer`. See :func:`transformers.PreTrainedTokenizer.encode` and :func:`transformers.PreTrainedTokenizer.encode_plus` for details. `What are input IDs? <../glossary.html#input-ids>`__ attention_mask (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length)`, `optional`, defaults to :obj:`None`): Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``: ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens. `What are attention masks? <../glossary.html#attention-mask>`__ head_mask (:obj:`torch.FloatTensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`, defaults to :obj:`None`): Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``: :obj:`1` indicates the head is **not masked**, :obj:`0` indicates the head is **masked**. inputs_embeds (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_length, hidden_size)`, `optional`, defaults to :obj:`None`): Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation. This is useful if you want more control over how to convert `input_ids` indices into associated vectors than the model's internal embedding lookup matrix. """ @add_start_docstrings( "The bare DistilBERT encoder/transformer outputting raw hidden-states without any specific head on top.", DISTILBERT_START_DOCSTRING, ) class InitDistilBertModel(DistilBertPreTrainedModel): def __init__(self, config): super().__init__(config) self.embeddings = Embeddings(config) # Embeddings self.transformer = Transformer(config) # Encoder self.init_weights() def get_input_embeddings(self): return self.embeddings.word_embeddings def set_input_embeddings(self, new_embeddings): self.embeddings.word_embeddings = new_embeddings def _prune_heads(self, heads_to_prune): """ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base class PreTrainedModel """ for layer, heads in heads_to_prune.items(): self.transformer.layer[layer].attention.prune_heads(heads)
@add_start_docstrings_to_callable(DISTILBERT_INPUTS_DOCSTRING)
3
2023-10-15 02:31:09+00:00
24k
akashgreninja/GreSec
backend/venv/lib/python3.10/site-packages/pydantic/type_adapter.py
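The record below targets pydantic's `type_adapter.py`; for orientation, a minimal `TypeAdapter` usage sketch against the pydantic v2 public API (the example type `list[int]` is arbitrary):

from pydantic import TypeAdapter

adapter = TypeAdapter(list[int])
print(adapter.validate_python(["1", 2]))  # -> [1, 2], coerced and validated
print(adapter.dump_json([1, 2]))          # -> b'[1,2]'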
[ { "identifier": "_config", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_config.py", "snippet": "DEPRECATION_MESSAGE = 'Support for class-based `config` is deprecated, use ConfigDict instead.'\nV2_REMOVED_KEYS = {\n 'allow_mutation',\n 'error_msg_templates',\n 'fields',\n 'getter_dict',\n 'smart_union',\n 'underscore_attrs_are_private',\n 'json_loads',\n 'json_dumps',\n 'copy_on_model_validation',\n 'post_init_call',\n}\nV2_RENAMED_KEYS = {\n 'allow_population_by_field_name': 'populate_by_name',\n 'anystr_lower': 'str_to_lower',\n 'anystr_strip_whitespace': 'str_strip_whitespace',\n 'anystr_upper': 'str_to_upper',\n 'keep_untouched': 'ignored_types',\n 'max_anystr_length': 'str_max_length',\n 'min_anystr_length': 'str_min_length',\n 'orm_mode': 'from_attributes',\n 'schema_extra': 'json_schema_extra',\n 'validate_all': 'validate_default',\n}\nclass ConfigWrapper:\nclass ConfigWrapperStack:\n def __init__(self, config: ConfigDict | dict[str, Any] | type[Any] | None, *, check: bool = True):\n def for_model(cls, bases: tuple[type[Any], ...], namespace: dict[str, Any], kwargs: dict[str, Any]) -> Self:\n def __getattr__(self, name: str) -> Any:\n def core_config(self, obj: Any) -> core_schema.CoreConfig:\n def dict_not_none(**kwargs: Any) -> Any:\n def __repr__(self):\n def __init__(self, config_wrapper: ConfigWrapper):\n def tail(self) -> ConfigWrapper:\n def push(self, config_wrapper: ConfigWrapper | ConfigDict | None) -> ContextManager[None]:\n def _context_manager() -> Iterator[None]:\ndef prepare_config(config: ConfigDict | dict[str, Any] | type[Any] | None) -> ConfigDict:\ndef check_deprecated(config_dict: ConfigDict) -> None:" }, { "identifier": "_core_utils", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_core_utils.py", "snippet": "_CORE_SCHEMA_FIELD_TYPES = {'typed-dict-field', 'dataclass-field', 'model-field', 'computed-field'}\n_FUNCTION_WITH_INNER_SCHEMA_TYPES = {'function-before', 'function-after', 'function-wrap'}\n_LIST_LIKE_SCHEMA_WITH_ITEMS_TYPES = {'list', 'tuple-variable', 'set', 'frozenset'}\n_DEFINITIONS_CACHE_METADATA_KEY = 'pydantic.definitions_cache'\nNEEDS_APPLY_DISCRIMINATED_UNION_METADATA_KEY = 'pydantic.internal.needs_apply_discriminated_union'\nHAS_INVALID_SCHEMAS_METADATA_KEY = 'pydantic.internal.invalid'\nT = TypeVar('T')\ndef is_core_schema(\n schema: CoreSchemaOrField,\n) -> TypeGuard[CoreSchema]:\ndef is_core_schema_field(\n schema: CoreSchemaOrField,\n) -> TypeGuard[CoreSchemaField]:\ndef is_function_with_inner_schema(\n schema: CoreSchemaOrField,\n) -> TypeGuard[FunctionSchemaWithInnerSchema]:\ndef is_list_like_schema_with_items_schema(\n schema: CoreSchema,\n) -> TypeGuard[\ndef get_type_ref(type_: type[Any], args_override: tuple[type[Any], ...] 
| None = None) -> str:\ndef get_ref(s: core_schema.CoreSchema) -> None | str:\ndef collect_definitions(schema: core_schema.CoreSchema) -> dict[str, core_schema.CoreSchema]:\n def _record_valid_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\ndef define_expected_missing_refs(\n schema: core_schema.CoreSchema, allowed_missing_refs: set[str]\n) -> core_schema.CoreSchema | None:\ndef collect_invalid_schemas(schema: core_schema.CoreSchema) -> bool:\n def _is_schema_valid(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def __init__(self):\n def _build_schema_type_to_method(self) -> dict[core_schema.CoreSchemaType, Recurse]:\n def walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _walk(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _handle_other_schemas(self, schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\n def _handle_ser_schemas(self, ser_schema: core_schema.SerSchema, f: Walk) -> core_schema.SerSchema:\n def handle_definitions_schema(self, schema: core_schema.DefinitionsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_list_schema(self, schema: core_schema.ListSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_set_schema(self, schema: core_schema.SetSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_frozenset_schema(self, schema: core_schema.FrozenSetSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_generator_schema(self, schema: core_schema.GeneratorSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_tuple_variable_schema(\n self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk\n ) -> core_schema.CoreSchema:\n def handle_tuple_positional_schema(\n self, schema: core_schema.TupleVariableSchema | core_schema.TuplePositionalSchema, f: Walk\n ) -> core_schema.CoreSchema:\n def handle_dict_schema(self, schema: core_schema.DictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_function_schema(self, schema: AnyFunctionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_union_schema(self, schema: core_schema.UnionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_tagged_union_schema(self, schema: core_schema.TaggedUnionSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_chain_schema(self, schema: core_schema.ChainSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_model_fields_schema(self, schema: core_schema.ModelFieldsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_typed_dict_schema(self, schema: core_schema.TypedDictSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_arguments_schema(self, schema: core_schema.ArgumentsSchema, f: Walk) -> core_schema.CoreSchema:\n def handle_call_schema(self, schema: core_schema.CallSchema, f: Walk) -> core_schema.CoreSchema:\ndef walk_core_schema(schema: core_schema.CoreSchema, f: Walk) -> core_schema.CoreSchema:\ndef simplify_schema_references(schema: core_schema.CoreSchema) -> core_schema.CoreSchema: # noqa: C901\n def collect_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\n def count_refs(s: core_schema.CoreSchema, recurse: Recurse) -> 
core_schema.CoreSchema:\n def can_be_inlined(s: core_schema.DefinitionReferenceSchema, ref: str) -> bool:\n def inline_refs(s: core_schema.CoreSchema, recurse: Recurse) -> core_schema.CoreSchema:\ndef _strip_metadata(schema: CoreSchema) -> CoreSchema:\n def strip_metadata(s: CoreSchema, recurse: Recurse) -> CoreSchema:\ndef pretty_print_core_schema(\n schema: CoreSchema,\n include_metadata: bool = False,\n) -> None:\ndef validate_core_schema(schema: CoreSchema) -> CoreSchema:\nclass _WalkCoreSchema:" }, { "identifier": "_discriminated_union", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_discriminated_union.py", "snippet": "CORE_SCHEMA_METADATA_DISCRIMINATOR_PLACEHOLDER_KEY = 'pydantic.internal.union_discriminator'\nclass MissingDefinitionForUnionRef(Exception):\nclass _ApplyInferredDiscriminator:\n def __init__(self, ref: str) -> None:\ndef set_discriminator(schema: CoreSchema, discriminator: Any) -> None:\ndef apply_discriminators(schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n def inner(s: core_schema.CoreSchema, recurse: _core_utils.Recurse) -> core_schema.CoreSchema:\ndef apply_discriminator(\n schema: core_schema.CoreSchema, discriminator: str, definitions: dict[str, core_schema.CoreSchema] | None = None\n) -> core_schema.CoreSchema:\n def __init__(self, discriminator: str, definitions: dict[str, core_schema.CoreSchema]):\n def apply(self, schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n def _apply_to_root(self, schema: core_schema.CoreSchema) -> core_schema.CoreSchema:\n def _handle_choice(self, choice: core_schema.CoreSchema) -> None:\n def _is_discriminator_shared(self, choice: core_schema.TaggedUnionSchema) -> bool:\n def _infer_discriminator_values_for_choice( # noqa C901\n self, choice: core_schema.CoreSchema, source_name: str | None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_typed_dict_choice(\n self, choice: core_schema.TypedDictSchema, source_name: str | None = None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_model_choice(\n self, choice: core_schema.ModelFieldsSchema, source_name: str | None = None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_dataclass_choice(\n self, choice: core_schema.DataclassArgsSchema, source_name: str | None = None\n ) -> list[str | int]:\n def _infer_discriminator_values_for_field(self, field: CoreSchemaField, source: str) -> list[str | int]:\n def _infer_discriminator_values_for_inner_schema(\n self, schema: core_schema.CoreSchema, source: str\n ) -> list[str | int]:\n def _set_unique_choice_for_values(self, choice: core_schema.CoreSchema, values: Sequence[str | int]) -> None:" }, { "identifier": "_generate_schema", "path": "backend/venv/lib/python3.10/site-packages/pydantic/_internal/_generate_schema.py", "snippet": "def _generate_schema(self, obj: Any) -> core_schema.CoreSchema:\n \"\"\"Recursively generate a pydantic-core schema for any supported python type.\"\"\"\n has_invalid_schema = self._has_invalid_schema\n self._has_invalid_schema = False\n needs_apply_discriminated_union = self._needs_apply_discriminated_union\n self._needs_apply_discriminated_union = False\n schema = self._post_process_generated_schema(self._generate_schema_inner(obj))\n self._has_invalid_schema = self._has_invalid_schema or has_invalid_schema\n self._needs_apply_discriminated_union = self._needs_apply_discriminated_union or needs_apply_discriminated_union\n return schema" }, { "identifier": "_typing_extra", "path": 
"backend/venv/lib/python3.10/site-packages/pydantic/_internal/_typing_extra.py", "snippet": " def origin_is_union(tp: type[Any] | None) -> bool:\n def origin_is_union(tp: type[Any] | None) -> bool:\ndef is_none_type(type_: Any) -> bool:\ndef is_callable_type(type_: type[Any]) -> bool:\ndef is_literal_type(type_: type[Any]) -> bool:\ndef literal_values(type_: type[Any]) -> tuple[Any, ...]:\ndef all_literal_values(type_: type[Any]) -> list[Any]:\ndef is_annotated(ann_type: Any) -> bool:\ndef is_namedtuple(type_: type[Any]) -> bool:\ndef is_new_type(type_: type[Any]) -> bool:\ndef _check_classvar(v: type[Any] | None) -> bool:\ndef is_classvar(ann_type: type[Any]) -> bool:\ndef _check_finalvar(v: type[Any] | None) -> bool:\ndef is_finalvar(ann_type: Any) -> bool:\ndef parent_frame_namespace(*, parent_depth: int = 2) -> dict[str, Any] | None:\ndef add_module_globals(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_types_namespace(cls: type[Any], parent_namespace: dict[str, Any] | None = None) -> dict[str, Any]:\ndef get_cls_type_hints_lenient(obj: Any, globalns: dict[str, Any] | None = None) -> dict[str, Any]:\ndef eval_type_lenient(value: Any, globalns: dict[str, Any] | None, localns: dict[str, Any] | None) -> Any:\ndef get_function_type_hints(\n function: Callable[..., Any], *, include_keys: set[str] | None = None, types_namespace: dict[str, Any] | None = None\n) -> dict[str, Any]:\n def _make_forward_ref(\n arg: Any,\n is_argument: bool = True,\n *,\n is_class: bool = False,\n ) -> typing.ForwardRef:\n def get_type_hints( # noqa: C901\n obj: Any,\n globalns: dict[str, Any] | None = None,\n localns: dict[str, Any] | None = None,\n include_extras: bool = False,\n ) -> dict[str, Any]: # pragma: no cover\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\n def evaluate_fwd_ref(\n ref: ForwardRef, globalns: dict[str, Any] | None = None, localns: dict[str, Any] | None = None\n ) -> Any:\ndef is_dataclass(_cls: type[Any]) -> TypeGuard[type[StandardDataclass]]:\ndef origin_is_type_alias_type(origin: Any) -> TypeGuard[TypeAliasType]:\nLITERAL_TYPES: set[Any] = {Literal}\nNONE_TYPES: tuple[Any, ...] = (None, NoneType, *(tp[None] for tp in LITERAL_TYPES))" }, { "identifier": "ConfigDict", "path": "backend/venv/lib/python3.10/site-packages/pydantic/config.py", "snippet": "class ConfigDict(TypedDict, total=False):\n \"\"\"A TypedDict for configuring Pydantic behaviour.\"\"\"\n\n title: str | None\n \"\"\"The title for the generated JSON schema, defaults to the model's name\"\"\"\n\n str_to_lower: bool\n \"\"\"Whether to convert all characters to lowercase for str types. Defaults to `False`.\"\"\"\n\n str_to_upper: bool\n \"\"\"Whether to convert all characters to uppercase for str types. Defaults to `False`.\"\"\"\n str_strip_whitespace: bool\n \"\"\"Whether to strip leading and trailing whitespace for str types.\"\"\"\n\n str_min_length: int\n \"\"\"The minimum length for str types. Defaults to `None`.\"\"\"\n\n str_max_length: int | None\n \"\"\"The maximum length for str types. Defaults to `None`.\"\"\"\n\n extra: ExtraValues | None\n \"\"\"\n Whether to ignore, allow, or forbid extra attributes during model initialization. 
Defaults to `'ignore'`.\n\n You can configure how pydantic handles the attributes that are not defined in the model:\n\n * `allow` - Allow any extra attributes.\n * `forbid` - Forbid any extra attributes.\n * `ignore` - Ignore any extra attributes.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='ignore') # (1)!\n\n name: str\n\n\n user = User(name='John Doe', age=20) # (2)!\n print(user)\n #> name='John Doe'\n ```\n\n 1. This is the default behaviour.\n 2. The `age` argument is ignored.\n\n Instead, with `extra='allow'`, the `age` argument is included:\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='allow')\n\n name: str\n\n\n user = User(name='John Doe', age=20) # (1)!\n print(user)\n #> name='John Doe' age=20\n ```\n\n 1. The `age` argument is included.\n\n With `extra='forbid'`, an error is raised:\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n\n class User(BaseModel):\n model_config = ConfigDict(extra='forbid')\n\n name: str\n\n\n try:\n User(name='John Doe', age=20)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for User\n age\n Extra inputs are not permitted [type=extra_forbidden, input_value=20, input_type=int]\n '''\n ```\n \"\"\"\n\n frozen: bool\n \"\"\"\n Whether or not models are faux-immutable, i.e. whether `__setattr__` is allowed, and also generates\n a `__hash__()` method for the model. This makes instances of the model potentially hashable if all the\n attributes are hashable. Defaults to `False`.\n\n Note:\n On V1, this setting was called `allow_mutation`, and was `True` by default.\n \"\"\"\n\n populate_by_name: bool\n \"\"\"\n Whether an aliased field may be populated by its name as given by the model\n attribute, as well as the alias. Defaults to `False`.\n\n Note:\n The name of this configuration setting was changed in **v2.0** from\n `allow_population_by_alias` to `populate_by_name`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, Field\n\n\n class User(BaseModel):\n model_config = ConfigDict(populate_by_name=True)\n\n name: str = Field(alias='full_name') # (1)!\n age: int\n\n\n user = User(full_name='John Doe', age=20) # (2)!\n print(user)\n #> name='John Doe' age=20\n user = User(name='John Doe', age=20) # (3)!\n print(user)\n #> name='John Doe' age=20\n ```\n\n 1. The field `'name'` has an alias `'full_name'`.\n 2. The model is populated by the alias `'full_name'`.\n 3. The model is populated by the field name `'name'`.\n \"\"\"\n\n use_enum_values: bool\n \"\"\"\n Whether to populate models with the `value` property of enums, rather than the raw enum.\n This may be useful if you want to serialize `model.model_dump()` later. Defaults to `False`.\n \"\"\"\n\n validate_assignment: bool\n \"\"\"\n Whether to validate the data when the model is changed. Defaults to `False`.\n\n The default behavior of Pydantic is to validate the data when the model is created.\n\n In case the user changes the data after the model is created, the model is _not_ revalidated.\n\n ```py\n from pydantic import BaseModel\n\n class User(BaseModel):\n name: str\n\n user = User(name='John Doe') # (1)!\n print(user)\n #> name='John Doe'\n user.name = 123 # (1)!\n print(user)\n #> name=123\n ```\n\n 1. The validation happens only when the model is created.\n 2. 
The validation does not happen when the data is changed.\n\n In case you want to revalidate the model when the data is changed, you can use `validate_assignment=True`:\n\n ```py\n from pydantic import BaseModel, ValidationError\n\n class User(BaseModel, validate_assignment=True): # (1)!\n name: str\n\n user = User(name='John Doe') # (2)!\n print(user)\n #> name='John Doe'\n try:\n user.name = 123 # (3)!\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for User\n name\n Input should be a valid string [type=string_type, input_value=123, input_type=int]\n '''\n ```\n\n 1. You can either use class keyword arguments, or `model_config` to set `validate_assignment=True`.\n 2. The validation happens when the model is created.\n 3. The validation _also_ happens when the data is changed.\n \"\"\"\n\n arbitrary_types_allowed: bool\n \"\"\"\n Whether arbitrary types are allowed for field types. Defaults to `False`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n # This is not a pydantic model, it's an arbitrary class\n class Pet:\n def __init__(self, name: str):\n self.name = name\n\n class Model(BaseModel):\n model_config = ConfigDict(arbitrary_types_allowed=True)\n\n pet: Pet\n owner: str\n\n pet = Pet(name='Hedwig')\n # A simple check of instance type is used to validate the data\n model = Model(owner='Harry', pet=pet)\n print(model)\n #> pet=<__main__.Pet object at 0x0123456789ab> owner='Harry'\n print(model.pet)\n #> <__main__.Pet object at 0x0123456789ab>\n print(model.pet.name)\n #> Hedwig\n print(type(model.pet))\n #> <class '__main__.Pet'>\n try:\n # If the value is not an instance of the type, it's invalid\n Model(owner='Harry', pet='Hedwig')\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n pet\n Input should be an instance of Pet [type=is_instance_of, input_value='Hedwig', input_type=str]\n '''\n\n # Nothing in the instance of the arbitrary type is checked\n # Here name probably should have been a str, but it's not validated\n pet2 = Pet(name=42)\n model2 = Model(owner='Harry', pet=pet2)\n print(model2)\n #> pet=<__main__.Pet object at 0x0123456789ab> owner='Harry'\n print(model2.pet)\n #> <__main__.Pet object at 0x0123456789ab>\n print(model2.pet.name)\n #> 42\n print(type(model2.pet))\n #> <class '__main__.Pet'>\n ```\n \"\"\"\n\n from_attributes: bool\n \"\"\"\n Whether to build models and look up discriminators of tagged unions using python object attributes.\n \"\"\"\n\n loc_by_alias: bool\n \"\"\"Whether to use the actual key provided in the data (e.g. alias) for error `loc`s rather than the field's name. Defaults to `True`.\"\"\"\n\n alias_generator: Callable[[str], str] | None\n \"\"\"\n A callable that takes a field name and returns an alias for it.\n\n If data source field names do not match your code style (e. g. 
CamelCase fields),\n you can automatically generate aliases using `alias_generator`:\n\n ```py\n from pydantic import BaseModel, ConfigDict\n from pydantic.alias_generators import to_pascal\n\n class Voice(BaseModel):\n model_config = ConfigDict(alias_generator=to_pascal)\n\n name: str\n language_code: str\n\n voice = Voice(Name='Filiz', LanguageCode='tr-TR')\n print(voice.language_code)\n #> tr-TR\n print(voice.model_dump(by_alias=True))\n #> {'Name': 'Filiz', 'LanguageCode': 'tr-TR'}\n ```\n\n Note:\n Pydantic offers three built-in alias generators: [`to_pascal`][pydantic.alias_generators.to_pascal],\n [`to_camel`][pydantic.alias_generators.to_camel], and [`to_snake`][pydantic.alias_generators.to_snake].\n \"\"\"\n\n ignored_types: tuple[type, ...]\n \"\"\"A tuple of types that may occur as values of class attributes without annotations. This is\n typically used for custom descriptors (classes that behave like `property`). If an attribute is set on a\n class without an annotation and has a type that is not in this tuple (or otherwise recognized by\n _pydantic_), an error will be raised. Defaults to `()`.\n \"\"\"\n\n allow_inf_nan: bool\n \"\"\"Whether to allow infinity (`+inf` an `-inf`) and NaN values to float fields. Defaults to `True`.\"\"\"\n\n json_schema_extra: dict[str, object] | JsonSchemaExtraCallable | None\n \"\"\"A dict or callable to provide extra JSON schema properties. Defaults to `None`.\"\"\"\n\n json_encoders: dict[type[object], JsonEncoder] | None\n \"\"\"\n A `dict` of custom JSON encoders for specific types. Defaults to `None`.\n\n !!! warning \"Deprecated\"\n This config option is a carryover from v1.\n We originally planned to remove it in v2 but didn't have a 1:1 replacement so we are keeping it for now.\n It is still deprecated and will likely be removed in the future.\n \"\"\"\n\n # new in V2\n strict: bool\n \"\"\"\n _(new in V2)_ If `True`, strict validation is applied to all fields on the model.\n\n By default, Pydantic attempts to coerce values to the correct type, when possible.\n\n There are situations in which you may want to disable this behavior, and instead raise an error if a value's type\n does not match the field's type annotation.\n\n To configure strict mode for all fields on a model, you can set `strict=True` on the model.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n class Model(BaseModel):\n model_config = ConfigDict(strict=True)\n\n name: str\n age: int\n ```\n\n See [Strict Mode](../concepts/strict_mode.md) for more details.\n\n See the [Conversion Table](../concepts/conversion_table.md) for more details on how Pydantic converts data in both\n strict and lax modes.\n \"\"\"\n # whether instances of models and dataclasses (including subclass instances) should re-validate, default 'never'\n revalidate_instances: Literal['always', 'never', 'subclass-instances']\n \"\"\"\n When and how to revalidate models and dataclasses during validation. Accepts the string\n values of `'never'`, `'always'` and `'subclass-instances'`. 
Defaults to `'never'`.\n\n - `'never'` will not revalidate models and dataclasses during validation\n - `'always'` will revalidate models and dataclasses during validation\n - `'subclass-instances'` will revalidate models and dataclasses during validation if the instance is a\n subclass of the model or dataclass\n\n By default, model and dataclass instances are not revalidated during validation.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel\n\n class User(BaseModel, revalidate_instances='never'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1] # (2)!\n t = Transaction(user=my_user) # (3)!\n print(t)\n #> user=User(hobbies=[1])\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t)\n #> user=SubUser(hobbies=['scuba diving'], sins=['lying'])\n ```\n\n 1. `revalidate_instances` is set to `'never'` by **default.\n 2. The assignment is not validated, unless you set `validate_assignment` to `True` in the model's config.\n 3. Since `revalidate_instances` is set to `never`, this is not revalidated.\n\n If you want to revalidate instances during validation, you can set `revalidate_instances` to `'always'`\n in the model's config.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel, ValidationError\n\n class User(BaseModel, revalidate_instances='always'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1]\n try:\n t = Transaction(user=my_user) # (2)!\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Transaction\n user.hobbies.0\n Input should be a valid string [type=string_type, input_value=1, input_type=int]\n '''\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t) # (3)!\n #> user=User(hobbies=['scuba diving'])\n ```\n\n 1. `revalidate_instances` is set to `'always'`.\n 2. The model is revalidated, since `revalidate_instances` is set to `'always'`.\n 3. Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`.\n\n It's also possible to set `revalidate_instances` to `'subclass-instances'` to only revalidate instances\n of subclasses of the model.\n\n ```py\n from typing import List\n\n from pydantic import BaseModel\n\n class User(BaseModel, revalidate_instances='subclass-instances'): # (1)!\n hobbies: List[str]\n\n class SubUser(User):\n sins: List[str]\n\n class Transaction(BaseModel):\n user: User\n\n my_user = User(hobbies=['reading'])\n t = Transaction(user=my_user)\n print(t)\n #> user=User(hobbies=['reading'])\n\n my_user.hobbies = [1]\n t = Transaction(user=my_user) # (2)!\n print(t)\n #> user=User(hobbies=[1])\n\n my_sub_user = SubUser(hobbies=['scuba diving'], sins=['lying'])\n t = Transaction(user=my_sub_user)\n print(t) # (3)!\n #> user=User(hobbies=['scuba diving'])\n ```\n\n 1. `revalidate_instances` is set to `'subclass-instances'`.\n 2. This is not revalidated, since `my_user` is not a subclass of `User`.\n 3. 
Using `'never'` we would have gotten `user=SubUser(hobbies=['scuba diving'], sins=['lying'])`.\n \"\"\"\n\n ser_json_timedelta: Literal['iso8601', 'float']\n \"\"\"\n The format of JSON serialized timedeltas. Accepts the string values of `'iso8601'` and\n `'float'`. Defaults to `'iso8601'`.\n\n - `'iso8601'` will serialize timedeltas to ISO 8601 durations.\n - `'float'` will serialize timedeltas to the total number of seconds.\n \"\"\"\n\n ser_json_bytes: Literal['utf8', 'base64']\n \"\"\"\n The encoding of JSON serialized bytes. Accepts the string values of `'utf8'` and `'base64'`.\n Defaults to `'utf8'`.\n\n - `'utf8'` will serialize bytes to UTF-8 strings.\n - `'base64'` will serialize bytes to URL safe base64 strings.\n \"\"\"\n\n # whether to validate default values during validation, default False\n validate_default: bool\n \"\"\"Whether to validate default values during validation. Defaults to `False`.\"\"\"\n\n validate_return: bool\n \"\"\"whether to validate the return value from call validators. Defaults to `False`.\"\"\"\n\n protected_namespaces: tuple[str, ...]\n \"\"\"\n A `tuple` of strings that prevent model to have field which conflict with them.\n Defaults to `('model_', )`).\n\n Pydantic prevents collisions between model attributes and `BaseModel`'s own methods by\n namespacing them with the prefix `model_`.\n\n ```py\n import warnings\n\n from pydantic import BaseModel\n\n warnings.filterwarnings('error') # Raise warnings as errors\n\n try:\n\n class Model(BaseModel):\n model_prefixed_field: str\n\n except UserWarning as e:\n print(e)\n '''\n Field \"model_prefixed_field\" has conflict with protected namespace \"model_\".\n\n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ()`.\n '''\n ```\n\n You can customize this behavior using the `protected_namespaces` setting:\n\n ```py\n import warnings\n\n from pydantic import BaseModel, ConfigDict\n\n warnings.filterwarnings('error') # Raise warnings as errors\n\n try:\n\n class Model(BaseModel):\n model_prefixed_field: str\n also_protect_field: str\n\n model_config = ConfigDict(\n protected_namespaces=('protect_me_', 'also_protect_')\n )\n\n except UserWarning as e:\n print(e)\n '''\n Field \"also_protect_field\" has conflict with protected namespace \"also_protect_\".\n\n You may be able to resolve this warning by setting `model_config['protected_namespaces'] = ('protect_me_',)`.\n '''\n ```\n\n While Pydantic will only emit a warning when an item is in a protected namespace but does not actually have a collision,\n an error _is_ raised if there is an actual collision with an existing attribute:\n\n ```py\n from pydantic import BaseModel\n\n try:\n\n class Model(BaseModel):\n model_validate: str\n\n except NameError as e:\n print(e)\n '''\n Field \"model_validate\" conflicts with member <bound method BaseModel.model_validate of <class 'pydantic.main.BaseModel'>> of protected namespace \"model_\".\n '''\n ```\n \"\"\"\n\n hide_input_in_errors: bool\n \"\"\"\n Whether to hide inputs when printing errors. 
Defaults to `False`.\n\n Pydantic shows the input value and type when it raises `ValidationError` during the validation.\n\n ```py\n from pydantic import BaseModel, ValidationError\n\n class Model(BaseModel):\n a: str\n\n try:\n Model(a=123)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n a\n Input should be a valid string [type=string_type, input_value=123, input_type=int]\n '''\n ```\n\n You can hide the input value and type by setting the `hide_input_in_errors` config to `True`.\n\n ```py\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n class Model(BaseModel):\n a: str\n model_config = ConfigDict(hide_input_in_errors=True)\n\n try:\n Model(a=123)\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n a\n Input should be a valid string [type=string_type]\n '''\n ```\n \"\"\"\n\n defer_build: bool\n \"\"\"\n Whether to defer model validator and serializer construction until the first model validation.\n\n This can be useful to avoid the overhead of building models which are only\n used nested within other models, or when you want to manually define type namespace via\n [`Model.model_rebuild(_types_namespace=...)`][pydantic.BaseModel.model_rebuild]. Defaults to False.\n \"\"\"\n\n plugin_settings: dict[str, object] | None\n \"\"\"A `dict` of settings for plugins. Defaults to `None`.\n\n See [Pydantic Plugins](../concepts/plugins.md) for details.\n \"\"\"\n\n schema_generator: type[_GenerateSchema] | None\n \"\"\"\n A custom core schema generator class to use when generating JSON schemas.\n Useful if you want to change the way types are validated across an entire model/schema. Defaults to `None`.\n\n The `GenerateSchema` interface is subject to change, currently only the `string_schema` method is public.\n\n See [#6737](https://github.com/pydantic/pydantic/pull/6737) for details.\n \"\"\"\n\n json_schema_serialization_defaults_required: bool\n \"\"\"\n Whether fields with default values should be marked as required in the serialization schema. Defaults to `False`.\n\n This ensures that the serialization schema will reflect the fact a field with a default will always be present\n when serializing the model, even though it is not required for validation.\n\n However, there are scenarios where this may be undesirable — in particular, if you want to share the schema\n between validation and serialization, and don't mind fields with defaults being marked as not required during\n serialization. See [#7209](https://github.com/pydantic/pydantic/issues/7209) for more details.\n\n ```py\n from pydantic import BaseModel, ConfigDict\n\n class Model(BaseModel):\n a: str = 'a'\n\n model_config = ConfigDict(json_schema_serialization_defaults_required=True)\n\n print(Model.model_json_schema(mode='validation'))\n '''\n {\n 'properties': {'a': {'default': 'a', 'title': 'A', 'type': 'string'}},\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n print(Model.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {'a': {'default': 'a', 'title': 'A', 'type': 'string'}},\n 'required': ['a'],\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n ```\n \"\"\"\n\n json_schema_mode_override: Literal['validation', 'serialization', None]\n \"\"\"\n If not `None`, the specified mode will be used to generate the JSON schema regardless of what `mode` was passed to\n the function call. 
Defaults to `None`.\n\n This provides a way to force the JSON schema generation to reflect a specific mode, e.g., to always use the\n validation schema.\n\n It can be useful when using frameworks (such as FastAPI) that may generate different schemas for validation\n and serialization that must both be referenced from the same schema; when this happens, we automatically append\n `-Input` to the definition reference for the validation schema and `-Output` to the definition reference for the\n serialization schema. By specifying a `json_schema_mode_override` though, this prevents the conflict between\n the validation and serialization schemas (since both will use the specified schema), and so prevents the suffixes\n from being added to the definition references.\n\n ```py\n from pydantic import BaseModel, ConfigDict, Json\n\n class Model(BaseModel):\n a: Json[int] # requires a string to validate, but will dump an int\n\n print(Model.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {'a': {'title': 'A', 'type': 'integer'}},\n 'required': ['a'],\n 'title': 'Model',\n 'type': 'object',\n }\n '''\n\n class ForceInputModel(Model):\n # the following ensures that even with mode='serialization', we\n # will get the schema that would be generated for validation.\n model_config = ConfigDict(json_schema_mode_override='validation')\n\n print(ForceInputModel.model_json_schema(mode='serialization'))\n '''\n {\n 'properties': {\n 'a': {\n 'contentMediaType': 'application/json',\n 'contentSchema': {'type': 'integer'},\n 'title': 'A',\n 'type': 'string',\n }\n },\n 'required': ['a'],\n 'title': 'ForceInputModel',\n 'type': 'object',\n }\n '''\n ```\n \"\"\"\n\n coerce_numbers_to_str: bool\n \"\"\"\n If `True`, enables automatic coercion of any `Number` type to `str` in \"lax\" (non-strict) mode. 
Defaults to `False`.\n\n Pydantic doesn't allow number types (`int`, `float`, `Decimal`) to be coerced as type `str` by default.\n\n ```py\n from decimal import Decimal\n\n from pydantic import BaseModel, ConfigDict, ValidationError\n\n class Model(BaseModel):\n value: str\n\n try:\n print(Model(value=42))\n except ValidationError as e:\n print(e)\n '''\n 1 validation error for Model\n value\n Input should be a valid string [type=string_type, input_value=42, input_type=int]\n '''\n\n class Model(BaseModel):\n model_config = ConfigDict(coerce_numbers_to_str=True)\n\n value: str\n\n repr(Model(value=42).value)\n #> \"42\"\n repr(Model(value=42.13).value)\n #> \"42.13\"\n repr(Model(value=Decimal('42.13')).value)\n #> \"42.13\"\n ```\n \"\"\"" }, { "identifier": "DEFAULT_REF_TEMPLATE", "path": "backend/venv/lib/python3.10/site-packages/pydantic/json_schema.py", "snippet": "_MODE_TITLE_MAPPING: dict[JsonSchemaMode, str] = {'validation': 'Input', 'serialization': 'Output'}\nDEFAULT_REF_TEMPLATE = '#/$defs/{model}'\ndef update_json_schema(schema: JsonSchemaValue, updates: dict[str, Any]) -> JsonSchemaValue:\n def from_prioritized_choices(\n prioritized_choices: dict[DefsRef, list[DefsRef]],\n defs_to_json: dict[DefsRef, JsonRef],\n definitions: dict[DefsRef, JsonSchemaValue],\n ) -> _DefinitionsRemapping:\n def remap_defs_ref(self, ref: DefsRef) -> DefsRef:\n def remap_json_ref(self, ref: JsonRef) -> JsonRef:\n def remap_json_schema(self, schema: Any) -> Any:\n def __init__(self, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE):\n def _config(self) -> _config.ConfigWrapper:\n def mode(self) -> JsonSchemaMode:\n def build_schema_type_to_method(\n self,\n ) -> dict[CoreSchemaOrFieldType, Callable[[CoreSchemaOrField], JsonSchemaValue]]:\n def generate_definitions(\n self, inputs: Sequence[tuple[JsonSchemaKeyT, JsonSchemaMode, core_schema.CoreSchema]]\n ) -> tuple[dict[tuple[JsonSchemaKeyT, JsonSchemaMode], JsonSchemaValue], dict[DefsRef, JsonSchemaValue]]:\n def generate(self, schema: CoreSchema, mode: JsonSchemaMode = 'validation') -> JsonSchemaValue:\n def generate_inner(self, schema: CoreSchemaOrField) -> JsonSchemaValue: # noqa: C901\n def populate_defs(core_schema: CoreSchema, json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def convert_to_all_of(json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def handler_func(schema_or_field: CoreSchemaOrField) -> JsonSchemaValue:\n def new_handler_func(\n schema_or_field: CoreSchemaOrField,\n current_handler: GetJsonSchemaHandler = current_handler,\n js_modify_function: GetJsonSchemaFunction = js_modify_function,\n ) -> JsonSchemaValue:\n def new_handler_func(\n schema_or_field: CoreSchemaOrField,\n current_handler: GetJsonSchemaHandler = current_handler,\n js_modify_function: GetJsonSchemaFunction = js_modify_function,\n ) -> JsonSchemaValue:\n def any_schema(self, schema: core_schema.AnySchema) -> JsonSchemaValue:\n def none_schema(self, schema: core_schema.NoneSchema) -> JsonSchemaValue:\n def bool_schema(self, schema: core_schema.BoolSchema) -> JsonSchemaValue:\n def int_schema(self, schema: core_schema.IntSchema) -> JsonSchemaValue:\n def float_schema(self, schema: core_schema.FloatSchema) -> JsonSchemaValue:\n def decimal_schema(self, schema: core_schema.DecimalSchema) -> JsonSchemaValue:\n def str_schema(self, schema: core_schema.StringSchema) -> JsonSchemaValue:\n def bytes_schema(self, schema: core_schema.BytesSchema) -> JsonSchemaValue:\n def date_schema(self, schema: core_schema.DateSchema) -> JsonSchemaValue:\n def 
time_schema(self, schema: core_schema.TimeSchema) -> JsonSchemaValue:\n def datetime_schema(self, schema: core_schema.DatetimeSchema) -> JsonSchemaValue:\n def timedelta_schema(self, schema: core_schema.TimedeltaSchema) -> JsonSchemaValue:\n def literal_schema(self, schema: core_schema.LiteralSchema) -> JsonSchemaValue:\n def is_instance_schema(self, schema: core_schema.IsInstanceSchema) -> JsonSchemaValue:\n def is_subclass_schema(self, schema: core_schema.IsSubclassSchema) -> JsonSchemaValue:\n def callable_schema(self, schema: core_schema.CallableSchema) -> JsonSchemaValue:\n def list_schema(self, schema: core_schema.ListSchema) -> JsonSchemaValue:\n def tuple_positional_schema(self, schema: core_schema.TuplePositionalSchema) -> JsonSchemaValue:\n def tuple_variable_schema(self, schema: core_schema.TupleVariableSchema) -> JsonSchemaValue:\n def set_schema(self, schema: core_schema.SetSchema) -> JsonSchemaValue:\n def frozenset_schema(self, schema: core_schema.FrozenSetSchema) -> JsonSchemaValue:\n def _common_set_schema(self, schema: core_schema.SetSchema | core_schema.FrozenSetSchema) -> JsonSchemaValue:\n def generator_schema(self, schema: core_schema.GeneratorSchema) -> JsonSchemaValue:\n def dict_schema(self, schema: core_schema.DictSchema) -> JsonSchemaValue:\n def _function_schema(\n self,\n schema: _core_utils.AnyFunctionSchema,\n ) -> JsonSchemaValue:\n def function_before_schema(self, schema: core_schema.BeforeValidatorFunctionSchema) -> JsonSchemaValue:\n def function_after_schema(self, schema: core_schema.AfterValidatorFunctionSchema) -> JsonSchemaValue:\n def function_plain_schema(self, schema: core_schema.PlainValidatorFunctionSchema) -> JsonSchemaValue:\n def function_wrap_schema(self, schema: core_schema.WrapValidatorFunctionSchema) -> JsonSchemaValue:\n def default_schema(self, schema: core_schema.WithDefaultSchema) -> JsonSchemaValue:\n def nullable_schema(self, schema: core_schema.NullableSchema) -> JsonSchemaValue:\n def union_schema(self, schema: core_schema.UnionSchema) -> JsonSchemaValue:\n def tagged_union_schema(self, schema: core_schema.TaggedUnionSchema) -> JsonSchemaValue:\n def _extract_discriminator(\n self, schema: core_schema.TaggedUnionSchema, one_of_choices: list[_JsonDict]\n ) -> str | None:\n def chain_schema(self, schema: core_schema.ChainSchema) -> JsonSchemaValue:\n def lax_or_strict_schema(self, schema: core_schema.LaxOrStrictSchema) -> JsonSchemaValue:\n def json_or_python_schema(self, schema: core_schema.JsonOrPythonSchema) -> JsonSchemaValue:\n def typed_dict_schema(self, schema: core_schema.TypedDictSchema) -> JsonSchemaValue:\n def _name_required_computed_fields(\n computed_fields: list[ComputedField],\n ) -> list[tuple[str, bool, core_schema.ComputedField]]:\n def _named_required_fields_schema(\n self, named_required_fields: Sequence[tuple[str, bool, CoreSchemaField]]\n ) -> JsonSchemaValue:\n def _get_alias_name(self, field: CoreSchemaField, name: str) -> str:\n def typed_dict_field_schema(self, schema: core_schema.TypedDictField) -> JsonSchemaValue:\n def dataclass_field_schema(self, schema: core_schema.DataclassField) -> JsonSchemaValue:\n def model_field_schema(self, schema: core_schema.ModelField) -> JsonSchemaValue:\n def computed_field_schema(self, schema: core_schema.ComputedField) -> JsonSchemaValue:\n def model_schema(self, schema: core_schema.ModelSchema) -> JsonSchemaValue:\n def _update_class_schema(\n self,\n json_schema: JsonSchemaValue,\n title: str | None,\n extra: Literal['allow', 'ignore', 'forbid'] | None,\n cls: 
type[Any],\n json_schema_extra: dict[str, Any] | JsonSchemaExtraCallable | None,\n ) -> JsonSchemaValue:\n def resolve_schema_to_update(self, json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def model_fields_schema(self, schema: core_schema.ModelFieldsSchema) -> JsonSchemaValue:\n def field_is_present(self, field: CoreSchemaField) -> bool:\n def field_is_required(\n self,\n field: core_schema.ModelField | core_schema.DataclassField | core_schema.TypedDictField,\n total: bool,\n ) -> bool:\n def dataclass_args_schema(self, schema: core_schema.DataclassArgsSchema) -> JsonSchemaValue:\n def dataclass_schema(self, schema: core_schema.DataclassSchema) -> JsonSchemaValue:\n def arguments_schema(self, schema: core_schema.ArgumentsSchema) -> JsonSchemaValue:\n def kw_arguments_schema(\n self, arguments: list[core_schema.ArgumentsParameter], var_kwargs_schema: CoreSchema | None\n ) -> JsonSchemaValue:\n def p_arguments_schema(\n self, arguments: list[core_schema.ArgumentsParameter], var_args_schema: CoreSchema | None\n ) -> JsonSchemaValue:\n def get_argument_name(self, argument: core_schema.ArgumentsParameter) -> str:\n def call_schema(self, schema: core_schema.CallSchema) -> JsonSchemaValue:\n def custom_error_schema(self, schema: core_schema.CustomErrorSchema) -> JsonSchemaValue:\n def json_schema(self, schema: core_schema.JsonSchema) -> JsonSchemaValue:\n def url_schema(self, schema: core_schema.UrlSchema) -> JsonSchemaValue:\n def multi_host_url_schema(self, schema: core_schema.MultiHostUrlSchema) -> JsonSchemaValue:\n def uuid_schema(self, schema: core_schema.UuidSchema) -> JsonSchemaValue:\n def definitions_schema(self, schema: core_schema.DefinitionsSchema) -> JsonSchemaValue:\n def definition_ref_schema(self, schema: core_schema.DefinitionReferenceSchema) -> JsonSchemaValue:\n def ser_schema(\n self, schema: core_schema.SerSchema | core_schema.IncExSeqSerSchema | core_schema.IncExDictSerSchema\n ) -> JsonSchemaValue | None:\n def get_title_from_name(self, name: str) -> str:\n def field_title_should_be_set(self, schema: CoreSchemaOrField) -> bool:\n def normalize_name(self, name: str) -> str:\n def get_defs_ref(self, core_mode_ref: CoreModeRef) -> DefsRef:\n def get_cache_defs_ref_schema(self, core_ref: CoreRef) -> tuple[DefsRef, JsonSchemaValue]:\n def handle_ref_overrides(self, json_schema: JsonSchemaValue) -> JsonSchemaValue:\n def get_schema_from_definitions(self, json_ref: JsonRef) -> JsonSchemaValue | None:\n def encode_default(self, dft: Any) -> Any:\n def update_with_validations(\n self, json_schema: JsonSchemaValue, core_schema: CoreSchema, mapping: dict[str, str]\n ) -> None:\n def get_flattened_anyof(self, schemas: list[JsonSchemaValue]) -> JsonSchemaValue:\n def get_json_ref_counts(self, json_schema: JsonSchemaValue) -> dict[JsonRef, int]:\n def _add_json_refs(schema: Any) -> None:\n def handle_invalid_for_json_schema(self, schema: CoreSchemaOrField, error_info: str) -> JsonSchemaValue:\n def emit_warning(self, kind: JsonSchemaWarningKind, detail: str) -> None:\n def render_warning_message(self, kind: JsonSchemaWarningKind, detail: str) -> str | None:\n def _build_definitions_remapping(self) -> _DefinitionsRemapping:\n def _garbage_collect_definitions(self, schema: JsonSchemaValue) -> None:\ndef model_json_schema(\n cls: type[BaseModel] | type[PydanticDataclass],\n by_alias: bool = True,\n ref_template: str = DEFAULT_REF_TEMPLATE,\n schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,\n mode: JsonSchemaMode = 'validation',\n) -> dict[str, Any]:\ndef 
models_json_schema(\n models: Sequence[tuple[type[BaseModel] | type[PydanticDataclass], JsonSchemaMode]],\n *,\n by_alias: bool = True,\n title: str | None = None,\n description: str | None = None,\n ref_template: str = DEFAULT_REF_TEMPLATE,\n schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,\n) -> tuple[dict[tuple[type[BaseModel] | type[PydanticDataclass], JsonSchemaMode], JsonSchemaValue], JsonSchemaValue]:\ndef _deduplicate_schemas(schemas: Iterable[_JsonDict]) -> list[_JsonDict]:\ndef _make_json_hashable(value: _Json) -> _HashableJson:\ndef _sort_json_schema(value: JsonSchemaValue, parent_key: str | None = None) -> JsonSchemaValue:\n def __get_pydantic_json_schema__(\n self, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n def __hash__(self) -> int:\n def __get_pydantic_json_schema__(\n self, core_schema: core_schema.CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n def __hash__(self) -> int:\ndef _get_all_json_refs(item: Any) -> set[JsonRef]:\n def __class_getitem__(cls, item: AnyType) -> AnyType:\n def __get_pydantic_json_schema__(\n self, core_schema: CoreSchema, handler: GetJsonSchemaHandler\n ) -> JsonSchemaValue:\n def __hash__(self) -> int:\ndef _get_typed_dict_config(schema: core_schema.TypedDictSchema) -> ConfigDict:\nclass PydanticJsonSchemaWarning(UserWarning):\nclass _DefinitionsRemapping:\nclass GenerateJsonSchema:\n class ValidationsMapping:\nclass WithJsonSchema:\nclass Examples:\n class SkipJsonSchema:" }, { "identifier": "create_schema_validator", "path": "backend/venv/lib/python3.10/site-packages/pydantic/plugin/_schema_validator.py", "snippet": "def create_schema_validator(\n schema: CoreSchema, config: CoreConfig | None = None, plugin_settings: dict[str, Any] | None = None\n) -> SchemaValidator:\n \"\"\"Create a `SchemaValidator` or `PluggableSchemaValidator` if plugins are installed.\n\n Returns:\n If plugins are installed then return `PluggableSchemaValidator`, otherwise return `SchemaValidator`.\n \"\"\"\n from ._loader import get_plugins\n\n plugins = get_plugins()\n if plugins:\n return PluggableSchemaValidator(schema, config, plugins, plugin_settings or {}) # type: ignore\n else:\n return SchemaValidator(schema, config)" } ]
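The final context snippet above shows `create_schema_validator`, which returns a plain `SchemaValidator` unless plugins are installed. Below is a minimal sketch of the underlying construction, assuming only pydantic-core's public `core_schema` helpers; the typed-dict shape is an invented example, not taken from the record.

```python
# Build a SchemaValidator directly from a hand-written pydantic-core schema,
# mirroring the non-plugin branch of `create_schema_validator` above.
from pydantic_core import SchemaValidator, core_schema

# Illustrative schema (an assumption for this sketch): a typed dict with a
# single required int field.
schema = core_schema.typed_dict_schema(
    {'id': core_schema.typed_dict_field(core_schema.int_schema())}
)
validator = SchemaValidator(schema)

print(validator.validate_python({'id': '3'}))  # lax mode coerces '3' -> 3
#> {'id': 3}
```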
import sys from dataclasses import is_dataclass from typing import TYPE_CHECKING, Any, Dict, Generic, Iterable, Set, TypeVar, Union, overload from pydantic_core import CoreSchema, SchemaSerializer, SchemaValidator, Some from typing_extensions import Literal, is_typeddict from pydantic.errors import PydanticUserError from pydantic.main import BaseModel from ._internal import _config, _core_utils, _discriminated_union, _generate_schema, _typing_extra from .config import ConfigDict from .json_schema import ( DEFAULT_REF_TEMPLATE, GenerateJsonSchema, JsonSchemaKeyT, JsonSchemaMode, JsonSchemaValue, ) from .plugin._schema_validator import create_schema_validator
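The import block above is the header of pydantic's `type_adapter.py`. For orientation, here is a short usage sketch of the `TypeAdapter` surface these imports support; the `List[int]` target type is an arbitrary example.

```python
from typing import List

from pydantic import TypeAdapter

ta = TypeAdapter(List[int])

print(ta.validate_python(['1', 2]))  # lax coercion turns '1' into 1
#> [1, 2]
print(ta.dump_json([1, 2]))  # serialize to JSON bytes
#> b'[1,2]'
print(ta.json_schema())  # generate a JSON schema for the adapted type
#> {'items': {'type': 'integer'}, 'type': 'array'}
```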
15770
def validate_strings(self, __obj: Any, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> T: """Validate object contains string data against the model. Args: __obj: The object contains string data to validate. strict: Whether to strictly check types. context: Additional context to use during validation. Returns: The validated object. """ return self.validator.validate_strings(__obj, strict=strict, context=context) def get_default_value(self, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> Some[T] | None: """Get the default value for the wrapped type. Args: strict: Whether to strictly check types. context: Additional context to pass to the validator. Returns: The default value wrapped in a `Some` if there is one or None if not. """ return self.validator.get_default_value(strict=strict, context=context) def dump_python( self, __instance: T, *, mode: Literal['json', 'python'] = 'python', include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> Any: """Dump an instance of the adapted type to a Python object. Args: __instance: The Python object to serialize. mode: The output format. include: Fields to include in the output. exclude: Fields to exclude from the output. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with None values. round_trip: Whether to output the serialized data in a way that is compatible with deserialization. warnings: Whether to display serialization warnings. Returns: The serialized object. """ return self.serializer.to_python( __instance, mode=mode, by_alias=by_alias, include=include, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def dump_json( self, __instance: T, *, indent: int | None = None, include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> bytes: """Serialize an instance of the adapted type to JSON. Args: __instance: The instance to be serialized. indent: Number of spaces for JSON indentation. include: Fields to include. exclude: Fields to exclude. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with a value of `None`. round_trip: Whether to serialize and deserialize the instance to ensure round-tripping. warnings: Whether to emit serialization warnings. Returns: The JSON representation of the given instance as bytes. """ return self.serializer.to_json( __instance, indent=indent, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def json_schema( self, *, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,
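The cropped code above ends inside the `json_schema` signature (the gold `next_line` below completes it) and documents the `dump_python`/`dump_json` serialization flags. A hedged illustration of those flags follows; the `Point` model is an invented example.

```python
from typing import Optional

from pydantic import BaseModel, TypeAdapter

class Point(BaseModel):
    x: int
    y: Optional[int] = None

ta = TypeAdapter(Point)
p = ta.validate_python({'x': 1})

print(ta.dump_python(p))
#> {'x': 1, 'y': None}
print(ta.dump_python(p, exclude_none=True))  # drop None-valued fields
#> {'x': 1}
print(ta.dump_json(p, exclude_none=True))  # same filtering, JSON bytes output
#> b'{"x":1}'
```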
""" You may have types that are not `BaseModel`s that you want to validate data against. Or you may want to validate a `List[SomeModel]`, or dump it to JSON. For use cases like this, Pydantic provides [`TypeAdapter`][pydantic.type_adapter.TypeAdapter], which can be used for type validation, serialization, and JSON schema generation without creating a [`BaseModel`][pydantic.main.BaseModel]. A [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] instance exposes some of the functionality from [`BaseModel`][pydantic.main.BaseModel] instance methods for types that do not have such methods (such as dataclasses, primitive types, and more): ```py from typing import List from typing_extensions import TypedDict from pydantic import TypeAdapter, ValidationError class User(TypedDict): name: str id: int UserListValidator = TypeAdapter(List[User]) print(repr(UserListValidator.validate_python([{'name': 'Fred', 'id': '3'}]))) #> [{'name': 'Fred', 'id': 3}] try: UserListValidator.validate_python( [{'name': 'Fred', 'id': 'wrong', 'other': 'no'}] ) except ValidationError as e: print(e) ''' 1 validation error for list[typed-dict] 0.id Input should be a valid integer, unable to parse string as an integer [type=int_parsing, input_value='wrong', input_type=str] ''' ``` Note: Despite some overlap in use cases with [`RootModel`][pydantic.root_model.RootModel], [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] should not be used as a type annotation for specifying fields of a `BaseModel`, etc. ## Parsing data into a specified type [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] can be used to apply the parsing logic to populate Pydantic models in a more ad-hoc way. This function behaves similarly to [`BaseModel.model_validate`][pydantic.main.BaseModel.model_validate], but works with arbitrary Pydantic-compatible types. This is especially useful when you want to parse results into a type that is not a direct subclass of [`BaseModel`][pydantic.main.BaseModel]. For example: ```py from typing import List from pydantic import BaseModel, TypeAdapter class Item(BaseModel): id: int name: str # `item_data` could come from an API call, eg., via something like: # item_data = requests.get('https://my-api.com/items').json() item_data = [{'id': 1, 'name': 'My Item'}] items = TypeAdapter(List[Item]).validate_python(item_data) print(items) #> [Item(id=1, name='My Item')] ``` [`TypeAdapter`][pydantic.type_adapter.TypeAdapter] is capable of parsing data into any of the types Pydantic can handle as fields of a [`BaseModel`][pydantic.main.BaseModel]. """ # noqa: D212 from __future__ import annotations as _annotations T = TypeVar('T') if TYPE_CHECKING: # should be `set[int] | set[str] | dict[int, IncEx] | dict[str, IncEx] | None`, but mypy can't cope IncEx = Union[Set[int], Set[str], Dict[int, Any], Dict[str, Any]] def _get_schema(type_: Any, config_wrapper: _config.ConfigWrapper, parent_depth: int) -> CoreSchema: """`BaseModel` uses its own `__module__` to find out where it was defined and then look for symbols to resolve forward references in those globals. On the other hand this function can be called with arbitrary objects, including type aliases where `__module__` (always `typing.py`) is not useful. So instead we look at the globals in our parent stack frame. This works for the case where this function is called in a module that has the target of forward references in its scope, but does not work for more complex cases. 
For example, take the following: a.py ```python from typing import Dict, List IntList = List[int] OuterDict = Dict[str, 'IntList'] ``` b.py ```python test="skip" from a import OuterDict from pydantic import TypeAdapter IntList = int # replaces the symbol the forward reference is looking for v = TypeAdapter(OuterDict) v({'x': 1}) # should fail but doesn't ``` If OuterDict were a `BaseModel`, this would work because it would resolve the forward reference within the `a.py` namespace. But `TypeAdapter(OuterDict)` can't know what module OuterDict came from. In other words, the assumption that _all_ forward references exist in the module we are being called from is not technically always true. Although most of the time it is and it works fine for recursive models and such, `BaseModel`'s behavior isn't perfect either and _can_ break in similar ways, so there is no right or wrong between the two. But at the very least this behavior is _subtly_ different from `BaseModel`'s. """ local_ns = _typing_extra.parent_frame_namespace(parent_depth=parent_depth) global_ns = sys._getframe(max(parent_depth - 1, 1)).f_globals.copy() global_ns.update(local_ns or {}) gen = _generate_schema.GenerateSchema(config_wrapper, types_namespace=global_ns, typevars_map={}) schema = gen.generate_schema(type_) schema = gen.collect_definitions(schema) return schema def _getattr_no_parents(obj: Any, attribute: str) -> Any: """Returns the attribute value without attempting to look up attributes from parent types.""" if hasattr(obj, '__dict__'): try: return obj.__dict__[attribute] except KeyError: pass slots = getattr(obj, '__slots__', None) if slots is not None and attribute in slots: return getattr(obj, attribute) else: raise AttributeError(attribute) class TypeAdapter(Generic[T]): """Type adapters provide a flexible way to perform validation and serialization based on a Python type. A `TypeAdapter` instance exposes some of the functionality from `BaseModel` instance methods for types that do not have such methods (such as dataclasses, primitive types, and more). Note that `TypeAdapter` is not an actual type, so you cannot use it in type annotations. Attributes: core_schema: The core schema for the type. validator (SchemaValidator): The schema validator for the type. serializer: The schema serializer for the type. """ if TYPE_CHECKING: @overload def __new__(cls, __type: type[T], *, config: ConfigDict | None = ...) -> TypeAdapter[T]: ... # this overload is for non-type things like Union[int, str] # Pyright currently handles this "correctly", but MyPy understands this as TypeAdapter[object] # so an explicit type cast is needed @overload def __new__(cls, __type: T, *, config: ConfigDict | None = ...) -> TypeAdapter[T]: ... def __new__(cls, __type: Any, *, config: ConfigDict | None = ...) -> TypeAdapter[T]: """A class representing the type adapter.""" raise NotImplementedError @overload def __init__(self, type: type[T], *, config: ConfigDict | None = None, _parent_depth: int = 2) -> None: ... # this overload is for non-type things like Union[int, str] # Pyright currently handles this "correctly", but MyPy understands this as TypeAdapter[object] # so an explicit type cast is needed @overload def __init__(self, type: T, *, config: ConfigDict | None = None, _parent_depth: int = 2) -> None: ... 
def __init__(self, type: Any, *, config: ConfigDict | None = None, _parent_depth: int = 2) -> None: """Initializes the TypeAdapter object.""" config_wrapper = _config.ConfigWrapper(config) try: type_has_config = issubclass(type, BaseModel) or is_dataclass(type) or is_typeddict(type) except TypeError: # type is not a class type_has_config = False if type_has_config and config is not None: raise PydanticUserError( 'Cannot use `config` when the type is a BaseModel, dataclass or TypedDict.' ' These types can have their own config and setting the config via the `config`' ' parameter to TypeAdapter will not override it, thus the `config` you passed to' ' TypeAdapter becomes meaningless, which is probably not what you want.', code='type-adapter-config-unused', ) core_schema: CoreSchema try: core_schema = _getattr_no_parents(type, '__pydantic_core_schema__') except AttributeError: core_schema = _get_schema(type, config_wrapper, parent_depth=_parent_depth + 1) core_schema = _discriminated_union.apply_discriminators(_core_utils.simplify_schema_references(core_schema)) core_schema = _core_utils.validate_core_schema(core_schema) core_config = config_wrapper.core_config(None) validator: SchemaValidator try: validator = _getattr_no_parents(type, '__pydantic_validator__') except AttributeError: validator = create_schema_validator(core_schema, core_config, config_wrapper.plugin_settings) serializer: SchemaSerializer try: serializer = _getattr_no_parents(type, '__pydantic_serializer__') except AttributeError: serializer = SchemaSerializer(core_schema, core_config) self.core_schema = core_schema self.validator = validator self.serializer = serializer def validate_python( self, __object: Any, *, strict: bool | None = None, from_attributes: bool | None = None, context: dict[str, Any] | None = None, ) -> T: """Validate a Python object against the model. Args: __object: The Python object to validate against the model. strict: Whether to strictly check types. from_attributes: Whether to extract data from object attributes. context: Additional context to pass to the validator. Returns: The validated object. """ return self.validator.validate_python(__object, strict=strict, from_attributes=from_attributes, context=context) def validate_json( self, __data: str | bytes, *, strict: bool | None = None, context: dict[str, Any] | None = None ) -> T: """Validate a JSON string or bytes against the model. Args: __data: The JSON data to validate against the model. strict: Whether to strictly check types. context: Additional context to use during validation. Returns: The validated object. """ return self.validator.validate_json(__data, strict=strict, context=context) def validate_strings(self, __obj: Any, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> T: """Validate object contains string data against the model. Args: __obj: The object contains string data to validate. strict: Whether to strictly check types. context: Additional context to use during validation. Returns: The validated object. """ return self.validator.validate_strings(__obj, strict=strict, context=context) def get_default_value(self, *, strict: bool | None = None, context: dict[str, Any] | None = None) -> Some[T] | None: """Get the default value for the wrapped type. Args: strict: Whether to strictly check types. context: Additional context to pass to the validator. Returns: The default value wrapped in a `Some` if there is one or None if not. 
""" return self.validator.get_default_value(strict=strict, context=context) def dump_python( self, __instance: T, *, mode: Literal['json', 'python'] = 'python', include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> Any: """Dump an instance of the adapted type to a Python object. Args: __instance: The Python object to serialize. mode: The output format. include: Fields to include in the output. exclude: Fields to exclude from the output. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with None values. round_trip: Whether to output the serialized data in a way that is compatible with deserialization. warnings: Whether to display serialization warnings. Returns: The serialized object. """ return self.serializer.to_python( __instance, mode=mode, by_alias=by_alias, include=include, exclude=exclude, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def dump_json( self, __instance: T, *, indent: int | None = None, include: IncEx | None = None, exclude: IncEx | None = None, by_alias: bool = False, exclude_unset: bool = False, exclude_defaults: bool = False, exclude_none: bool = False, round_trip: bool = False, warnings: bool = True, ) -> bytes: """Serialize an instance of the adapted type to JSON. Args: __instance: The instance to be serialized. indent: Number of spaces for JSON indentation. include: Fields to include. exclude: Fields to exclude. by_alias: Whether to use alias names for field names. exclude_unset: Whether to exclude unset fields. exclude_defaults: Whether to exclude fields with default values. exclude_none: Whether to exclude fields with a value of `None`. round_trip: Whether to serialize and deserialize the instance to ensure round-tripping. warnings: Whether to emit serialization warnings. Returns: The JSON representation of the given instance as bytes. """ return self.serializer.to_json( __instance, indent=indent, include=include, exclude=exclude, by_alias=by_alias, exclude_unset=exclude_unset, exclude_defaults=exclude_defaults, exclude_none=exclude_none, round_trip=round_trip, warnings=warnings, ) def json_schema( self, *, by_alias: bool = True, ref_template: str = DEFAULT_REF_TEMPLATE, schema_generator: type[GenerateJsonSchema] = GenerateJsonSchema,
mode: JsonSchemaMode = 'validation',
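The `next_line` above is the gold completion: the `mode` parameter that finishes the `json_schema` signature. A hedged sketch of calling the completed method in both modes, reusing the `Json[int]` pattern from the `ConfigDict` snippet earlier; the exact schema output may vary slightly across pydantic versions.

```python
from pydantic import Json, TypeAdapter

ta = TypeAdapter(Json[int])  # validates from a JSON string, serializes an int

print(ta.json_schema(mode='validation'))
#> {'contentMediaType': 'application/json', 'contentSchema': {'type': 'integer'}, 'type': 'string'}
print(ta.json_schema(mode='serialization'))
#> {'type': 'integer'}
```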
6
2023-10-23 18:09:28+00:00
24k
zju3dv/nr_in_a_room
data_gen/batch_real_scene_neural_render.py
[ { "identifier": "read_json", "path": "utils/util.py", "snippet": "def read_json(fname):\n fname = Path(fname)\n with fname.open(\"rt\") as handle:\n return json.load(handle, object_hook=OrderedDict)" }, { "identifier": "read_yaml", "path": "utils/util.py", "snippet": "def read_yaml(fname):\n with open(fname, \"r\") as stream:\n return yaml.safe_load(stream)" }, { "identifier": "RoomOptimizer", "path": "optim/room_optimizer.py", "snippet": "class RoomOptimizer:\n def __init__(\n self,\n scale_factor: float,\n bg_scale_factor: float,\n bg_scene_center: list,\n img_wh: list,\n near: float,\n far: float,\n chunk: int,\n model_ckpt_path_dict: Dict[str, Any],\n config=None,\n scale_factor_dict: Dict[str, Any] = {},\n scene_info_path: str = None,\n scene_info_json_path: str = None,\n model_type=\"NeuS\",\n N_samples: int = 64,\n N_importance: int = 128,\n relation_info: Dict[str, Any] = {},\n output_path: str = None,\n prefix: str = \"\",\n active_instance_id: list = [46, 4, 9, 102],\n virtual_instance_id: list = [], # specific for edit (insert virtual to real) mode\n filter_door_and_window: bool = True,\n lr: float = 1e-2,\n N_optim_step: int = 500,\n adjust_lr_per_step: int = 150,\n optim_batch_size: int = 1024,\n use_amp: bool = False,\n extract_obj_bbox_from_neural_model: bool = False,\n ig_data_base_dir: str = \"data/ig_dataset_v1.0.1/\",\n mask_per_object: bool = False,\n bbox_ray_intersect: bool = True,\n bbox_enlarge: float = 0.1,\n optimize_light_env: bool = True,\n optimize_appearance_code: bool = False,\n use_light_from_image_attr: bool = False,\n use_appearance_from_image_attr: bool = False,\n optimize_option: list = [\n \"photometric_loss\",\n \"perceptual_loss\",\n \"z_axis_align_loss\",\n \"object_room_wall_attach\",\n \"object_room_floor_attach\",\n \"physical_violation\",\n \"object_object_attach\",\n ],\n ):\n # load config\n self.scene_info_path = scene_info_path\n self.scale_factor = scale_factor\n self.scale_factor_dict = scale_factor_dict\n self.bg_scale_factor = bg_scale_factor\n self.bg_scene_center = np.array(bg_scene_center)\n self.ig_data_base_dir = ig_data_base_dir\n self.mask_per_object = mask_per_object\n self.bbox_ray_intersect = bbox_ray_intersect\n self.bbox_enlarge = bbox_enlarge\n self.virtual_instance_id = virtual_instance_id\n\n self.img_wh = img_wh\n self.w = img_wh[0]\n self.h = img_wh[1]\n self.near = near\n self.far = far\n self.N_importance = N_importance\n self.N_samples = N_samples\n self.chunk = chunk\n self.lr = lr\n self.N_optim_step = N_optim_step\n self.adjust_lr_per_step = adjust_lr_per_step\n self.optim_batch_size = optim_batch_size\n self.use_amp = use_amp\n self.optimize_light_env = optimize_light_env\n self.optimize_appearance_code = optimize_appearance_code\n self.optimize_option = optimize_option\n self.config = config\n\n self.use_light_from_image_attr = use_light_from_image_attr\n if self.use_light_from_image_attr:\n print(\n \"WARNING: self.use_light_from_image_attr = True, using hard coded light env.\"\n )\n self.hard_coded_light_id = 0 # just for compatibility\n # self.hard_coded_light_id = 9 # probe_03 in 10 HDR multi_light training\n\n self.use_appearance_from_image_attr = use_appearance_from_image_attr\n if self.use_appearance_from_image_attr:\n print(\n \"WARNING: self.use_appearance_from_image_attr = True, using first frame appearance code.\"\n )\n self.hard_coded_appearance_frame_id = 0\n\n self.optimize_exposure = \"optimize_exposure\" in self.optimize_option\n\n # laod scene info\n if scene_info_json_path is None:\n 
scene_info_json_path = os.path.join(scene_info_path, \"data.json\")\n self.scene_meta = read_json(scene_info_json_path)\n\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n self.relation_info = relation_info\n\n self.model_type = model_type\n # self.load_model(\n # model_type, model_ckpt_path_dict[\"obj\"], model_ckpt_path_dict[\"bg\"]\n # )\n self.load_model_from_dict_path(model_type, model_ckpt_path_dict)\n\n self.reset_optimizable_parameters()\n\n if extract_obj_bbox_from_neural_model:\n self.extract_bounding_boxes_from_neural_model()\n\n if self.bbox_ray_intersect:\n self.prepare_bbox_ray_helper()\n\n self.set_output_path(output_path, prefix)\n\n print(\"RoomOptimizer initialize finished.\")\n\n def load_model_from_dict_path(self, model_type, model_ckpt_path_dict):\n assert model_type == \"NeuS\"\n self.models = {}\n self.image_attrs = {}\n\n # avoid duplicate loading\n self.models_cache = {}\n self.image_attrs_cache = {}\n\n print(\"loading model with instance_id\", self.active_instance_id)\n\n # print(model_ckpt_path_dict)\n for obj_id in self.active_instance_id:\n # identify ckpt_path\n if str(obj_id) in model_ckpt_path_dict:\n ckpt_info = model_ckpt_path_dict[str(obj_id)]\n elif obj_id == 0:\n assert (\n \"bg\" in model_ckpt_path_dict or \"0\" in model_ckpt_path_dict\n ), \"model_ckpt_path_dict missing background 'bg' or '0' ckpt\"\n ckpt_info = model_ckpt_path_dict.get(\"bg\", model_ckpt_path_dict[\"0\"])\n else:\n print(\n f\"Cannot find specific model for obj_id = {obj_id}, \\\n maybe config file is not compatible with given active_instance_id.\"\n )\n ckpt_info = model_ckpt_path_dict[\"obj\"]\n # load with cache\n ckpt_path, neus_conf = ckpt_info[\"path\"], ckpt_info[\"neus_conf\"]\n if ckpt_info not in self.models_cache:\n (\n self.models_cache[ckpt_path],\n self.image_attrs_cache[ckpt_path],\n ) = self.load_model_neus(ckpt_path, obj_id, neus_conf)\n self.models[f\"neus_{obj_id}\"] = self.models_cache[ckpt_path]\n self.image_attrs[str(obj_id)] = self.image_attrs_cache[ckpt_path]\n\n def load_model_nerf(self, ckpt_path):\n # TODO(ybbbbt): fix hard coding\n conf = {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n }\n nerf_coarse = NeRF_Object(conf)\n nerf_fine = NeRF_Object(conf)\n image_attributes = ImageAttributes(conf)\n load_ckpt(nerf_coarse, ckpt_path, model_name=\"nerf_coarse\")\n load_ckpt(nerf_fine, ckpt_path, model_name=\"nerf_fine\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n nerf_coarse = nerf_coarse.cuda().eval()\n nerf_fine = nerf_fine.cuda().eval()\n image_attributes = image_attributes.cuda().eval()\n\n models = {\n \"coarse\": nerf_coarse,\n \"fine\": nerf_fine,\n }\n\n embedding_xyz = Embedding(3, 10)\n embedding_dir = Embedding(3, 4)\n embeddings = {\n \"xyz\": embedding_xyz,\n \"dir\": embedding_dir,\n }\n return models, embeddings, image_attributes\n\n def load_model_neus(self, ckpt_path, obj_id, config_path=\"config/neus.yaml\"):\n conf = {\n \"model\": {\n \"N_max_objs\": 128,\n \"N_obj_embedding\": 64,\n },\n }\n if self.optimize_light_env:\n # conf[\"model\"].update({\"N_max_lights\": 128, \"N_light_embedding\": 16})\n conf[\"model\"].update({\"N_max_lights\": 1024, \"N_light_embedding\": 16})\n\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n conf[\"model\"].update(\n {\"N_max_appearance_frames\": 10000, \"N_appearance_embedding\": 16}\n )\n\n neus, render_kwargs_train, render_kwargs_test = get_model_neus(\n 
config_path=config_path, need_trainer=False, extra_conf=conf\n )\n self.render_kwargs_neus = render_kwargs_test\n image_attributes = ImageAttributes(conf[\"model\"])\n\n print(ckpt_path)\n load_ckpt(neus, ckpt_path, model_name=\"neus\")\n load_ckpt(image_attributes, ckpt_path, model_name=\"image_attributes\")\n\n if self.config is not None and (\n str(obj_id) in self.config.get(\"map_virtual_to_local\", {})\n ):\n # image_attributes.embedding_instance\n real_id_in_ckpt = self.config.map_virtual_to_local[str(obj_id)]\n image_attributes.embedding_instance.weight.requires_grad = False\n image_attributes.embedding_instance.weight[\n obj_id\n ] = image_attributes.embedding_instance.weight[real_id_in_ckpt]\n # ipdb.set_trace()\n\n neus.cuda().eval()\n image_attributes.cuda().eval()\n return neus, image_attributes\n\n def reset_optimizable_parameters(self):\n self.params = []\n self.relation_info = {}\n if self.optimize_light_env:\n self.initialize_light_code()\n\n if self.optimize_appearance_code:\n self.initialize_appearance_code()\n\n if self.optimize_exposure:\n self.initialize_autoexposure()\n\n def save_optimizable_parameters(self, path):\n all_param_dict = {}\n # all_param_dict[\"params\"] = self.params\n all_param_dict[\"relation_info\"] = self.relation_info\n all_param_dict[\"object_pose_dict\"] = copy.deepcopy(self.object_pose_dict)\n all_param_dict[\"active_instance_id\"] = copy.deepcopy(self.active_instance_id)\n if self.optimize_light_env:\n all_param_dict[\"light_code\"] = copy.deepcopy(self.light_code_dict)\n if self.optimize_appearance_code:\n all_param_dict[\"appearance_code\"] = copy.deepcopy(self.appearance_code_dict)\n if self.optimize_exposure:\n all_param_dict[\"exposure\"] = copy.deepcopy(self.autoexposure_param)\n torch.save(all_param_dict, path)\n\n def load_optimizable_parameters(self, path):\n all_param_dict = torch.load(path)\n # self.params = all_param_dict[\"params\"]\n self.relation_info = all_param_dict[\"relation_info\"]\n if len(self.virtual_instance_id) == 0: # not overwrite in edit mode\n self.active_instance_id = all_param_dict[\"active_instance_id\"]\n\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if len(self.virtual_instance_id) == 0: # not modify edit mode pose\n if hasattr(self, \"object_pose_dict\"):\n self.object_pose_dict.update(all_param_dict[\"object_pose_dict\"])\n else:\n self.object_pose_dict = all_param_dict[\"object_pose_dict\"]\n if self.optimize_light_env:\n self.light_code_dict = all_param_dict[\"light_code\"]\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n self.appearance_code_dict = all_param_dict[\"appearance_code\"]\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure and \"exposure\" in all_param_dict:\n self.autoexposure_param = all_param_dict[\"exposure\"]\n to_gpu(self.autoexposure_param)\n # ipdb.set_trace()\n\n def interpolate_light_env_from_states(self, path1, path2, interp):\n all_param_dict_1 = torch.load(path1)\n all_param_dict_2 = torch.load(path2)\n\n # self.params = all_param_dict[\"params\"]\n def to_gpu(code_dict):\n for k, v in code_dict.items():\n if isinstance(v, torch.Tensor):\n code_dict[k] = v.cuda()\n elif isinstance(v, dict):\n for k2, v2 in v.items():\n if isinstance(v2, torch.Tensor):\n code_dict[k][k2] = v2.cuda()\n\n if self.optimize_light_env:\n light_code_dict_1 = 
all_param_dict_1[\"light_code\"]\n light_code_dict_2 = all_param_dict_2[\"light_code\"]\n for k, v in self.light_code_dict.items():\n self.light_code_dict[k] = light_code_dict_1[\n k\n ] * interp + light_code_dict_2[k] * (1 - interp)\n to_gpu(self.light_code_dict)\n if self.optimize_appearance_code:\n appearance_code_dict_1 = all_param_dict_1[\"appearance_code\"]\n appearance_code_dict_2 = all_param_dict_2[\"appearance_code\"]\n for k, v in self.appearance_code_dict.items():\n self.appearance_code_dict[k] = appearance_code_dict_1[\n k\n ] * interp + appearance_code_dict_2[k] * (1 - interp)\n to_gpu(self.appearance_code_dict)\n if self.optimize_exposure:\n autoexposure_param_1 = all_param_dict_1[\"exposure\"]\n autoexposure_param_2 = all_param_dict_2[\"exposure\"]\n for k, v in self.autoexposure_param.items():\n self.autoexposure_param[k] = autoexposure_param_1[\n k\n ] * interp + autoexposure_param_2[k] * (1 - interp)\n to_gpu(self.autoexposure_param)\n\n def reset_active_instance_id(self, active_instance_id, filter_door_and_window=True):\n self.active_instance_id = active_instance_id\n if filter_door_and_window:\n self.filter_door_and_window()\n\n def set_output_path(self, output_path: str, prefix: str, with_timestamp=True):\n if output_path is not None:\n if with_timestamp:\n self.output_path = os.path.join(\n output_path, f\"rendered_{get_timestamp()}_{prefix}\"\n )\n else:\n self.output_path = os.path.join(output_path, f\"{prefix}\")\n os.makedirs(self.output_path, exist_ok=True)\n\n def filter_door_and_window(self):\n print(\"Filtering door and window objects.\")\n filtered_active_instance_id = []\n for obj_id in self.active_instance_id:\n if self.get_type_of_instance(obj_id) not in [\"door\", \"window\"]:\n filtered_active_instance_id += [obj_id]\n self.active_instance_id = filtered_active_instance_id\n\n def initialize_light_code(self):\n self.light_code_dict = {}\n for obj_id in self.active_instance_id:\n # light_code = torch.randn((16)).cuda()\n light_code = torch.zeros((16)).cuda()\n light_code.requires_grad = True\n self.params += [\n {\"params\": light_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.light_code_dict[str(obj_id)] = light_code\n\n def initialize_appearance_code(self):\n self.appearance_code_dict = {}\n for obj_id in self.active_instance_id:\n # appearance_code = torch.randn((16)).cuda()\n appearance_code = torch.zeros((16)).cuda()\n appearance_code.requires_grad = True\n self.params += [\n {\"params\": appearance_code, \"lr\": self.lr}\n ] # light code can be optimized with larger lr\n self.appearance_code_dict[str(obj_id)] = appearance_code\n\n def initialize_autoexposure(self):\n self.autoexposure_param = {}\n for obj_id in self.active_instance_id:\n # scale and shift\n autoexposure_param = torch.Tensor([1, 1, 1, 0, 0, 0]).cuda()\n autoexposure_param.requires_grad = True\n self.params += [\n {\"params\": autoexposure_param, \"lr\": self.lr * 0.1}\n ] # light code can be optimized with larger lr\n self.autoexposure_param[str(obj_id)] = autoexposure_param\n\n def get_scale_factor(self, obj_id):\n if obj_id == 0:\n return self.bg_scale_factor\n elif str(obj_id) in self.scale_factor_dict:\n return self.scale_factor_dict[str(obj_id)]\n else:\n return self.scale_factor\n\n def extract_bounding_boxes_from_neural_model(self):\n print(\"Extracting object bounding boxes from neural model...\")\n assert self.model_type == \"NeuS\"\n for obj_id in tqdm(self.active_instance_id):\n mesh = extract_mesh_from_neus(\n 
self.models[f\"neus_{obj_id}\"],\n self.image_attrs[str(obj_id)],\n obj_id,\n )\n bbox = mesh.get_axis_aligned_bounding_box()\n bound = np.array([bbox.min_bound, bbox.max_bound])\n size = (bound[1] - bound[0]) * self.get_scale_factor(obj_id)\n # update scene_meta\n for idx, obj_info in enumerate(self.scene_meta[\"objs\"]):\n if obj_info[\"id\"] == obj_id:\n self.scene_meta[\"objs\"][idx][\"bdb3d\"][\"size\"] = size.tolist()\n\n def prepare_bbox_ray_helper(self):\n # bbox ray helper dict\n self.bbox_ray_helper_dict = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n length = np.array(obj_meta_info[\"bbox3d\"][\"size\"])\n self.bbox_ray_helper_dict[str(obj_id)] = BBoxRayHelper(np.zeros(3), length)\n\n def generate_object_rays(\n self, rays_o_obj, rays_d_obj, obj_id, near=None, far=None, select_ind=None\n ):\n \"\"\"\n Generate object rays given rays_o, rays_d and obj_id\n Input:\n select_ind: only for masked rendering\n \"\"\"\n if obj_id == 0: # background\n return self.generate_bg_rays(rays_o_obj, rays_d_obj, near=near, far=far)\n if self.bbox_ray_intersect:\n # for object, rays_o and rays_d should lie in world scale (unscaled)\n bbox_mask, bbox_batch_near, bbox_batch_far = self.bbox_ray_helper_dict[\n str(obj_id)\n ].get_ray_bbox_intersections(\n rays_o_obj,\n rays_d_obj,\n self.get_scale_factor(obj_id),\n # bbox_enlarge=self.bbox_enlarge / self.get_scale_factor(obj_id),\n bbox_enlarge=self.bbox_enlarge, # in physical world\n )\n # for area which hits bbox, we use bbox hit near far\n # bbox_ray_helper has scale for us, do no need to rescale\n batch_near_obj, batch_far_obj = bbox_batch_near, bbox_batch_far\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n # for the invalid part, we use 0 as near far, which assume that (0, 0, 0) is empty\n batch_near_obj[~bbox_mask] = torch.zeros_like(batch_near_obj[~bbox_mask])\n batch_far_obj[~bbox_mask] = torch.zeros_like(batch_far_obj[~bbox_mask])\n else:\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_obj = (\n near\n / self.get_scale_factor(obj_id)\n * torch.ones_like(rays_o_obj[:, :1])\n )\n batch_far_obj = (\n far / self.get_scale_factor(obj_id) * torch.ones_like(rays_d_obj[:, :1])\n )\n rays_o_obj = rays_o_obj / self.get_scale_factor(obj_id)\n\n if self.mask_per_object:\n # mask out of bound rendering\n obj_mask = torch.from_numpy(self.instance_mask == obj_id).view(-1)\n obj_mask = obj_mask[select_ind]\n batch_near_obj[~obj_mask] = 0\n batch_far_obj[~obj_mask] = 0\n\n rays_obj = torch.cat(\n [rays_o_obj, rays_d_obj, batch_near_obj, batch_far_obj], 1\n ) # (H*W, 8)\n rays_obj = rays_obj.cuda()\n return rays_obj\n\n def generate_bg_rays(self, rays_o_bg, rays_d_bg, near=None, far=None):\n near = self.near if near is None else near\n far = self.far if far is None else far\n batch_near_bg = near / self.bg_scale_factor * torch.ones_like(rays_o_bg[:, :1])\n batch_far_bg = far / self.bg_scale_factor * torch.ones_like(rays_d_bg[:, :1])\n rays_o_bg = rays_o_bg / self.bg_scale_factor\n rays_bg = torch.cat(\n [rays_o_bg, rays_d_bg, batch_near_bg, batch_far_bg], 1\n ) # (H*W, 8)\n rays_bg = rays_bg.cuda()\n return rays_bg\n\n def batched_inference_multi(\n self,\n rays_list,\n obj_id_list,\n to_cpu=True,\n hit_test_only=False,\n need_normal=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=True,\n refine_edge=False,\n refine_edge_obj_ids=[],\n render_mask=False,\n # 
use_sphere_tracing=False,\n show_progress=False,\n **kwargs,\n ):\n \"\"\"Do batched inference on rays using chunk.\"\"\"\n B = rays_list[0].shape[0]\n results = defaultdict(list)\n for i in tqdm(range(0, B, self.chunk), disable=not show_progress):\n extra_chunk = dict()\n for k, v in kwargs.items():\n if isinstance(v, torch.Tensor) and \"autoexposure_\" not in k:\n extra_chunk[k] = v[i : i + self.chunk]\n else:\n extra_chunk[k] = v\n if self.model_type == \"NeRF\":\n rendered_ray_chunks = render_rays_multi(\n self.models,\n self.embeddings,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n self.N_samples,\n use_disp=False,\n perturb=0.001,\n # perturb=0.00,\n noise_std=0,\n N_importance=self.N_importance,\n chunk=self.chunk,\n white_back=True,\n individual_weight_for_coarse=True,\n obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n **extra_chunk,\n )\n elif self.model_type == \"NeuS\":\n rendered_ray_chunks = render_rays_multi_neus(\n self,\n self.models,\n [r[i : i + self.chunk] for r in rays_list],\n obj_id_list,\n noise_std=0,\n white_back=True,\n # white_back=False,\n # obj_bg_relative_scale=self.bg_scale_factor / self.scale_factor,\n hit_test_only=hit_test_only,\n need_normal=need_normal,\n use_sphere_tracing=use_sphere_tracing,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n refine_edge_obj_ids=refine_edge_obj_ids,\n render_mask=render_mask,\n extra_dict=extra_chunk,\n render_kwargs=self.render_kwargs_neus,\n )\n\n for k, v in rendered_ray_chunks.items():\n if to_cpu:\n results[k] += [v.cpu()]\n else:\n results[k] += [v]\n\n for k, v in results.items():\n results[k] = torch.cat(v, 0)\n return results\n\n def render_full_scene(\n self,\n pose: np.ndarray,\n idx: int,\n h: int,\n w: int,\n write_idx_on_image=True,\n return_raw_image=False,\n render_mask=False,\n refine_edge=False,\n use_sphere_tracing=True,\n safe_region_volume_rendering=False,\n show_progress=False,\n refine_edge_obj_ids=[],\n fovx_deg=0,\n ):\n extra_dict = dict()\n extra_dict[\"compute_3d_mask\"] = False\n extra_dict[\"is_eval\"] = True\n\n rays_list = []\n object_id_list = []\n\n if fovx_deg > 0:\n focal = (w / 2) / np.tan((fovx_deg / 2) / (180 / np.pi))\n print(\"focal =\", focal)\n directions = get_ray_directions(h, w, focal).cuda() # (h, w, 3)\n else:\n directions = get_ray_directions_equirectangular(h, w).cuda() # (h, w, 3)\n\n for obj_id in self.active_instance_id:\n # get object location\n # Two: object to world pose\n if obj_id == 0: # 0 denotes background\n Two = np.eye(4)\n Two[:3, 3] = self.bg_scene_center\n else: # other objects\n Two = torch.eye(4).cuda()\n Two[:3, :3] = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n Two[:3, 3] = self.object_pose_dict[str(obj_id)][\"trans\"]\n Two = Two.detach().cpu().numpy()\n # pose: Twc\n # we need: Toc\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n\n Toc = np.linalg.inv(Two) @ Twc\n\n Toc = torch.from_numpy(Toc).float().cuda()[:3, :4]\n rays_o, rays_d = get_rays(directions, Toc)\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id)\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr or obj_id in self.virtual_instance_id:\n if not hasattr(self, \"hard_code_light_id\"):\n 
self.hard_coded_light_id = 0\n extra_dict[\"embedding_light_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code and obj_id not in self.virtual_instance_id:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n\n # optimize exposure\n if self.optimize_exposure and obj_id not in self.virtual_instance_id:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n with torch.cuda.amp.autocast(enabled=True):\n with torch.no_grad():\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n use_sphere_tracing=use_sphere_tracing,\n # use_sphere_tracing=True,\n safe_region_volume_rendering=safe_region_volume_rendering,\n refine_edge=refine_edge,\n render_mask=render_mask,\n show_progress=show_progress,\n **extra_dict,\n )\n img = results[f\"rgb_fine\"]\n img_pred = np.clip(img.view(h, w, 3).cpu().numpy(), 0, 1)\n img_pred_ = (img_pred * 255).astype(np.uint8)\n\n if return_raw_image:\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0]\n .cpu()\n .numpy()\n .round()\n .astype(np.uint16)\n )\n return img_pred_, img_mask\n return img_pred_ # raw image in [h, w, 3] np.uint8\n\n if write_idx_on_image:\n img_pred_ = cv2.putText(\n img_pred_,\n \"Iter: {:03d}\".format(idx),\n (20, 20),\n cv2.FONT_HERSHEY_SIMPLEX,\n 0.7,\n (255, 0, 0),\n 2,\n )\n\n imageio.imwrite(\n os.path.join(self.output_path, f\"{idx:06d}.multi_obj.png\"), img_pred_\n )\n if render_mask:\n img_mask = results[f\"rendered_instance_mask\"]\n img_mask = (\n img_mask.view(h, w, 3)[:, :, 0].cpu().numpy().round().astype(np.uint16)\n )\n cv2.imwrite(os.path.join(self.output_path, f\"{idx:06d}.seg.png\"), img_mask)\n\n def set_initial_object_poses_from_scene_meta(self, add_noise=True):\n self.object_pose_dict = {}\n\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n obj_meta_info = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n if \"gt_T_wo\" in obj_meta_info:\n Two = obj_meta_info[\"gt_T_wo\"]\n else:\n print(\n f\"Cannot find object pose for obj_id = {obj_id}, use custom pose with minor offset.\"\n )\n Two = np.eye(4)\n from scipy.spatial.transform import Rotation as R\n\n rot_fix = np.array([1, 0, 0, 0, 0, 1, 0, -1, 0]).reshape(3, 3)\n # TODO: update initial pose for real-world scenes\n # if obj_id == 31:\n # blender_xyz = np.array([-1.44, 1.18, 0.1])\n # blender_rot = R.from_quat([0.5, -0.5, 0.5, 0.5]).as_matrix()\n # elif obj_id == 32:\n # blender_xyz = np.array([0.76, 0.54, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n # elif obj_id == 33:\n # blender_xyz = np.array([-0.06, 1.01, -0.9])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif obj_id == 34:\n # blender_xyz = np.array([-0.05, 1.14, -0.15])\n # blender_rot = R.from_quat([0, 0.707107, -0.707107, 0]).as_matrix()\n # elif 
obj_id == 35:\n # blender_xyz = np.array([-0.35, 1.1, 0.98])\n # blender_rot = R.from_quat([0.707107, 0, 0, 0.707107]).as_matrix()\n\n # Two[:3, :3] = blender_rot @ rot_fix\n # Two[:3, :3] = rot_fix @ blender_rot\n # Two[:3, 3] = rot_fix @ blender_xyz\n\n # Two[1, 3] += 0.75\n # Two[2, 3] -= 0.7\n\n # add noise\n if add_noise:\n Two[:3, 3] += 0.1\n from scipy.spatial.transform import Rotation as R\n\n rot_noise = R.from_euler(\"z\", 20, degrees=True).as_matrix()\n Two[:3, :3] = Two[:3, :3] @ rot_noise\n Two = torch.from_numpy(Two).float().cuda()\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n if \"fix_object_pose\" not in self.optimize_option:\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_from_prediction(self, pred_json_path):\n print(\"Initial pose from\", pred_json_path)\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n pred_info = read_json(pred_json_path)\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.array(pred_info[str(obj_id)][\"Two\"])\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n\n if not \"fix_object_pose\" in self.optimize_option:\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_initial_pose_as_identity(self):\n print(\"Initial pose as identity.\")\n self.object_pose_dict = {}\n self.initial_pose_prediction = {}\n for obj_id in self.active_instance_id:\n if obj_id == 0:\n continue\n Two = np.eye(4)\n Two = torch.from_numpy(Two).float().cuda()\n self.initial_pose_prediction[str(obj_id)] = {\"Two\": Two.clone()}\n\n # split parameters\n rot6d = matrix_to_rotation_6d(Two[:3, :3])\n trans = Two[:3, 3]\n rot6d.requires_grad = True\n trans.requires_grad = True\n\n self.object_pose_dict[str(obj_id)] = {\n \"trans\": trans,\n \"rot6d\": rot6d,\n }\n self.params += [{\"params\": trans, \"lr\": self.lr}]\n self.params += [{\"params\": rot6d, \"lr\": self.lr}]\n\n def set_sampling_mask_from_seg(\n self,\n seg_mask=None,\n seg_mask_path=None,\n add_noise_to_seg=0,\n convert_seg_mask_to_box_mask=False,\n ):\n if seg_mask_path is not None:\n print(\"Read segmentation from gt mask\")\n # read mask\n self.instance_mask = get_instance_mask(seg_mask_path, img_wh=self.img_wh)\n elif seg_mask is not None:\n self.instance_mask = seg_mask\n else:\n print(\"Warning: empty mask\")\n self.merged_mask = (\n np.ones((self.img_wh[1], self.img_wh[0])).reshape(-1).astype(bool)\n )\n return\n\n # merge active object masks\n merged_mask = np.zeros_like(self.instance_mask)\n for i_obj, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue # do not accumulate background obj_id\n instance_mask_obj = self.instance_mask == obj_id\n # use tightly fit bbox instead of segmentation mask\n if convert_seg_mask_to_box_mask:\n instance_mask_obj = seg_mask_to_box_mask(instance_mask_obj)\n merged_mask = np.logical_or(merged_mask, instance_mask_obj)\n\n # if add noise to gt segmentation\n if add_noise_to_seg 
!= 0:\n is_dilate = add_noise_to_seg > 0\n add_noise_to_seg = abs(add_noise_to_seg)\n kernel = np.ones((add_noise_to_seg, add_noise_to_seg), np.uint8)\n if is_dilate:\n merged_mask = cv2.dilate(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n else:\n merged_mask = cv2.erode(\n merged_mask.astype(np.uint8), kernel, iterations=1\n ).astype(bool)\n cv2.imwrite(\n f\"{self.output_path}/merged_mask.png\", merged_mask.astype(np.uint8) * 255\n )\n self.merged_mask = merged_mask.reshape(-1)\n\n def get_type_of_instance(self, instance_id):\n for obj_info in self.scene_meta[\"objs\"]:\n if obj_info[\"id\"] == instance_id:\n return obj_info[\"classname\"]\n return \"unknown\"\n\n def generate_relation(\n self,\n obj_to_room_distance_th: float = 0.5,\n top_down_dist_th: float = 0.3,\n top_down_xy_close_factor: float = 0.8,\n ):\n \"\"\"\n Generate relationship : object-wall, object-floor, object-object\n \"\"\"\n print(\"Start to generate relation from initial poses and neural models...\")\n all_obj_info = {}\n for i, obj_id in enumerate(self.active_instance_id):\n if obj_id == 0:\n continue\n Rwo = rotation_6d_to_matrix(self.object_pose_dict[str(obj_id)][\"rot6d\"])\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n all_obj_info[str(obj_id)] = optimized_meta\n with torch.no_grad():\n generate_relation_for_all(\n room_optimizer=self,\n all_obj_info=all_obj_info,\n obj_to_room_distance_th=obj_to_room_distance_th,\n top_down_dist_th=top_down_dist_th,\n top_down_xy_close_factor=top_down_xy_close_factor,\n )\n # print(\"Relation:\\n\", self.relation_info)\n for k, v in self.relation_info.items():\n print(k, v)\n\n def optimize(self, input_rgb: torch.Tensor, pose=None):\n \"\"\"\n Inputs:\n input_rgb: torch.Tensor [h, w, 3] normalized in 0...1\n \"\"\"\n if pose is None:\n pose = np.array(self.scene_meta[\"camera\"][\"cam3d2world\"]).reshape(4, 4)\n # Original poses has rotation in form \"right down forward\", change to NDC \"right up back\"\n fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n pose[:3, :3] = pose[:3, :3] @ fix_rot\n\n # camera to world pose\n Twc = np.eye(4)\n Twc[:3, :4] = pose[:3, :4]\n Twc = torch.from_numpy(Twc).float().cuda()\n\n if \"keypoint_mask\" in self.optimize_option:\n # detect keypoint for interest region\n keypoint_mask = detect_keypoints(input_rgb.numpy(), circle_radius=5)\n self.merged_mask = np.logical_and(\n keypoint_mask, self.merged_mask.reshape(keypoint_mask.shape)\n )\n cv2.imwrite(\n f\"{self.output_path}/merged_mask_keypoint.png\",\n self.merged_mask.astype(np.uint8) * 255,\n )\n self.merged_mask = self.merged_mask.reshape(-1)\n\n input_rgb = input_rgb.view(-1, 3) # (H*W, 3) RGB\n\n directions = get_ray_directions_equirectangular(\n self.h, self.w\n ).cuda() # (h, w, 3)\n\n mse_loss = nn.MSELoss(reduction=\"none\")\n\n assert hasattr(\n self, \"params\"\n ), \"Please set initial pose params before optimization.\"\n optimizer = torch.optim.Adam(self.params)\n\n scaler = torch.cuda.amp.GradScaler(enabled=self.use_amp)\n perceptual_net = perceptual_model.VGG16_for_Perceptual().cuda()\n\n sample_prob = pano_sample_probability(self.h, self.w).reshape(-1)\n\n t = trange(self.N_optim_step, desc=\"Opt.\", leave=True)\n for i_step in t:\n if 
\"regenerate_relation_during_test\" in self.optimize_option:\n if i_step != 0 and i_step % 50 == 0:\n self.generate_relation()\n if self.adjust_lr_per_step > 0:\n adjust_learning_rate(\n self.lr,\n optimizer,\n i_step,\n base=0.5,\n adjust_lr_every=self.adjust_lr_per_step,\n )\n extra_dict = dict()\n rays_list = []\n object_id_list = []\n # sample according to batch size limitation\n select_ind = np.arange(self.merged_mask.shape[0])[self.merged_mask]\n if (\n \"perceptual_loss\" not in self.optimize_option\n ): # we only sample some points in this case\n # sample according to pano distribution\n select_sample_prob = sample_prob[self.merged_mask]\n select_sample_prob /= select_sample_prob.sum()\n # assert select_ind.shape[0] > self.optim_batch_size\n sample_size = min(select_ind.shape[0], self.optim_batch_size)\n select_ind = np.random.choice(\n select_ind,\n size=sample_size,\n replace=False,\n p=select_sample_prob,\n )\n\n # add some sampling on the background for bg light code\n if self.optimize_light_env:\n bg_sample_ratio = 0.2\n bg_sample_prob = sample_prob[~self.merged_mask]\n bg_sample_prob /= bg_sample_prob.sum()\n bg_sample_ind = np.arange(self.merged_mask.shape[0])[~self.merged_mask]\n # assert bg_sample_ind.shape[0] > self.optim_batch_size\n bg_sample_size = min(\n bg_sample_ind.shape[0], int(bg_sample_ratio * self.optim_batch_size)\n )\n if bg_sample_size > 0:\n bg_sample_ind = np.random.choice(\n bg_sample_ind,\n size=bg_sample_size,\n replace=False,\n p=bg_sample_prob,\n )\n select_ind = np.concatenate([select_ind, bg_sample_ind], axis=-1)\n\n select_ind = np.unique(select_ind)\n if i_step == 0:\n print(\"Actual optimization rays\", select_ind.shape[0])\n select_input_rgb = input_rgb[select_ind].float().cuda()\n\n loss_dict = {}\n all_obj_info = {} # prepare for violation loss\n\n for i, obj_id in enumerate(self.active_instance_id):\n # object to world pose\n if obj_id == 0:\n Rwo = torch.eye(3).cuda()\n two = torch.from_numpy(self.bg_scene_center).float().cuda()\n else:\n Rwo = rotation_6d_to_matrix(\n self.object_pose_dict[str(obj_id)][\"rot6d\"]\n )\n two = self.object_pose_dict[str(obj_id)][\"trans\"]\n\n # camera to object pose\n Toc = torch.eye(4).cuda()\n Toc[:3, :3] = Rwo.T @ Twc[:3, :3]\n Toc[:3, 3] = Rwo.T @ (Twc[:3, 3] - two)\n\n # generate object rays\n rays_o, rays_d = get_rays(directions, Toc[:3, :4])\n\n rays_o = rays_o[select_ind]\n rays_d = rays_d[select_ind]\n\n rays = self.generate_object_rays(rays_o, rays_d, obj_id)\n rays_list += [rays]\n object_id_list += [obj_id]\n\n # set image_attr for object code\n extra_dict[\"embedding_inst_{}\".format(obj_id)] = self.image_attrs[\n str(obj_id)\n ].embedding_instance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * obj_id\n )\n # light code\n if self.optimize_light_env:\n if self.use_light_from_image_attr:\n extra_dict[\n \"embedding_light_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_light(\n torch.ones_like(rays_o[..., 0]).long().cuda()\n * self.hard_coded_light_id\n )\n else:\n extra_dict[\"embedding_light_{}\".format(obj_id)] = (\n self.light_code_dict[str(obj_id)]\n .view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # appearance code\n if self.optimize_appearance_code:\n if self.use_appearance_from_image_attr:\n extra_dict[\n \"embedding_appearance_{}\".format(obj_id)\n ] = self.image_attrs[str(obj_id)].embedding_appearance(\n torch.ones_like(rays_o[..., 0]).long().cuda() * 0\n )\n else:\n extra_dict[\"embedding_appearance_{}\".format(obj_id)] = (\n self.appearance_code_dict[str(obj_id)]\n 
.view(1, -1)\n .expand(rays_o.shape[0], -1)\n )\n # autoexposure\n if self.optimize_exposure:\n extra_dict[f\"autoexposure_{obj_id}\"] = self.autoexposure_param[\n str(obj_id)\n ]\n\n # we do not need to add relation constraints to bg\n if obj_id == 0:\n continue\n\n # enforce optimising on yaw\n if \"z_axis_align_loss\" in self.optimize_option:\n loss_dict[\"z_axis_loss_{}\".format(obj_id)] = (\n z_axis_loss(Rwo, 1.0) * 1e2\n )\n\n optimized_meta = get_object_meta_info(\n self.ig_data_base_dir, self.scene_meta, obj_id\n )\n optimized_meta.pop(\"gt_T_wo\", None) # pop gt\n # pass optimized object pose\n optimized_meta[\"Rwo\"] = Rwo\n optimized_meta[\"two\"] = two\n optimized_meta[\"obj_id\"] = obj_id\n obj_id_key = str(obj_id)\n\n if obj_id_key not in self.relation_info:\n continue\n\n # get obj_relation from input\n obj_relation = self.relation_info[obj_id_key]\n # supplement obj_type\n obj_type = self.get_type_of_instance(obj_id)\n optimized_meta[\"obj_type\"] = obj_type\n\n all_obj_info[str(obj_id)] = optimized_meta\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n \"\"\"attach wall loss\"\"\"\n if (\n \"object_room_wall_attach\" in self.optimize_option\n and obj_relation.get(\"attach_wall\", False)\n ):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n # \"face_direction\": torch.Tensor([0, 1, 0]),\n # \"face_direction\": obj_relation.get(\n # \"attach_wall_face_dir\", torch.Tensor([0, 1, 0])\n # ),\n \"face_direction\": obj_relation[\"attach_wall_face_dir\"],\n \"ray_grid_size\": 10,\n }\n # for door object, we slightly stretch the size to ensure successive hit-test\n if obj_type == \"door\" or obj_type == \"window\":\n kwargs.update(\n {\n \"ray_grid_stretch\": torch.Tensor([1.2, 1.2, 1]),\n \"use_bbox_surface_as_in_detect\": True,\n }\n )\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n \"\"\"attach floor loss\"\"\"\n if (\n \"object_room_floor_attach\" in self.optimize_option\n and obj_relation.get(\"attach_floor\", False)\n ):\n # # TODO(ybbbbt): hard code floor\n # loss_dict.update(\n # obj_attach_floor_loss(optimized_meta, floor=0.0)\n # )\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info\": optimized_meta,\n \"face_direction\": torch.Tensor([0, 0, -1]),\n \"ray_grid_stretch\": torch.Tensor(\n [0.8, 0.8, 1.0]\n ), # avoid too close to wall\n \"use_bbox_surface_as_in_detect\": True,\n \"ray_grid_size\": 3,\n }\n if obj_type == \"door\":\n # kwargs[\"ray_grid_offset\"] = torch.Tensor(\n # [0, -0.3, 0]\n # ) # to avoid to close to wall\n assert (\n \"attach_wall_face_dir\" in obj_relation\n ), f\"door {obj_id} relation prediction failed.\"\n kwargs[\"ray_grid_offset\"] = (\n obj_relation[\"attach_wall_face_dir\"] * -0.3\n ) # to avoid to close to wall\n loss_dict.update(object_room_magnetic_loss(**kwargs))\n\n with torch.cuda.amp.autocast(enabled=self.use_amp):\n results = self.batched_inference_multi(\n rays_list,\n object_id_list,\n to_cpu=False,\n # use_sphere_tracing=True,\n use_sphere_tracing=False,\n **extra_dict,\n )\n pred_rgb = results[\"rgb_fine\"]\n\n if \"photometric_loss\" in self.optimize_option:\n loss_dict[\"mse_loss\"] = mse_loss(pred_rgb, select_input_rgb).mean()\n\n if \"visualize_pred\" in self.optimize_option: # dump image for debug\n # pred_rgb_full = input_rgb.cuda()\n pred_rgb_full = torch.zeros_like(input_rgb.cuda())\n pred_rgb_full[select_ind] = pred_rgb\n\n imageio.imwrite(\n f\"debug/pred_rgb_full.png\",\n (pred_rgb_full * 255)\n .view(self.img_wh[1], self.img_wh[0], 3)\n .detach()\n .cpu()\n .numpy()\n 
.astype(np.uint8),\n )\n\n if \"perceptual_loss\" in self.optimize_option:\n pred_rgb_full = input_rgb.cuda()\n pred_rgb_full[select_ind] = pred_rgb\n loss_dict.update(\n patch_perceptual_loss(\n perceptual_net,\n pred_rgb_full,\n input_rgb,\n all_obj_info,\n self.instance_mask,\n self.img_wh,\n )\n )\n\n \"\"\"attach bottom to other object loss\"\"\"\n if \"object_object_attach\" in self.optimize_option:\n for obj_id_str, obj_relation in self.relation_info.items():\n if obj_relation.get(\"attach_bottom_to_object\", False):\n kwargs = {\n \"room_optimizer\": self,\n \"obj_info_src\": all_obj_info[obj_id_str],\n \"obj_info_tgt\": all_obj_info[\n str(obj_relation[\"attach_tgt_obj_id\"])\n ],\n \"face_direction\": torch.Tensor([0, 0, -1]),\n }\n loss_dict.update(object_object_attach_loss(**kwargs))\n\n # physical violation loss\n if \"physical_violation\" in self.optimize_option:\n if (\n not \"physical_violation_delayed_start\" in self.optimize_option\n or i_step >= 100\n ):\n loss_dict.update(\n physical_violation_loss(\n self,\n all_obj_info,\n N_nearest_obj=3,\n check_background_violation=True,\n # N_sample_points=1000,\n N_sample_points=2000,\n # N_sample_points=300,\n )\n )\n\n if \"viewing_constraint\" in self.optimize_option:\n loss_dict.update(viewing_constraint_loss(self, Twc, all_obj_info))\n\n if \"print_loss_dict\" in self.optimize_option:\n for k, v in loss_dict.items():\n # if \"_62\" not in k:\n # continue\n print(k, \"=\", float(v))\n loss = sum(list(loss_dict.values()))\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n scaler.update()\n optimizer.zero_grad()\n\n t.set_description(\"Loss: %f\" % float(loss))\n t.refresh()\n # dump image\n if i_step % 20 == 0:\n self.save_optimizable_parameters(\n f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n )\n # self.load_optimizable_parameters(\n # f\"{self.output_path}/{i_step:06d}.state.ckpt\"\n # )\n if i_step >= self.N_optim_step - 20:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n write_idx_on_image=False,\n render_mask=True,\n h=512,\n w=1280,\n )\n else:\n self.render_full_scene(\n pose=pose,\n idx=i_step,\n render_mask=False,\n h=self.h,\n w=self.w,\n )\n dump_optimization_meta_to_file(\n filepath=f\"{self.output_path}/{i_step:06d}.optim.json\",\n obj_pose_dict=self.object_pose_dict,\n )" }, { "identifier": "read_real_scene_localization", "path": "optim/misc_utils.py", "snippet": "def read_real_scene_localization(pose_path: str, transform_info_json_path: str):\n pose_dict = {}\n transform_info = read_json(transform_info_json_path)\n trans_colmap_to_arkit = np.array(transform_info[\"transform_colmap_to_arkit_sRT\"])\n trans_align = np.array(transform_info[\"transform_alignment\"])\n with open(pose_path) as file:\n lines = file.readlines()\n lines = lines[1:]\n for line in lines:\n fname, tx, ty, tz, qx, qy, qz, qw, _, _ = line.strip().split(\" \")\n fname += \".png\"\n pose = np.eye(4)\n pose[0, 3] = tx\n pose[1, 3] = ty\n pose[2, 3] = tz\n # Twc\n pose[:3, :3] = Rotation.from_quat([qx, qy, qz, qw]).as_matrix()\n # pose = np.linalg.inv(pose)\n # pose_ndc = np.linalg.inv(pose_ndc)\n\n # convert to ndc\n # pose_ndc = pose\n # fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3)\n # pose_ndc[:3, :3] = pose_ndc[:3, :3] @ fix_rot\n\n # transform to arkit pose\n s, R, t = decompose_to_sRT(trans_colmap_to_arkit)\n # pose_ndc = transform_colmap_to_arkit @ pose_ndc\n # print(s, R, t)\n pose[:3, 3] = R @ (pose[:3, 3] * s) + t\n pose[:3, :3] = R @ pose[:3, :3]\n\n # apply alignment to poses\n pose = trans_align 
@ pose\n\n pose_dict[fname] = {\"pose_slam_Twc\": pose}\n # print(fname, pose)\n return pose_dict" }, { "identifier": "read_testing_config", "path": "optim/misc_utils.py", "snippet": "def read_testing_config():\n conf_cli = OmegaConf.from_cli()\n conf_test_file = OmegaConf.load(conf_cli.config)\n # read dataset config\n conf_test_file[\"dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"dataset_config_path\"]\n )\n conf_test_file[\"bg_dataset_config\"] = read_dataset_config_file(\n conf_test_file[\"bg_dataset_config_path\"]\n )\n\n # processing ckpt\n ckpt_path_dict = {}\n for item in conf_test_file[\"ckpt_lists\"]:\n path = item[\"path\"]\n obj_ids = item[\"obj_ids\"]\n neus_conf = item.get(\"neus_conf\", \"config/neus.yaml\")\n for obj_id in obj_ids:\n ckpt_path_dict[str(obj_id)] = {\"path\": path, \"neus_conf\": neus_conf}\n conf_test_file[\"ckpt_path_dict\"] = ckpt_path_dict\n\n conf_merged = OmegaConf.merge(conf_test_file, conf_cli)\n return conf_merged" } ]
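The read_real_scene_localization snippet above rescales each pose with s, R, t = decompose_to_sRT(trans_colmap_to_arkit), but that helper is not included in this record. Below is a minimal sketch of such a decomposition, assuming a uniform positive scale; the actual implementation in optim/misc_utils.py may differ.

import numpy as np

def decompose_to_sRT(T):
    # Hypothetical split of a 4x4 similarity transform into scale s,
    # rotation R and translation t, assuming uniform positive scale.
    A = T[:3, :3]
    s = float(np.cbrt(np.linalg.det(A)))  # det(s * R) = s**3 for R in SO(3)
    R = A / s
    t = T[:3, 3]
    return s, R, t

# read_real_scene_localization then applies it as:
#   pose[:3, 3] = R @ (pose[:3, 3] * s) + t
#   pose[:3, :3] = R @ pose[:3, :3]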
import sys
import os
import torch
import numpy as np
import imageio
import time
import cv2
from tqdm import tqdm
from argparse import ArgumentParser
from utils.util import read_json, read_yaml
from optim.room_optimizer import RoomOptimizer
from optim.misc_utils import read_real_scene_localization, read_testing_config
from scipy.spatial.transform import Rotation
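As shown in the read_testing_config snippet above, the test configuration is assembled with OmegaConf: a YAML file named on the command line is loaded, and any other key=value CLI arguments override it. A minimal standalone sketch of that merge pattern (outside this repo):

from omegaconf import OmegaConf

# Invoked e.g. as: python demo.py config=test/config/ig_bedroom.yml "img_wh=[1024,512]"
conf_cli = OmegaConf.from_cli()                      # parse key=value CLI args
conf_file = OmegaConf.load(conf_cli.config)          # base settings from the YAML file
conf_merged = OmegaConf.merge(conf_file, conf_cli)   # later config wins: CLI overrides file
print(OmegaConf.to_yaml(conf_merged))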
14,984
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0] for obj_info in read_json(scene_info_json_path)["objs"]: active_instance_id += [obj_info["id"]] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=scene_info_json_path, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, active_instance_id=active_instance_id, use_amp=True, use_light_from_image_attr=True, # we use fixed light code (e.g. probe_03) optimize_appearance_code=config.get("optimize_appearance_code", False), use_appearance_from_image_attr=True, ) # initialize object poses with no noise room_optimizer.set_initial_object_poses_from_scene_meta(add_noise=False) # we show an example to use pose scene_meta = read_json(scene_info_json_path) # localization_info = read_real_scene_localization( # "/mnt/nas_54/group/BBYang/neural_scene_capture_360/capture_1104/processed/arrangement_panorama_select/arrangement1/traj.txt", # "data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json", # ) pose = np.array(scene_meta["camera"]["cam3d2world"]).reshape(4, 4) # Original poses has rotation in form "right down forward", change to NDC "right up back" fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3) pose[:3, :3] = pose[:3, :3] @ fix_rot # from scipy.spatial.transform import Rotation as R # print(pose) # rot_fix_loc = np.array([0, 1, 0, 1, 0, 0, 0, 0, -1]).reshape(3, 3) # pose[:3, :3] = pose[:3, :3] @ rot_fix_loc # pose[:3, :3] = rot_fix_loc @ pose[:3, :3] t1 = time.time() # image_np = room_optimizer.render_full_scene( # pose=pose, # idx=-1, # return_raw_image=True, # refine_edge=True, # # use_sphere_tracing=False, # use_sphere_tracing=True, # ) image_np, mask_np = room_optimizer.render_full_scene( pose=pose, idx=-1, return_raw_image=True, refine_edge=False, # refine_edge=True, # use_sphere_tracing=False, use_sphere_tracing=True, render_mask=True, ) t2 = time.time() print(f"Rendering finish in {t2-t1} s.") os.makedirs("debug", exist_ok=True) imageio.imwrite(f"{target_dir}/rgb.png", image_np) cv2.imwrite(f"{target_dir}/seg.png", mask_np) if __name__ == "__main__": """ Example: python test/test_neural_scene_renderer.py \ config=test/config/ig_bedroom.yml \ "img_wh=[1024,512]" \ base_dir=data/real_room_rand_arrangement """
os.environ["OMP_NUM_THREADS"] = "1" # noqa os.environ["MKL_NUM_THREADS"] = "1" # noqa sys.path.append(".") # noqa def render_frame(config, target_dir): # or load from config active_instance_id = config.active_instance_id dataset_config = config.dataset_config["dataset"] scene_info_json_path = config.scene_info_json active_instance_id = [0] for obj_info in read_json(scene_info_json_path)["objs"]: active_instance_id += [obj_info["id"]] bg_scale_factor = 1 bg_scene_center = [0, 0, 0] if config.bg_dataset_config != "": bg_dataset_config = config.bg_dataset_config["dataset"] bg_scale_factor = bg_dataset_config["scale_factor"] bg_scene_center = bg_dataset_config["scene_center"] # intialize room optimizer room_optimizer = RoomOptimizer( scene_info_json_path=scene_info_json_path, scale_factor=dataset_config["scale_factor"], scale_factor_dict=dataset_config.get("scale_factor_dict", {}), bg_scale_factor=bg_scale_factor, bg_scene_center=bg_scene_center, img_wh=config.img_wh, near=0.3, far=10.0, chunk=config.chunk, model_ckpt_path_dict=config.ckpt_path_dict, active_instance_id=active_instance_id, use_amp=True, use_light_from_image_attr=True, # we use fixed light code (e.g. probe_03) optimize_appearance_code=config.get("optimize_appearance_code", False), use_appearance_from_image_attr=True, ) # initialize object poses with no noise room_optimizer.set_initial_object_poses_from_scene_meta(add_noise=False) # we show an example to use pose scene_meta = read_json(scene_info_json_path) # localization_info = read_real_scene_localization( # "/mnt/nas_54/group/BBYang/neural_scene_capture_360/capture_1104/processed/arrangement_panorama_select/arrangement1/traj.txt", # "data/real_room_0/objects/000/background_hloc_neus_normal_converge/transform_info.json", # ) pose = np.array(scene_meta["camera"]["cam3d2world"]).reshape(4, 4) # Original poses has rotation in form "right down forward", change to NDC "right up back" fix_rot = np.array([1, 0, 0, 0, -1, 0, 0, 0, -1]).reshape(3, 3) pose[:3, :3] = pose[:3, :3] @ fix_rot # from scipy.spatial.transform import Rotation as R # print(pose) # rot_fix_loc = np.array([0, 1, 0, 1, 0, 0, 0, 0, -1]).reshape(3, 3) # pose[:3, :3] = pose[:3, :3] @ rot_fix_loc # pose[:3, :3] = rot_fix_loc @ pose[:3, :3] t1 = time.time() # image_np = room_optimizer.render_full_scene( # pose=pose, # idx=-1, # return_raw_image=True, # refine_edge=True, # # use_sphere_tracing=False, # use_sphere_tracing=True, # ) image_np, mask_np = room_optimizer.render_full_scene( pose=pose, idx=-1, return_raw_image=True, refine_edge=False, # refine_edge=True, # use_sphere_tracing=False, use_sphere_tracing=True, render_mask=True, ) t2 = time.time() print(f"Rendering finish in {t2-t1} s.") os.makedirs("debug", exist_ok=True) imageio.imwrite(f"{target_dir}/rgb.png", image_np) cv2.imwrite(f"{target_dir}/seg.png", mask_np) if __name__ == "__main__": """ Example: python test/test_neural_scene_renderer.py \ config=test/config/ig_bedroom.yml \ "img_wh=[1024,512]" \ base_dir=data/real_room_rand_arrangement """
config = read_testing_config()
4
2023-10-15 08:41:29+00:00
24k
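The RoomOptimizer record above stores each optimizable object pose as a translation plus a 6D rotation vector, converting with rotation_6d_to_matrix / matrix_to_rotation_6d, whose definitions are not shown in this record. A sketch of the standard continuous 6D parametrization (Zhou et al. 2019) that such helpers typically implement; treating this as the repo's exact implementation is an assumption:

import torch
import torch.nn.functional as F

def rotation_6d_to_matrix_sketch(d6: torch.Tensor) -> torch.Tensor:
    # Gram-Schmidt: orthonormalize the two 3-vectors of the 6D code,
    # then complete the basis with a cross product.
    a1, a2 = d6[..., :3], d6[..., 3:]
    b1 = F.normalize(a1, dim=-1)
    b2 = F.normalize(a2 - (b1 * a2).sum(-1, keepdim=True) * b1, dim=-1)
    b3 = torch.cross(b1, b2, dim=-1)
    return torch.stack((b1, b2, b3), dim=-2)  # rows form the rotation matrix

# Because the 6D code is unconstrained, it can be optimized directly,
# which is why RoomOptimizer keeps rot6d as a requires_grad parameter:
rot6d = torch.randn(6, requires_grad=True)
R = rotation_6d_to_matrix_sketch(rot6d)  # always a valid rotation matrix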
WenzhengZhang/Seq2seqCoref
main_trainer.py
[ { "identifier": "DataArguments", "path": "arguments.py", "snippet": "class DataArguments:\n data_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to data directory\"}\n )\n\n max_train_len: Optional[int] = field(\n default=1536,\n metadata={\n \"help\": \"maximum train source input length\"\n },\n )\n max_train_len_out: Optional[int] = field(\n default=2048,\n metadata={\n \"help\": \"maximum train target decoder length\"\n },\n )\n max_eval_len: Optional[int] = field(\n default=1536,\n metadata={\n \"help\": \"maximum dev/test source input length\"\n },\n )\n max_eval_len_out: Optional[int] = field(\n default=2048,\n metadata={\n \"help\": \"maximum dev/test target decode length\"\n },\n )\n\n data_cache_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Where do you want to store the data downloaded from huggingface\"}\n )\n\n beam_sz: Optional[int] = field(\n default=4, metadata={\n \"help\": \"num beams\"\n }\n )\n\n oracle_mentions_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"oracle mentions directory\"\n }\n )\n language: Optional[str] = field(\n default='english', metadata={\n \"help\": \"coreference language\"\n }\n )\n joint_data_dirs: Optional[str] = field(\n default=None, metadata={\"help\": \"datasets dirs for joint training\"}\n )\n joint_max_train_lens: Optional[str] = field(\n default=None, metadata={\"help\": \"max train len for each dataset for \"\n \"joint training\"}\n )\n joint_max_eval_lens: Optional[str] = field(\n default=None, metadata={\"help\": \"max eval len for each dataset for \"\n \"joint training\"}\n )\n joint_num_samples: Optional[int] = field(\n default=2000, metadata={\"help\": \"num samples to subsample for joint \"\n \"training\"}\n )" }, { "identifier": "ModelArguments", "path": "arguments.py", "snippet": "class ModelArguments:\n model_name_or_path: str = field(\n default=\"t5-base\",\n metadata={\n \"help\": \"Path to pretrained model or model identifier from huggingface.co/models\"}\n )\n\n config_name: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Pretrained config name or path if not the same as model_name\"}\n )\n tokenizer_name: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Pretrained tokenizer name or path if not the same as model_name\"}\n )\n cache_dir: Optional[str] = field(\n default=None, metadata={\n \"help\": \"Where do you want to store the pretrained models downloaded from s3\"}\n )\n\n decay_rate: Optional[float] = field(\n default=0.6, metadata={\"help\": \"Decay learning rate\"}\n )\n low_cpu_mem_usage: Optional[bool] = field(\n default=False, metadata={\"help\": \"low cpu mem usage when load model\"}\n )" }, { "identifier": "CorefTrainingArguments", "path": "arguments.py", "snippet": "class CorefTrainingArguments(Seq2SeqTrainingArguments):\n do_train: bool = field(default=True,\n metadata={\"help\": \"Whether to run training.\"})\n save_dir: Optional[str] = field(\n default=None, metadata={\"help\": \"Path to save predicts directory\"}\n )\n save_predicts: Optional[bool] = field(\n default=True, metadata={\"help\": \"whether to save predictions\"}\n )\n mark_sentence: Optional[bool] = field(\n default=False, metadata={\"help\": \"mark sentence end for short target?\"}\n )\n align_mode: Optional[str] = field(\n default='l', metadata={\"help\": \"alignment mode: highroad (h) or \"\n \"lowroad (l) \"}\n )\n optim: Union[OptimizerNames, str] = field(\n default=\"adamw_apex_fused\",\n metadata={\"help\": \"The optimizer to use.\"},\n )\n 
parallelize_model: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to enable naive model \"\n \"parallel\"}\n )\n manual_empty_cache: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to empty cuda cache manually\"}\n )\n is_stage3: Optional[bool] = field(\n default=False, metadata={\"help\": \"use deepspeed stage3 for inference \"\n \"if is stage3\"}\n )\n val_after_train: Optional[bool] = field(\n default=False, metadata={\"help\": \"save the checkpoints then do \"\n \"validation after training\"}\n )\n allow_singletons: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"whether to allow singletons\"\n }\n )\n seq2seq_type: Optional[str] = field(\n default='action', metadata={\n \"help\": \"seq2seq type: action, short_seq, full_seq, tagging, \"\n \"input_feed, action_non_int\"\n }\n )\n action_type: Optional[str] = field(\n default='integer', metadata={\n \"help\": \"target action type: integer, non_integer\"\n }\n )\n do_oracle: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"do oracle experiments or not. Provide (gold) mentions \"\n \"and ask the model to predict coreference predictions\"\n }\n )\n add_mention_end: Optional[bool] = field(\n default=False, metadata={\n \"help\": \"add mention end token when using non-integer action format\"\n }\n )\n joint_data_names: Optional[str] = field(\n default=None, metadata={\"help\": \"datasets names for joint training\"}\n )\n joint_min_num_mentions: Optional[str] = field(\n default=None, metadata={\"help\": \"threshold for num mentions per epoch \"\n \"in joint training for each dataset\"}\n )\n min_num_mentions: Optional[int] = field(\n default=2, metadata={\"help\": \"minimum number of mentions per cluster,\"\n \"ontonotes is 2 other datasets is 1 \"\n \"(allow singletons)\"}\n )\n joint_train: Optional[bool] = field(\n default=False, metadata={\"help\": \"whether to use joint training\"}\n )" }, { "identifier": "CorefDataset", "path": "data.py", "snippet": "class CorefDataset(Dataset):\n\n def __init__(self, tokenizer,\n data_args, train_args, split):\n self.tokenizer = tokenizer\n self.data_args = data_args\n self.train_args = train_args\n self.split = split\n # self.task_prefix = self.data_args.task_prefix\n # convert tokens to ids for each sample\n self.samples, self.doc_labels = self.load_dataset()\n\n def __len__(self):\n return len(self.samples)\n\n def load_dataset(self):\n max_len = self.data_args.max_train_len if self.split == 'train' else \\\n self.data_args.max_eval_len\n data_path = os.path.join(\n self.data_args.data_dir,\n f'{self.split}.t5-small.english.{max_len}.jsonlines')\n samples = []\n doc_labels = {}\n thred = self.train_args.min_num_mentions\n with open(data_path, 'r') as f:\n for line in f:\n item = json.loads(line)\n doc_key = item['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n if self.train_args.action_type == \"integer\":\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item['target_sentence'])\n elif self.train_args.action_type == \"non_integer\":\n if self.train_args.add_mention_end:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_sentence\"])\n else:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_sentence\"])\n else:\n raise ValueError(f\"wrong action type \"\n f\"{self.train_args.action_type}\")\n\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n if self.train_args.action_type == 'integer':\n 
target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n elif self.train_args.action_type == 'non_integer':\n if self.train_args.add_mention_end:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_action\"])\n else:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_action\"])\n else:\n raise ValueError(\"wrong action type (\"\n \"integer/non_integer)\")\n elif self.train_args.seq2seq_type == 'short_seq':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_short_sentence'])\n elif self.train_args.seq2seq_type == 'full_seq':\n target_seq = deepcopy(target_sent)\n elif self.train_args.seq2seq_type == 'tagging':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n # set the last token as eos token\n target_seq[-1] = self.tokenizer.eos_token_id\n else:\n raise ValueError('wrong seq2seq type')\n sample = {'doc_key': doc_key,\n 'sentence': self.tokenizer.convert_tokens_to_ids(\n item['sentence']),\n 'target_sentence': target_sent,\n 'target_seq': target_seq,\n 'subtoken_map': item['subtoken_map'],\n 'seg_clusters': [[tuple(m) for m in c] for c in item[\n 'seg_clusters'] if len(c) >= thred],\n 'offset': item['offset']\n }\n doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[\n 'gold_clusters']]\n samples.append(sample)\n return samples, doc_labels\n\n def __getitem__(self, index):\n sample = self.samples[index]\n input_ids = torch.tensor(sample['sentence'], dtype=torch.long)\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n label_ids = torch.tensor(sample['target_sentence'],\n dtype=torch.long)\n target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'decoder_labels': label_ids,\n 'labels': target_ids\n }\n else:\n label_ids = torch.tensor(sample['target_seq'],\n dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': label_ids,\n }\n return src_encoding" }, { "identifier": "JointDataset", "path": "data.py", "snippet": "class JointDataset(Dataset):\n\n def __init__(self, tokenizer,\n data_args, train_args, split):\n self.tokenizer = tokenizer\n self.data_args = data_args\n self.train_args = train_args\n self.split = split\n self.all_samples, self.doc_labels, self.id_to_name = self.load_dataset()\n self.samples = None if self.split == 'train' else [\n s for data_samples in self.all_samples.values() for s in\n data_samples\n ]\n\n def __len__(self):\n if self.split == 'train':\n num_samples = 0\n for s in self.all_samples.values():\n num_samples += min(self.data_args.joint_num_samples, len(s))\n else:\n num_samples = len(self.samples)\n return num_samples\n\n def set_samples(self, epoch):\n # subsample larger datasets and then concat them\n sample_seed = self.train_args.seed + epoch\n min_num_samples = min(len(s) for s in self.all_samples.values())\n samples = []\n for data_name, data_samples in self.all_samples.items():\n if len(data_samples) > min_num_samples:\n subsamples = random.Random(sample_seed).sample(\n data_samples, self.data_args.joint_num_samples)\n else:\n subsamples = data_samples\n samples += subsamples\n 
self.samples = samples\n\n def _load_single_data(self, data_dir,\n data_name,\n max_len,\n thred):\n\n samples = []\n doc_labels = {}\n id_to_name = {}\n data_path = os.path.join(\n data_dir,\n f'{self.split}.t5-small.english.{max_len}.jsonlines')\n with open(data_path, 'r') as f:\n for line in f:\n item = json.loads(line)\n doc_key = item['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n id_to_name[doc_id] = data_name\n if self.train_args.action_type == \"integer\":\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item['target_sentence'])\n elif self.train_args.action_type == \"non_integer\":\n if self.train_args.add_mention_end:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_sentence\"])\n else:\n target_sent = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_sentence\"])\n else:\n raise ValueError(f\"wrong action type \"\n f\"{self.train_args.action_type}\")\n\n if self.train_args.seq2seq_type == 'action' or \\\n self.train_args.seq2seq_type == 'input_feed':\n if self.train_args.action_type == 'integer':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n elif self.train_args.action_type == 'non_integer':\n if self.train_args.add_mention_end:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_mention_end_action\"])\n else:\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item[\"target_non_int_action\"])\n else:\n raise ValueError(\"wrong action type (\"\n \"integer/non_integer)\")\n elif self.train_args.seq2seq_type == 'short_seq':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_short_sentence'])\n elif self.train_args.seq2seq_type == 'full_seq':\n target_seq = deepcopy(target_sent)\n elif self.train_args.seq2seq_type == 'tagging':\n target_seq = self.tokenizer.convert_tokens_to_ids(\n item['target_action'])\n # set the last token as eos token\n target_seq[-1] = self.tokenizer.eos_token_id\n else:\n raise ValueError('wrong seq2seq type')\n sample = {'doc_key': doc_key,\n 'sentence': self.tokenizer.convert_tokens_to_ids(\n item['sentence']),\n 'target_sentence': target_sent,\n 'target_seq': target_seq,\n 'subtoken_map': item['subtoken_map'],\n 'seg_clusters': [[tuple(m) for m in c] for c in item[\n 'seg_clusters'] if len(c) >= thred],\n 'offset': item['offset']\n }\n doc_labels[doc_id] = [[tuple(m) for m in c] for c in item[\n 'gold_clusters']]\n samples.append(sample)\n return samples, doc_labels, id_to_name\n\n def load_dataset(self):\n doc_labels = {}\n id_to_name = {}\n samples = {}\n max_lens = self.data_args.joint_max_train_lens.split(\n ',') if self.split == 'train' else \\\n self.data_args.joint_max_eval_lens.split(',')\n max_lens = [int(l) for l in max_lens]\n threds = self.train_args.joint_min_num_mentions.split(',')\n threds = [int(t) for t in threds]\n data_dirs = self.data_args.joint_data_dirs.split(',')\n data_names = self.train_args.joint_data_names.split(',')\n for data_dir, data_name, max_len, thred in zip(\n data_dirs, data_names, max_lens, threds):\n single_samples, single_doc_labels, single_id_to_name = \\\n self._load_single_data(data_dir, data_name, max_len, thred)\n samples[data_name] = single_samples\n doc_labels.update(single_doc_labels)\n id_to_name.update(single_id_to_name)\n return samples, doc_labels, id_to_name\n\n def __getitem__(self, index):\n sample = self.samples[index]\n input_ids = torch.tensor(sample['sentence'], dtype=torch.long)\n if self.train_args.seq2seq_type == 'action' or \\\n 
self.train_args.seq2seq_type == 'input_feed':\n label_ids = torch.tensor(sample['target_sentence'],\n dtype=torch.long)\n target_ids = torch.tensor(sample['target_seq'], dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'decoder_labels': label_ids,\n 'labels': target_ids\n }\n else:\n label_ids = torch.tensor(sample['target_seq'],\n dtype=torch.long)\n input_len, tgt_len = input_ids.size(0), label_ids.size(0)\n attention_mask = torch.tensor([1] * input_len, dtype=torch.long)\n src_encoding = {'input_ids': input_ids,\n 'attention_mask': attention_mask,\n 'labels': label_ids,\n }\n return src_encoding" }, { "identifier": "SPEAKER_START", "path": "constants.py", "snippet": "SPEAKER_START = '<speaker>'" }, { "identifier": "SPEAKER_END", "path": "constants.py", "snippet": "SPEAKER_END = '</speaker>'" }, { "identifier": "MENTION_START", "path": "constants.py", "snippet": "MENTION_START = '<m>'" }, { "identifier": "MENTION_END", "path": "constants.py", "snippet": "MENTION_END = '</m>'" }, { "identifier": "COPY", "path": "constants.py", "snippet": "COPY = '<copy>'" }, { "identifier": "CLUSTER_NEW", "path": "constants.py", "snippet": "CLUSTER_NEW = '</new>'" }, { "identifier": "CLUSTERS", "path": "constants.py", "snippet": "CLUSTERS = []" }, { "identifier": "SENTENCE_START", "path": "constants.py", "snippet": "SENTENCE_START = '<sentence>'" }, { "identifier": "SENTENCE_END", "path": "constants.py", "snippet": "SENTENCE_END = '</sentence>'" }, { "identifier": "SPECIAL_IDS", "path": "constants.py", "snippet": "SPECIAL_IDS = {\n 'speaker_start': int_tokenizer.encode(SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end': int_tokenizer.encode(SPEAKER_END, add_special_tokens=False)[\n 0],\n 'mention_start': int_tokenizer.encode(MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': int_tokenizer.encode(MENTION_END, add_special_tokens=False)[\n 0],\n 'sep': int_tokenizer.encode(SEP_TOKEN, add_special_tokens=False)[0],\n 'copy': int_tokenizer.encode(COPY, add_special_tokens=False)[0],\n 'eos': int_tokenizer.eos_token_id\n}" }, { "identifier": "NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "NON_INT_SPECIAL_IDS = {\n 'speaker_start': non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'cluster_ids': MENTION_ENDS_IDS,\n 'cluster_ids_to_num': END_IDS_TO_NUM,\n 'cluster_new': non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MARK_SPECIAL_IDS", "path": "constants.py", "snippet": "MARK_SPECIAL_IDS = deepcopy(SPECIAL_IDS)" }, { "identifier": "MENTION_END_NON_INT_SPECIAL_IDS", "path": "constants.py", "snippet": "MENTION_END_NON_INT_SPECIAL_IDS = {\n 'speaker_start': mention_end_non_int_tokenizer.encode(\n SPEAKER_START,\n add_special_tokens=False)[0],\n 'speaker_end':\n mention_end_non_int_tokenizer.encode(\n SPEAKER_END, add_special_tokens=False)[0],\n 'mention_start': mention_end_non_int_tokenizer.encode(\n MENTION_START,\n add_special_tokens=False)[0],\n 'mention_end': mention_end_non_int_tokenizer.encode(\n MENTION_END,\n 
add_special_tokens=False)[0],\n 'cluster_ids': CLUSTER_IDS,\n 'cluster_ids_to_num': CLUSTER_IDS_TO_NUM,\n 'cluster_new': mention_end_non_int_tokenizer.encode(\n CLUSTER_NEW,\n add_special_tokens=False)[0],\n 'copy': mention_end_non_int_tokenizer.encode(\n COPY, add_special_tokens=False)[0],\n 'eos': mention_end_non_int_tokenizer.eos_token_id\n}" }, { "identifier": "MENTION_ENDS", "path": "constants.py", "snippet": "MENTION_ENDS = []" }, { "identifier": "CorefTrainer", "path": "trainer.py", "snippet": "class CorefTrainer(Seq2SeqTrainer):\n\n def _rotate_checkpoints(self, use_mtime=False, output_dir=None) -> None:\n if self.args.save_total_limit is None or self.args.save_total_limit <= 0:\n return\n\n # Check if we should delete older checkpoint(s)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=use_mtime,\n output_dir=output_dir)\n if self.args.val_after_train and self.args.eval_delay < \\\n self.state.global_step:\n for checkpoint in checkpoints_sorted[:-1]:\n states_dir = [str(x) for x in Path(\n checkpoint).glob(f'global_step*') if os.path.isdir(x)]\n for state_dir in states_dir:\n logger.info(f\"Deleting optimizer states of saved \"\n f\"checkpoint {checkpoint}\")\n if os.path.exists(state_dir) and os.path.isdir(\n state_dir):\n shutil.rmtree(state_dir)\n else:\n if len(checkpoints_sorted) <= self.args.save_total_limit:\n return\n\n # If save_total_limit=1 with load_best_model_at_end=True, we could end up deleting the last checkpoint, which\n # we don't do to allow resuming.\n save_total_limit = self.args.save_total_limit\n if (\n self.state.best_model_checkpoint is not None\n and self.args.save_total_limit == 1\n and checkpoints_sorted[\n -1] != self.state.best_model_checkpoint\n ):\n save_total_limit = 2\n\n number_of_checkpoints_to_delete = max(0, len(\n checkpoints_sorted) - save_total_limit)\n checkpoints_to_be_deleted = checkpoints_sorted[\n :number_of_checkpoints_to_delete]\n for checkpoint in checkpoints_to_be_deleted:\n logger.info(\n f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n def _save(self, output_dir: Optional[str] = None, state_dict=None):\n # If we are executing this function, we are the process zero, so we don't check for that.\n output_dir = output_dir if output_dir is not None else self.args.output_dir\n os.makedirs(output_dir, exist_ok=True)\n logger.info(f\"Saving model checkpoint to {output_dir}\")\n # Save a trained model and configuration using `save_pretrained()`.\n # They can then be reloaded using `from_pretrained()`\n if not isinstance(self.model, PreTrainedModel) and not hasattr(\n self.model, 'save_pretrained'):\n if state_dict is None:\n state_dict = self.model.state_dict()\n\n if isinstance(unwrap_model(self.model), PreTrainedModel):\n unwrap_model(self.model).save_pretrained(\n output_dir, state_dict=state_dict,\n # safe_serialization=self.args.save_safetensors\n )\n else:\n logger.info(\n \"Trainer.model is not a `PreTrainedModel`, only saving its state dict.\")\n # if self.args.save_safetensors:\n # safetensors.torch.save_file(state_dict,\n # os.path.join(output_dir,\n # SAFE_WEIGHTS_NAME))\n # else:\n torch.save(state_dict, os.path.join(output_dir,\n WEIGHTS_NAME))\n else:\n self.model.save_pretrained(\n output_dir, state_dict=state_dict,\n # safe_serialization=self.args.save_safetensors\n )\n\n if self.tokenizer is not None:\n self.tokenizer.save_pretrained(output_dir)\n\n # Good practice: save your training arguments together with the trained model\n torch.save(self.args, 
os.path.join(output_dir, TRAINING_ARGS_NAME))\n\n def _inner_training_loop(\n self, batch_size=None, args=None, resume_from_checkpoint=None,\n trial=None, ignore_keys_for_eval=None\n ):\n self._train_batch_size = batch_size\n # Data loader and number of training steps\n train_dataloader = self.get_train_dataloader()\n\n # Setting up training control variables:\n # number of training epochs: num_train_epochs\n # number of training steps per epoch: num_update_steps_per_epoch\n # total number of training steps to execute: max_steps\n total_train_batch_size = args.train_batch_size * args.gradient_accumulation_steps * args.world_size\n\n len_dataloader = None\n if has_length(train_dataloader):\n len_dataloader = len(train_dataloader)\n num_update_steps_per_epoch = len_dataloader // args.gradient_accumulation_steps\n num_update_steps_per_epoch = max(num_update_steps_per_epoch, 1)\n num_examples = self.num_examples(train_dataloader)\n if args.max_steps > 0:\n max_steps = args.max_steps\n num_train_epochs = args.max_steps // num_update_steps_per_epoch + int(\n args.max_steps % num_update_steps_per_epoch > 0\n )\n # May be slightly incorrect if the last batch in the training dataloader has a smaller size but it's\n # the best we can do.\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n max_steps = math.ceil(\n args.num_train_epochs * num_update_steps_per_epoch)\n num_train_epochs = math.ceil(args.num_train_epochs)\n num_train_samples = self.num_examples(\n train_dataloader) * args.num_train_epochs\n elif args.max_steps > 0: # Rely on max_steps when dataloader does not have a working size\n max_steps = args.max_steps\n # Setting a very large number of epochs so we go as many times as necessary over the iterator.\n num_train_epochs = sys.maxsize\n num_update_steps_per_epoch = max_steps\n num_examples = total_train_batch_size * args.max_steps\n num_train_samples = args.max_steps * total_train_batch_size\n else:\n raise ValueError(\n \"args.max_steps must be set to a positive value if dataloader does not have a length, was\"\n f\" {args.max_steps}\"\n )\n\n if DebugOption.UNDERFLOW_OVERFLOW in self.args.debug:\n if self.args.n_gpu > 1:\n # nn.DataParallel(model) replicates the model, creating new variables and module\n # references registered here no longer work on other gpus, breaking the module\n raise ValueError(\n \"Currently --debug underflow_overflow is not supported under DP. 
Please use DDP\"\n \" (torch.distributed.launch).\"\n )\n else:\n debug_overflow = DebugUnderflowOverflow(self.model) # noqa\n\n delay_optimizer_creation = (\n self.sharded_ddp is not None\n and self.sharded_ddp != ShardedDDPOption.SIMPLE\n or is_sagemaker_mp_enabled()\n or self.fsdp is not None\n )\n if args.deepspeed:\n deepspeed_engine, optimizer, lr_scheduler = deepspeed_init(\n self, num_training_steps=max_steps,\n resume_from_checkpoint=resume_from_checkpoint\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n self.optimizer = optimizer\n self.lr_scheduler = lr_scheduler\n elif not delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n self.state = TrainerState()\n self.state.is_hyper_param_search = trial is not None\n\n # Activate gradient checkpointing if needed\n if args.gradient_checkpointing:\n self.model.gradient_checkpointing_enable()\n\n model = self._wrap_model(self.model_wrapped)\n\n if is_sagemaker_mp_enabled() and resume_from_checkpoint is not None:\n self._load_from_checkpoint(resume_from_checkpoint, model)\n\n # for the rest of this function `model` is the outside model, whether it was wrapped or not\n if model is not self.model:\n self.model_wrapped = model\n\n if delay_optimizer_creation:\n self.create_optimizer_and_scheduler(num_training_steps=max_steps)\n\n # Check if saved optimizer or scheduler states exist\n self._load_optimizer_and_scheduler(resume_from_checkpoint)\n\n # important: at this point:\n # self.model is the Transformers Model\n # self.model_wrapped is DDP(Transformers Model), Deepspeed(Transformers Model), etc.\n\n # Train!\n logger.info(\"***** Running training *****\")\n logger.info(f\" Num examples = {num_examples}\")\n logger.info(f\" Num Epochs = {num_train_epochs}\")\n logger.info(\n f\" Instantaneous batch size per device = {args.per_device_train_batch_size}\")\n logger.info(\n f\" Total train batch size (w. parallel, distributed & accumulation) = {total_train_batch_size}\")\n logger.info(\n f\" Gradient Accumulation steps = {args.gradient_accumulation_steps}\")\n logger.info(f\" Total optimization steps = {max_steps}\")\n logger.info(\n f\" Number of trainable parameters = {sum(p.numel() for p in model.parameters() if p.requires_grad)}\"\n )\n\n self.state.epoch = 0\n start_time = time.time()\n epochs_trained = 0\n steps_trained_in_current_epoch = 0\n steps_trained_progress_bar = None\n\n # Check if continuing training from a checkpoint\n if resume_from_checkpoint is not None and os.path.isfile(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME)\n ):\n self.state = TrainerState.load_from_json(\n os.path.join(resume_from_checkpoint, TRAINER_STATE_NAME))\n epochs_trained = self.state.global_step // num_update_steps_per_epoch\n if not args.ignore_data_skip:\n steps_trained_in_current_epoch = self.state.global_step % (\n num_update_steps_per_epoch)\n steps_trained_in_current_epoch *= args.gradient_accumulation_steps\n else:\n steps_trained_in_current_epoch = 0\n\n logger.info(\n \" Continuing training from checkpoint, will skip to saved global_step\")\n logger.info(f\" Continuing training from epoch {epochs_trained}\")\n logger.info(\n f\" Continuing training from global step {self.state.global_step}\")\n if not args.ignore_data_skip:\n logger.info(\n f\" Will skip the first {epochs_trained} epochs then the first {steps_trained_in_current_epoch} \"\n \"batches in the first epoch. 
If this takes a lot of time, you can add the `--ignore_data_skip` \"\n \"flag to your launch command, but you will resume the training on data already seen by your model.\"\n )\n if self.is_local_process_zero() and not args.disable_tqdm:\n steps_trained_progress_bar = tqdm(\n total=steps_trained_in_current_epoch)\n steps_trained_progress_bar.set_description(\n \"Skipping the first batches\")\n\n # Update the references\n self.callback_handler.model = self.model\n self.callback_handler.optimizer = self.optimizer\n self.callback_handler.lr_scheduler = self.lr_scheduler\n self.callback_handler.train_dataloader = train_dataloader\n if self.hp_name is not None and self._trial is not None:\n # use self._trial because the SigOpt/Optuna hpo only call `_hp_search_setup(trial)` instead of passing trial\n # parameter to Train when using DDP.\n self.state.trial_name = self.hp_name(self._trial)\n if trial is not None:\n assignments = trial.assignments if self.hp_search_backend == HPSearchBackend.SIGOPT else trial\n self.state.trial_params = hp_params(assignments)\n else:\n self.state.trial_params = None\n # This should be the same if the state has been saved but in case the training arguments changed, it's safer\n # to set this after the load.\n self.state.max_steps = max_steps\n self.state.num_train_epochs = num_train_epochs\n self.state.is_local_process_zero = self.is_local_process_zero()\n self.state.is_world_process_zero = self.is_world_process_zero()\n\n # tr_loss is a tensor to avoid synchronization of TPUs through .item()\n tr_loss = torch.tensor(0.0).to(args.device)\n # _total_loss_scalar is updated everytime .item() has to be called on tr_loss and stores the sum of all losses\n self._total_loss_scalar = 0.0\n self._globalstep_last_logged = self.state.global_step\n model.zero_grad()\n\n self.control = self.callback_handler.on_train_begin(args, self.state,\n self.control)\n\n # Skip the first epochs_trained epochs to get the random state of the dataloader at the right point.\n if not args.ignore_data_skip:\n for epoch in range(epochs_trained):\n is_random_sampler = hasattr(train_dataloader,\n \"sampler\") and isinstance(\n train_dataloader.sampler, RandomSampler\n )\n if is_torch_less_than_1_11 or not is_random_sampler:\n # We just need to begin an iteration to create the randomization of the sampler.\n # That was before PyTorch 1.11 however...\n if self.args.joint_train:\n train_dataloader.dataset.set_samples(epoch)\n for _ in train_dataloader:\n break\n else:\n # Otherwise we need to call the whooooole sampler cause there is some random operation added\n # AT THE VERY END!\n _ = list(train_dataloader.sampler)\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n for epoch in range(epochs_trained, num_train_epochs):\n if self.args.joint_train:\n train_dataloader.dataset.set_samples(epoch)\n if isinstance(train_dataloader, DataLoader) and isinstance(\n train_dataloader.sampler, DistributedSampler):\n train_dataloader.sampler.set_epoch(epoch)\n elif hasattr(train_dataloader, \"dataset\") and isinstance(\n train_dataloader.dataset, IterableDatasetShard):\n train_dataloader.dataset.set_epoch(epoch)\n\n if is_torch_tpu_available():\n parallel_loader = pl.ParallelLoader(train_dataloader, [\n args.device]).per_device_loader(args.device)\n epoch_iterator = parallel_loader\n else:\n epoch_iterator = train_dataloader\n\n # Reset the past mems state at the beginning of each epoch if necessary.\n if args.past_index >= 0:\n self._past = None\n\n steps_in_epoch = (\n len(epoch_iterator)\n if 
len_dataloader is not None\n else args.max_steps * args.gradient_accumulation_steps\n )\n self.control = self.callback_handler.on_epoch_begin(args,\n self.state,\n self.control)\n\n if epoch == epochs_trained and resume_from_checkpoint is not None and steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n\n step = -1\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n for step, inputs in enumerate(epoch_iterator):\n\n # Skip past any already trained steps if resuming training\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n if steps_trained_in_current_epoch > 0:\n steps_trained_in_current_epoch -= 1\n if steps_trained_progress_bar is not None:\n steps_trained_progress_bar.update(1)\n if steps_trained_in_current_epoch == 0:\n self._load_rng_state(resume_from_checkpoint)\n continue\n elif steps_trained_progress_bar is not None:\n steps_trained_progress_bar.close()\n steps_trained_progress_bar = None\n\n if step % args.gradient_accumulation_steps == 0:\n self.control = self.callback_handler.on_step_begin(args,\n self.state,\n self.control)\n # if args.manual_empty_cache:\n # torch.cuda.empty_cache()\n if (\n ((step + 1) % args.gradient_accumulation_steps != 0)\n and args.local_rank != -1\n and args._no_sync_in_gradient_accumulation\n ):\n # Avoid unnecessary DDP synchronization since there will be no backward pass on this example.\n with model.no_sync():\n tr_loss_step = self.training_step(model, inputs)\n else:\n tr_loss_step = self.training_step(model, inputs)\n\n if (\n args.logging_nan_inf_filter\n and not is_torch_tpu_available()\n and (\n torch.isnan(tr_loss_step) or torch.isinf(tr_loss_step))\n ):\n # if loss is nan or inf simply add the average of previous logged losses\n tr_loss += tr_loss / (\n 1 + self.state.global_step - self._globalstep_last_logged)\n else:\n tr_loss += tr_loss_step\n\n self.current_flos += float(self.floating_point_ops(inputs))\n\n # Optimizer step for deepspeed must be called on every step regardless of the value of gradient_accumulation_steps\n if self.deepspeed:\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n self.deepspeed.step()\n\n if (step + 1) % args.gradient_accumulation_steps == 0 or (\n # last step in epoch but step is always smaller than gradient_accumulation_steps\n steps_in_epoch <= args.gradient_accumulation_steps\n and (step + 1) == steps_in_epoch\n ):\n # Gradient clipping\n if args.max_grad_norm is not None and args.max_grad_norm > 0 and not self.deepspeed:\n # deepspeed does its own clipping\n\n if self.do_grad_scaling:\n # Reduce gradients first for XLA\n if is_torch_tpu_available():\n gradients = xm._fetch_gradients(self.optimizer)\n xm.all_reduce(\"sum\", gradients,\n scale=1.0 / xm.xrt_world_size())\n # AMP: gradients need unscaling\n self.scaler.unscale_(self.optimizer)\n\n if is_sagemaker_mp_enabled() and args.fp16:\n self.optimizer.clip_master_grads(args.max_grad_norm)\n elif hasattr(self.optimizer, \"clip_grad_norm\"):\n # Some optimizers (like the sharded optimizer) have a specific way to do gradient clipping\n self.optimizer.clip_grad_norm(args.max_grad_norm)\n elif hasattr(model, \"clip_grad_norm_\"):\n # Some models (like FullyShardedDDP) have a specific way to do gradient clipping\n model.clip_grad_norm_(args.max_grad_norm)\n else:\n # Revert to normal clipping otherwise, handling Apex or full precision\n nn.utils.clip_grad_norm_(\n amp.master_params(\n self.optimizer) if self.use_apex else model.parameters(),\n args.max_grad_norm,\n )\n\n # Optimizer step\n 
optimizer_was_run = True\n if self.deepspeed:\n pass # called outside the loop\n elif is_torch_tpu_available():\n if self.do_grad_scaling:\n self.scaler.step(self.optimizer)\n self.scaler.update()\n else:\n xm.optimizer_step(self.optimizer)\n elif self.do_grad_scaling:\n scale_before = self.scaler.get_scale()\n self.scaler.step(self.optimizer)\n self.scaler.update()\n scale_after = self.scaler.get_scale()\n optimizer_was_run = scale_before <= scale_after\n else:\n self.optimizer.step()\n\n if optimizer_was_run and not self.deepspeed:\n self.lr_scheduler.step()\n\n model.zero_grad()\n self.state.global_step += 1\n self.state.epoch = epoch + (step + 1) / steps_in_epoch\n if args.manual_empty_cache:\n torch.cuda.empty_cache()\n self.control = self.callback_handler.on_step_end(args,\n self.state,\n self.control)\n\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch,\n ignore_keys_for_eval)\n else:\n self.control = self.callback_handler.on_substep_end(args,\n self.state,\n self.control)\n\n if self.control.should_epoch_stop or self.control.should_training_stop:\n break\n if step < 0:\n logger.warning(\n \"There seems to be not a single sample in your epoch_iterator, stopping training at step\"\n f\" {self.state.global_step}! This is expected if you're using an IterableDataset and set\"\n f\" num_steps ({max_steps}) higher than the number of available samples.\"\n )\n self.control.should_training_stop = True\n\n self.control = self.callback_handler.on_epoch_end(args, self.state,\n self.control)\n self._maybe_log_save_evaluate(tr_loss, model, trial, epoch,\n ignore_keys_for_eval)\n\n if DebugOption.TPU_METRICS_DEBUG in self.args.debug:\n if is_torch_tpu_available():\n # tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)\n xm.master_print(met.metrics_report())\n else:\n logger.warning(\n \"You enabled PyTorch/XLA debug metrics but you don't have a TPU \"\n \"configured. Check your training configuration if this is unexpected.\"\n )\n if self.control.should_training_stop:\n break\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of training\n delattr(self, \"_past\")\n\n logger.info(\n \"\\n\\nTraining completed. 
Do not forget to share your model on huggingface.co/models =)\\n\\n\")\n if args.load_best_model_at_end and self.state.best_model_checkpoint is not None:\n # Wait for everyone to get here so we are sur the model has been saved by process 0.\n if is_torch_tpu_available():\n xm.rendezvous(\"load_best_model_at_end\")\n elif args.local_rank != -1:\n dist.barrier()\n elif is_sagemaker_mp_enabled():\n smp.barrier()\n\n self._load_best_model()\n\n # add remaining tr_loss\n self._total_loss_scalar += tr_loss.item()\n train_loss = self._total_loss_scalar / self.state.global_step\n\n metrics = speed_metrics(\"train\", start_time,\n num_samples=num_train_samples,\n num_steps=self.state.max_steps)\n self.store_flos()\n metrics[\"total_flos\"] = self.state.total_flos\n metrics[\"train_loss\"] = train_loss\n\n self.is_in_train = False\n\n self._memory_tracker.stop_and_update_metrics(metrics)\n\n self.log(metrics)\n\n run_dir = self._get_output_dir(trial)\n checkpoints_sorted = self._sorted_checkpoints(use_mtime=False,\n output_dir=run_dir)\n\n # Delete the last checkpoint when save_total_limit=1 if it's different from the best checkpoint.\n if self.state.best_model_checkpoint is not None and \\\n self.args.save_total_limit == 1 and self.is_world_process_zero():\n for checkpoint in checkpoints_sorted:\n if checkpoint != self.state.best_model_checkpoint:\n logger.info(\n f\"Deleting older checkpoint [{checkpoint}] due to args.save_total_limit\")\n shutil.rmtree(checkpoint)\n\n self.control = self.callback_handler.on_train_end(args, self.state,\n self.control)\n\n return TrainOutput(self.state.global_step, train_loss, metrics)\n\n def my_compute_metrics(self,\n doc_labels: Dict[str, List[List]],\n predicts: Any,\n samples: List,\n split: str,\n id_to_name: Dict = None\n ) -> Dict:\n if self.args.joint_train:\n data_names = self.args.joint_data_names.split(',')\n joint_threds = [\n int(t) for t in self.args.joint_min_num_mentions.split(',')]\n name_to_threds = {n: t for n, t in zip(data_names, joint_threds)}\n documents_to_chunk_data = defaultdict(list)\n documents_to_chunk_gold = defaultdict(list)\n predictions = {}\n golds = {}\n assert len(samples) == len(predicts)\n out_sents = []\n last_doc_id = re.sub(r'_\\d+$', '', samples[0]['doc_key'])\n for sample, predict in zip(samples, predicts):\n doc_key = sample['doc_key']\n doc_id = re.sub(r'_\\d+$', '', doc_key)\n # require convert to ids first\n input_ids = sample['sentence']\n subtoken_map = sample['subtoken_map']\n offset = sample['offset']\n # remove bos\n predict_ids = predict[1:].tolist()\n gold_data = sample['seg_clusters']\n if self.args.joint_train:\n thred = name_to_threds[id_to_name[doc_id]]\n else:\n thred = self.args.min_num_mentions\n if self.args.seq2seq_type == \"short_seq\":\n special_ids = MARK_SPECIAL_IDS if self.args.mark_sentence \\\n else SPECIAL_IDS\n pred_data, aligned_input_ids, aligned_pred_ids = \\\n parse_short_target_tokens(input_ids, predict_ids,\n special_ids, subtoken_map,\n self.tokenizer,\n self.args.align_mode,\n thred,\n self.args.mark_sentence\n )\n pred_tokens = self.tokenizer.convert_ids_to_tokens(\n predict_ids)\n out_predict = {\n 'doc_key': doc_key,\n 'pred_tokens': pred_tokens,\n 'pred_text': self.tokenizer.convert_tokens_to_string(\n pred_tokens),\n 'pred_aligned_text': self.tokenizer.convert_ids_to_tokens(\n aligned_pred_ids\n ),\n 'input_aligned_text': self.tokenizer.convert_ids_to_tokens(\n aligned_input_ids\n )\n }\n else:\n is_tagging = (self.args.seq2seq_type == 'tagging')\n if self.args.action_type == 
'integer':\n pred_data, pred_token_mentions, predict_ids = \\\n parse_int_output_tokens(\n input_ids,\n predict_ids,\n SPECIAL_IDS,\n subtoken_map,\n self.tokenizer,\n thred, is_tagging)\n else:\n special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \\\n self.args.add_mention_end else NON_INT_SPECIAL_IDS\n pred_data, pred_token_mentions, predict_ids = \\\n parse_nonint_output_tokens(\n input_ids,\n predict_ids,\n special_ids,\n subtoken_map,\n self.tokenizer, self.args.add_mention_end,\n thred)\n pred_token_mentions = [(m[0] + offset, m[1] + offset) for m in\n pred_token_mentions]\n pred_tokens = self.tokenizer.convert_ids_to_tokens(\n predict_ids)\n out_predict = {'doc_key': doc_key,\n 'pred_tokens': pred_tokens,\n 'pred_text':\n self.tokenizer.convert_tokens_to_string(\n pred_tokens),\n 'predict_clusters': pred_data,\n 'gold_clusters': gold_data,\n 'predict_token_mentions': pred_token_mentions\n }\n # list of (m1,m2)\n\n documents_to_chunk_data[doc_id].extend(pred_data)\n documents_to_chunk_gold[doc_id].extend(gold_data)\n\n out_sents.append(out_predict)\n if doc_id != last_doc_id:\n predictions[last_doc_id] = get_document_predicts(\n documents_to_chunk_data[\n last_doc_id])\n golds[last_doc_id] = get_document_predicts(\n documents_to_chunk_gold[\n last_doc_id])\n last_doc_id = doc_id\n # final one\n predictions[last_doc_id] = get_document_predicts(\n documents_to_chunk_data[last_doc_id]\n )\n golds[last_doc_id] = get_document_predicts(\n documents_to_chunk_gold[last_doc_id]\n )\n # print(predictions)\n if self.args.joint_train:\n predictions_list = defaultdict(list)\n labels_list = defaultdict(list)\n golds_list = defaultdict(list)\n else:\n predictions_list = []\n labels_list = []\n golds_list = []\n for document_id, doc_label in doc_labels.items():\n if self.args.joint_train:\n predictions_list[id_to_name[document_id]].append(\n predictions[document_id])\n labels_list[id_to_name[document_id]].append(doc_label)\n golds_list[id_to_name[document_id]].append(golds[document_id])\n else:\n predictions_list.append(predictions[document_id])\n labels_list.append(doc_label)\n golds_list.append(golds[document_id])\n if self.args.joint_train:\n label_results = {}\n gold_results = {}\n for dn in predictions_list.keys():\n metrics = CorefAllMetrics().get_all_metrics(\n labels_list[dn],\n predictions_list[dn])\n metrics_golds = CorefAllMetrics().get_all_metrics(\n golds_list[dn],\n predictions_list[dn])\n single_label_results = {\n f'{dn}_{metric_name}_{x}': v\n for metric_name, metric_values in metrics['micro'].items()\n for x, v in metric_values.items()\n }\n single_gold_results = {\n f'{dn}_gold_{metric_name}_{x}': v\n for metric_name, metric_values in\n metrics_golds['micro'].items()\n for x, v in metric_values.items()\n }\n label_results.update(single_label_results)\n gold_results.update(single_gold_results)\n\n else:\n metrics = CorefAllMetrics().get_all_metrics(labels_list,\n predictions_list)\n metrics_golds = CorefAllMetrics().get_all_metrics(golds_list,\n predictions_list)\n label_results = {\n f'{metric_name}_{x}': v\n for metric_name, metric_values in metrics['micro'].items()\n for x, v in metric_values.items()\n }\n gold_results = {\n f'gold_{metric_name}_{x}': v\n for metric_name, metric_values in metrics_golds['micro'].items()\n for x, v in metric_values.items()\n }\n results = {**label_results, **gold_results}\n if self.args.joint_train:\n avg_f1s = [results[f\"{dname}_average_f1\"] for dname in\n data_names]\n results[\"average_f1\"] = sum(avg_f1s) / len(avg_f1s)\n if 
self.is_world_process_zero() and self.args.save_predicts:\n os.makedirs(self.args.save_dir, exist_ok=True)\n save_path = os.path.join(self.args.save_dir,\n f'{split}-predicts.txt')\n results_path = os.path.join(self.args.save_dir,\n f'{split}-results.json')\n with open(save_path, 'w') as f:\n for p in out_sents:\n f.write('%s\\n' % json.dumps(p))\n with open(results_path, 'w') as f:\n json.dump(results, f)\n\n return results\n\n def evaluation_loop(\n self,\n dataloader: DataLoader,\n description: str,\n prediction_loss_only: Optional[bool] = False,\n ignore_keys: Optional[List[str]] = None,\n metric_key_prefix: str = \"eval\",\n ) -> EvalLoopOutput:\n \"\"\"\n Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.\n Works both with or without labels.\n \"\"\"\n args = self.args\n\n prediction_loss_only = False\n\n # if eval is called w/o train init deepspeed here\n if args.deepspeed and not self.deepspeed:\n # XXX: eval doesn't have `resume_from_checkpoint` arg but we should be able to do eval\n # from the checkpoint eventually\n deepspeed_engine, _, _ = deepspeed_init(\n self, num_training_steps=0, resume_from_checkpoint=None,\n inference=is_deepspeed_zero3_enabled()\n )\n self.model = deepspeed_engine.module\n self.model_wrapped = deepspeed_engine\n self.deepspeed = deepspeed_engine\n if self.args.gradient_checkpointing:\n self.model.config.use_cache = True\n model = self._wrap_model(self.model, training=False,\n dataloader=dataloader)\n\n # if full fp16 or bf16 eval is wanted and this ``evaluation`` or ``predict`` isn't called\n # while ``train`` is running, cast it to the right dtype first and then put on device\n if not self.is_in_train:\n if args.fp16_full_eval:\n model = model.to(dtype=torch.float16, device=args.device)\n elif args.bf16_full_eval:\n model = model.to(dtype=torch.bfloat16, device=args.device)\n\n batch_size = self.args.eval_batch_size\n\n logger.info(f\"***** Running {description} *****\")\n if has_length(dataloader):\n logger.info(f\" Num examples = {self.num_examples(dataloader)}\")\n else:\n logger.info(\" Num examples: Unknown\")\n logger.info(f\" Batch size = {batch_size}\")\n\n model.eval()\n\n self.callback_handler.eval_dataloader = dataloader\n # Do this before wrapping.\n eval_dataset = getattr(dataloader, \"dataset\", None)\n\n if is_torch_tpu_available():\n dataloader = pl.ParallelLoader(dataloader,\n [args.device]).per_device_loader(\n args.device)\n\n if args.past_index >= 0:\n self._past = None\n\n # Initialize containers\n # losses/preds/labels on GPU/TPU (accumulated for eval_accumulation_steps)\n losses_host = None\n preds_host = None\n labels_host = None\n inputs_host = None\n\n # losses/preds/labels on CPU (final containers)\n all_losses = None\n all_preds = None\n all_labels = None\n all_inputs = None\n # Will be useful when we have an iterable dataset so don't know its length.\n\n observed_num_examples = 0\n # Main evaluation loop\n for step, inputs in enumerate(dataloader):\n # Update the observed num examples\n observed_batch_size = find_batch_size(inputs)\n if observed_batch_size is not None:\n observed_num_examples += observed_batch_size\n # For batch samplers, batch_size is not known by the dataloader in advance.\n if batch_size is None:\n batch_size = observed_batch_size\n\n # Prediction step\n loss, logits, labels = self.prediction_step(model, inputs,\n prediction_loss_only,\n ignore_keys=ignore_keys)\n inputs_decode = self._prepare_input(inputs[\n \"input_ids\"]) if args.include_inputs_for_metrics else 
None\n\n if is_torch_tpu_available():\n xm.mark_step()\n\n # Update containers on host\n if loss is not None:\n losses = self._nested_gather(loss.repeat(batch_size))\n losses_host = losses if losses_host is None else torch.cat(\n (losses_host, losses), dim=0)\n if labels is not None:\n labels = self._pad_across_processes(labels)\n labels = self._nested_gather(labels)\n labels_host = labels if labels_host is None else nested_concat(\n labels_host, labels, padding_index=-100)\n if inputs_decode is not None:\n inputs_decode = self._pad_across_processes(inputs_decode)\n inputs_decode = self._nested_gather(inputs_decode)\n inputs_host = (\n inputs_decode\n if inputs_host is None\n else nested_concat(inputs_host, inputs_decode,\n padding_index=-100)\n )\n if logits is not None:\n logits = self._pad_across_processes(logits)\n logits = self._nested_gather(logits)\n if self.preprocess_logits_for_metrics is not None:\n logits = self.preprocess_logits_for_metrics(logits, labels)\n preds_host = logits if preds_host is None else nested_concat(\n preds_host, logits, padding_index=-100)\n self.control = self.callback_handler.on_prediction_step(args,\n self.state,\n self.control)\n\n # Gather all tensors and put them back on the CPU if we have done enough accumulation steps.\n if args.eval_accumulation_steps is not None and (\n step + 1) % args.eval_accumulation_steps == 0:\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate(\n (all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(\n all_preds, logits, padding_index=-100)\n if inputs_host is not None:\n inputs_decode = nested_numpify(inputs_host)\n all_inputs = (\n inputs_decode\n if all_inputs is None\n else nested_concat(all_inputs, inputs_decode,\n padding_index=-100)\n )\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = (\n labels if all_labels is None else nested_concat(\n all_labels, labels, padding_index=-100)\n )\n\n # Set back to None to begin a new accumulation\n losses_host, preds_host, inputs_host, labels_host = None, None, None, None\n\n if args.past_index and hasattr(self, \"_past\"):\n # Clean the state at the end of the evaluation loop\n delattr(self, \"_past\")\n\n # Gather all remaining tensors and put them back on the CPU\n if losses_host is not None:\n losses = nested_numpify(losses_host)\n all_losses = losses if all_losses is None else np.concatenate(\n (all_losses, losses), axis=0)\n if preds_host is not None:\n logits = nested_numpify(preds_host)\n all_preds = logits if all_preds is None else nested_concat(\n all_preds, logits, padding_index=-100)\n if inputs_host is not None:\n inputs_decode = nested_numpify(inputs_host)\n all_inputs = (\n inputs_decode if all_inputs is None else nested_concat(\n all_inputs, inputs_decode, padding_index=-100)\n )\n if labels_host is not None:\n labels = nested_numpify(labels_host)\n all_labels = labels if all_labels is None else nested_concat(\n all_labels, labels, padding_index=-100)\n\n # Number of samples\n if has_length(eval_dataset):\n num_samples = len(eval_dataset)\n # The instance check is weird and does not actually check for the type, but whether the dataset has the right\n # methods. 
Therefore we need to make sure it also has the attribute.\n elif isinstance(eval_dataset, IterableDatasetShard) and getattr(\n eval_dataset, \"num_examples\", 0) > 0:\n num_samples = eval_dataset.num_examples\n else:\n if has_length(dataloader):\n num_samples = self.num_examples(dataloader)\n else: # both len(dataloader.dataset) and len(dataloader) fail\n num_samples = observed_num_examples\n if num_samples == 0 and observed_num_examples > 0:\n num_samples = observed_num_examples\n\n # Number of losses has been rounded to a multiple of batch_size and in a distributed training, the number of\n # samplers has been rounded to a multiple of batch_size, so we truncate.\n if all_losses is not None:\n all_losses = all_losses[:num_samples]\n if all_preds is not None:\n all_preds = nested_truncate(all_preds, num_samples)\n if all_labels is not None:\n all_labels = nested_truncate(all_labels, num_samples)\n if all_inputs is not None:\n all_inputs = nested_truncate(all_inputs, num_samples)\n\n # Metrics!\n doc_labels = eval_dataset.doc_labels\n eval_samples = eval_dataset.samples\n split = eval_dataset.split\n if self.args.joint_train:\n doc_id_to_name = eval_dataset.id_to_name\n else:\n doc_id_to_name = None\n # allow_singletons = eval_dataset.data_args.allow_singletons\n assert all_preds is not None\n metrics = self.my_compute_metrics(doc_labels, all_preds,\n eval_samples, split,\n doc_id_to_name)\n # if all_preds is not None and doc_labels is not None:\n # metrics = self.get_eval_metrics(doc_labels, all_preds,\n # eval_samples, split)\n # else:\n # metrics = {}\n\n # To be JSON-serializable, we need to remove numpy types or zero-d tensors\n metrics = denumpify_detensorize(metrics)\n\n if all_losses is not None:\n metrics[f\"{metric_key_prefix}_loss\"] = all_losses.mean().item()\n\n # Prefix all keys with metric_key_prefix + '_'\n for key in list(metrics.keys()):\n if not key.startswith(f\"{metric_key_prefix}_\"):\n metrics[f\"{metric_key_prefix}_{key}\"] = metrics.pop(key)\n if self.args.gradient_checkpointing:\n self.model.config.use_cache = False\n return EvalLoopOutput(predictions=all_preds, label_ids=all_labels,\n metrics=metrics, num_samples=num_samples)\n\n def prediction_step(\n self,\n model: nn.Module,\n inputs: Dict[str, Union[torch.Tensor, Any]],\n prediction_loss_only: bool,\n ignore_keys: Optional[List[str]] = None,\n ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:\n \"\"\"\n Perform an evaluation step on `model` using `inputs`.\n\n Subclass and override to inject custom behavior.\n\n Args:\n model (`nn.Module`):\n The model to evaluate.\n inputs (`Dict[str, Union[torch.Tensor, Any]]`):\n The inputs and targets of the model.\n\n The dictionary will be unpacked before being fed to the model. Most models expect the targets under the\n argument `labels`. 
Check your model's documentation for all accepted arguments.\n prediction_loss_only (`bool`):\n Whether or not to return the loss only.\n ignore_keys:\n list of ignore keys\n\n Return:\n Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]: A tuple with the loss, logits and\n labels (each being optional).\n \"\"\"\n\n if not self.args.predict_with_generate or prediction_loss_only:\n return super().prediction_step(\n model, inputs, prediction_loss_only=prediction_loss_only,\n ignore_keys=ignore_keys\n )\n\n has_labels = \"labels\" in inputs\n inputs = self._prepare_inputs(inputs)\n\n # XXX: adapt synced_gpus for fairscale as well\n gen_kwargs = self._gen_kwargs.copy()\n gen_kwargs[\"max_length\"] = (\n gen_kwargs[\"max_length\"] if gen_kwargs.get(\n \"max_length\") is not None else self.model.config.max_length\n )\n gen_kwargs[\"num_beams\"] = (\n gen_kwargs[\"num_beams\"] if gen_kwargs.get(\n \"num_beams\") is not None else self.model.config.num_beams\n )\n default_synced_gpus = True if is_deepspeed_zero3_enabled() else False\n gen_kwargs[\"synced_gpus\"] = (\n gen_kwargs[\"synced_gpus\"] if gen_kwargs.get(\n \"synced_gpus\") is not None else default_synced_gpus\n )\n\n if \"attention_mask\" in inputs:\n gen_kwargs[\"attention_mask\"] = inputs.get(\"attention_mask\", None)\n if \"global_attention_mask\" in inputs:\n gen_kwargs[\"global_attention_mask\"] = inputs.get(\n \"global_attention_mask\", None)\n\n # prepare generation inputs\n # some encoder-decoder models can have varying encoder's and thus\n # varying model input names\n if hasattr(self.model,\n \"encoder\") and self.model.encoder.main_input_name != self.model.main_input_name:\n generation_inputs = inputs[self.model.encoder.main_input_name]\n else:\n generation_inputs = inputs[self.model.main_input_name]\n # add our logits_processor here\n if self.args.seq2seq_type != 'short_seq':\n if self.args.action_type == 'non_integer':\n special_ids = MENTION_END_NON_INT_SPECIAL_IDS if \\\n self.args.add_mention_end else NON_INT_SPECIAL_IDS\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [NonIntProcessor(generation_inputs, special_ids,\n self.args.seq2seq_type,\n self.args.add_mention_end)])\n else:\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [IntProcessor(generation_inputs, SPECIAL_IDS,\n self.args.seq2seq_type)])\n elif self.args.mark_sentence:\n gen_kwargs['logits_processor'] = LogitsProcessorList(\n [ShortSeqProcessor(generation_inputs, MARK_SPECIAL_IDS)])\n # if self.args.use_peft:\n # gen_kwargs[\"input_ids\"] = generation_inputs\n # gen_kwargs[\"use_cache\"] = True\n # generated_tokens = self.model.generate(\n # **gen_kwargs,\n # )\n # else:\n generated_tokens = self.model.generate(\n generation_inputs,\n **gen_kwargs,\n )\n # in case the batch is shorter than max length, the output should be padded\n if generated_tokens.shape[-1] < gen_kwargs[\"max_length\"]:\n generated_tokens = self._pad_tensors_to_max_len(generated_tokens,\n gen_kwargs[\n \"max_length\"])\n\n with torch.no_grad():\n with self.compute_loss_context_manager():\n outputs = model(**inputs)\n if has_labels:\n if self.label_smoother is not None:\n loss = self.label_smoother(outputs,\n inputs[\"labels\"]).mean().detach()\n else:\n loss = (outputs[\"loss\"] if isinstance(outputs, dict) else\n outputs[0]).mean().detach()\n else:\n loss = None\n\n if self.args.prediction_loss_only:\n return (loss, None, None)\n\n if has_labels:\n labels = inputs[\"labels\"]\n if labels.shape[-1] < gen_kwargs[\"max_length\"]:\n labels = 
self._pad_tensors_to_max_len(labels,\n gen_kwargs[\"max_length\"])\n else:\n labels = None\n\n return (loss, generated_tokens, labels)" }, { "identifier": "ConstrainedDataCollator", "path": "data.py", "snippet": "class ConstrainedDataCollator:\n \"\"\"\n Data collator that will dynamically pad the inputs received, as well as the labels.\n\n Args:\n tokenizer ([`PreTrainedTokenizer`] or [`PreTrainedTokenizerFast`]):\n The tokenizer used for encoding the data.\n model ([`PreTrainedModel`]):\n The model that is being trained. If set and has the *prepare_decoder_input_ids_from_labels*, use it to\n prepare the *decoder_input_ids*\n\n This is useful when using *label_smoothing* to avoid calculating loss twice.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `True`):\n Select a strategy to pad the returned sequences (according to the model's padding side and padding index)\n among:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n is provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n max_length (`int`, *optional*):\n Maximum length of the returned list and optionally padding length (see above).\n pad_to_multiple_of (`int`, *optional*):\n If set will pad the sequence to a multiple of the provided value.\n\n This is especially useful to enable the use of Tensor Cores on NVIDIA hardware with compute capability >=\n 7.5 (Volta).\n label_pad_token_id (`int`, *optional*, defaults to -100):\n The id to use when padding the labels (-100 will be automatically ignored by PyTorch loss functions).\n return_tensors (`str`):\n The type of Tensor to return. 
Allowable values are \"np\", \"pt\" and \"tf\".\n \"\"\"\n\n tokenizer: PreTrainedTokenizerBase\n model: Optional[Any] = None\n padding: Union[bool, str, PaddingStrategy] = True\n max_length: Optional[int] = None\n pad_to_multiple_of: Optional[int] = None\n label_pad_token_id: int = -100\n return_tensors: str = \"pt\"\n\n def __call__(self, features, return_tensors=None):\n import numpy as np\n\n if return_tensors is None:\n return_tensors = self.return_tensors\n labels = [feature[\"labels\"] for\n feature in features] if \"labels\" in features[\n 0].keys() else None\n decoder_labels = [feature[\"decoder_labels\"] for\n feature in features] if \"decoder_labels\" in features[\n 0].keys() else None\n # We have to pad the labels before calling `tokenizer.pad` as this method won't pad them and needs them of the\n # same length to return tensors.\n if labels is not None:\n assert decoder_labels is not None\n max_label_length = max(len(l) for l in labels)\n if self.pad_to_multiple_of is not None:\n max_label_length = (\n (max_label_length + self.pad_to_multiple_of - 1)\n // self.pad_to_multiple_of\n * self.pad_to_multiple_of\n )\n\n padding_side = self.tokenizer.padding_side\n for feature in features:\n remainder = [self.label_pad_token_id] * (\n max_label_length - len(feature[\"labels\"]))\n if isinstance(feature[\"labels\"], list):\n feature[\"labels\"] = (\n feature[\n \"labels\"] + remainder if padding_side == \"right\"\n else remainder + feature[\"labels\"]\n )\n feature[\"decoder_labels\"] = (\n feature[\n \"decoder_labels\"] + remainder if padding_side ==\n \"right\"\n else remainder + feature[\"decoder_labels\"]\n )\n elif padding_side == \"right\":\n feature[\"labels\"] = np.concatenate(\n [feature[\"labels\"], remainder]).astype(np.int64)\n feature[\"decoder_labels\"] = np.concatenate(\n [feature[\"decoder_labels\"], remainder]).astype(np.int64)\n else:\n feature[\"labels\"] = np.concatenate(\n [remainder, feature[\"labels\"]]).astype(np.int64)\n feature[\"decoder_labels\"] = np.concatenate(\n [remainder, feature[\"decoder_labels\"]]).astype(np.int64)\n\n features = self.tokenizer.pad(\n features,\n padding=self.padding,\n max_length=self.max_length,\n pad_to_multiple_of=self.pad_to_multiple_of,\n return_tensors=return_tensors,\n )\n\n # prepare decoder_input_ids\n if (\n labels is not None\n and self.model is not None\n and hasattr(self.model, \"prepare_decoder_input_ids_from_labels\")\n ):\n decoder_input_ids = self.model.prepare_decoder_input_ids_from_labels(\n labels=features[\"decoder_labels\"])\n features[\"decoder_input_ids\"] = decoder_input_ids\n if self.model.is_input_feed:\n decoder_input_actions = \\\n self.model.prepare_decoder_input_ids_from_labels(\n labels=features[\"labels\"])\n features[\"decoder_input_actions\"] = decoder_input_actions\n del features[\"decoder_labels\"]\n return features" }, { "identifier": "ConstrainedT5", "path": "model.py", "snippet": "class ConstrainedT5(T5ForConditionalGeneration):\n\n def __init__(self, config: T5Config, special_ids: Dict,\n seq2seq_type: str, action_type: str,\n add_mention_end: bool):\n super().__init__(config)\n self.mention_start = special_ids['mention_start']\n self.mention_end = special_ids.get('mention_end',None)\n self.eos_id = special_ids['eos']\n self.action_type = action_type\n self.add_mention_end = add_mention_end\n self.cluster_ids = None\n self.copy_id = special_ids['copy']\n self.seq2seq_type = seq2seq_type\n if action_type == 'integer':\n self.sep = special_ids['sep']\n self.ent_ids = special_ids['integers'] + 
[\n special_ids['mention_end']]\n self.specials = [self.mention_start, self.sep,\n self.copy_id] + self.ent_ids\n # self.seq2seq_type = seq2seq_type\n else:\n self.cluster_new = special_ids['cluster_new']\n self.cluster_ids = special_ids['cluster_ids']\n self.eos_id = special_ids['eos']\n if self.add_mention_end:\n self.specials = [self.mention_start,\n self.mention_end,\n self.cluster_new,\n self.copy_id] + self.cluster_ids\n else:\n self.specials = [self.mention_start,\n self.cluster_new,\n self.copy_id] + self.cluster_ids\n if self.seq2seq_type == 'tagging':\n self.specials.append(self.eos_id)\n self.is_input_feed = (self.seq2seq_type == \"input_feed\")\n\n @add_start_docstrings_to_model_forward(T5_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=Seq2SeqLMOutput,\n config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n decoder_input_ids: Optional[torch.LongTensor] = None,\n decoder_attention_mask: Optional[torch.BoolTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n decoder_head_mask: Optional[torch.FloatTensor] = None,\n cross_attn_head_mask: Optional[torch.Tensor] = None,\n encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n decoder_inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n decoder_input_actions: Optional[torch.LongTensor] = None,\n full_decoder_input_ids: Optional[torch.LongTensor] = None\n ) -> Union[Tuple[torch.FloatTensor], Seq2SeqLMOutput]:\n r\"\"\"\n labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):\n Labels for computing the sequence classification/regression loss. Indices should be in `[-100, 0, ...,\n config.vocab_size - 1]`. 
All labels set to `-100` are ignored (masked), the loss is only computed for\n labels in `[0, ..., config.vocab_size]`\n\n Returns:\n\n \"\"\"\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n # FutureWarning: head_mask was separated into two input args - head_mask, decoder_head_mask\n if head_mask is not None and decoder_head_mask is None:\n if self.config.num_layers == self.config.num_decoder_layers:\n warnings.warn(HEAD_MASK_WARNING_MSG, FutureWarning)\n decoder_head_mask = head_mask\n\n # Encode if needed (training, first prediction pass)\n if encoder_outputs is None:\n # Convert encoder inputs in embeddings if needed\n encoder_outputs = self.encoder(\n input_ids=input_ids,\n attention_mask=attention_mask,\n inputs_embeds=inputs_embeds,\n head_mask=head_mask,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n elif return_dict and not isinstance(encoder_outputs, BaseModelOutput):\n encoder_outputs = BaseModelOutput(\n last_hidden_state=encoder_outputs[0],\n hidden_states=encoder_outputs[1] if len(\n encoder_outputs) > 1 else None,\n attentions=encoder_outputs[2] if len(\n encoder_outputs) > 2 else None,\n )\n\n hidden_states = encoder_outputs[0]\n\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n\n if labels is not None and decoder_input_ids is None and decoder_inputs_embeds is None:\n # get decoder inputs from shifting lm labels to the right\n decoder_input_ids = self._shift_right(labels)\n # Set device for model parallelism\n if self.is_input_feed and not self.training and decoder_input_actions is None:\n decoder_input_actions = self.input_to_actions(\n full_decoder_input_ids)\n if self.model_parallel:\n torch.cuda.set_device(self.decoder.first_device)\n hidden_states = hidden_states.to(self.decoder.first_device)\n if decoder_input_ids is not None:\n decoder_input_ids = decoder_input_ids.to(\n self.decoder.first_device)\n if attention_mask is not None:\n attention_mask = attention_mask.to(self.decoder.first_device)\n if decoder_attention_mask is not None:\n decoder_attention_mask = decoder_attention_mask.to(\n self.decoder.first_device)\n if self.is_input_feed and decoder_input_actions is \\\n not None:\n decoder_input_actions = decoder_input_actions.to(\n self.decoder.first_device\n )\n if self.is_input_feed:\n decoder_token_embeds = self.decoder.embed_tokens(decoder_input_ids)\n if not self.training and past_key_values is not None:\n decoder_action_embeds = self.decoder.embed_tokens(\n decoder_input_actions[:, -1:])\n else:\n decoder_action_embeds = self.decoder.embed_tokens(\n decoder_input_actions)\n decoder_inputs_embeds = decoder_token_embeds / 2 + decoder_action_embeds / 2\n # Decode\n decoder_outputs = self.decoder(\n input_ids=decoder_input_ids if not self.is_input_feed else None,\n attention_mask=decoder_attention_mask,\n inputs_embeds=decoder_inputs_embeds,\n past_key_values=past_key_values,\n encoder_hidden_states=hidden_states,\n encoder_attention_mask=attention_mask,\n head_mask=decoder_head_mask,\n cross_attn_head_mask=cross_attn_head_mask,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n sequence_output = decoder_outputs[0]\n\n # Set device for model parallelism\n if self.model_parallel:\n torch.cuda.set_device(self.encoder.first_device)\n self.lm_head = 
self.lm_head.to(self.encoder.first_device)\n sequence_output = sequence_output.to(self.lm_head.weight.device)\n\n if self.config.tie_word_embeddings:\n # Rescale output before projecting on vocab\n # See https://github.com/tensorflow/mesh/blob/fa19d69eafc9a482aff0b59ddd96b025c0cb207d/mesh_tensorflow/transformer/transformer.py#L586\n sequence_output = sequence_output * (self.model_dim ** -0.5)\n\n lm_logits = self.lm_head(sequence_output)\n masks = torch.ones_like(lm_logits,\n dtype=torch.bool)\n masks[:, :, self.specials] = False\n lm_logits.masked_fill_(masks, -float('inf'))\n\n loss = None\n if labels is not None:\n # construct constrained mask here\n\n loss_fct = CrossEntropyLoss(ignore_index=-100)\n loss = loss_fct(lm_logits.view(-1, lm_logits.size(\n -1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + decoder_outputs[1:] + encoder_outputs\n return ((loss,) + output) if loss is not None else output\n\n return Seq2SeqLMOutput(\n loss=loss,\n logits=lm_logits,\n past_key_values=decoder_outputs.past_key_values,\n decoder_hidden_states=decoder_outputs.hidden_states,\n decoder_attentions=decoder_outputs.attentions,\n cross_attentions=decoder_outputs.cross_attentions,\n encoder_last_hidden_state=encoder_outputs.last_hidden_state,\n encoder_hidden_states=encoder_outputs.hidden_states,\n encoder_attentions=encoder_outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self,\n input_ids,\n past=None,\n attention_mask=None,\n head_mask=None,\n decoder_head_mask=None,\n cross_attn_head_mask=None,\n use_cache=None,\n encoder_outputs=None,\n **kwargs\n ):\n\n # cut decoder_input_ids if past is used\n if past is not None:\n cut_input_ids = input_ids[:, -1:]\n else:\n cut_input_ids = input_ids\n\n return {\n \"decoder_input_ids\": cut_input_ids,\n \"past_key_values\": past,\n \"encoder_outputs\": encoder_outputs,\n \"attention_mask\": attention_mask,\n \"head_mask\": head_mask,\n \"decoder_head_mask\": decoder_head_mask,\n \"cross_attn_head_mask\": cross_attn_head_mask,\n \"use_cache\": use_cache,\n \"full_decoder_input_ids\": input_ids\n }\n\n def input_to_actions(self, input_ids: torch.LongTensor):\n # input_ids : B x L\n input_actions = deepcopy(input_ids)\n if self.action_type == 'integer':\n is_sep = (input_ids == self.sep)\n is_end = (input_ids == self.mention_end)\n is_start = (input_ids == self.mention_start)\n is_ent = (is_sep.cumsum(-1) - is_end.cumsum(-1)).bool()\n is_copy = ((~is_start) & (~is_ent) & (~is_end))\n else:\n cluster_ids = self.cluster_ids.to(input_ids.device)\n is_not_cid = torch.isin(input_ids, cluster_ids, invert=True)\n is_not_start = (input_ids != self.mention_start)\n if self.add_mention_end:\n is_not_end = (input_ids != self.mention_end)\n is_copy = (is_not_start & is_not_end & is_not_cid)\n else:\n is_copy = (is_not_start & is_not_cid)\n input_actions[:, 1:][is_copy[:, 1:]] = self.copy_id\n return input_actions" } ]
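The ConstrainedT5 snippet above constrains decoding by masking every vocabulary logit outside self.specials to -inf before loss computation and generation. A minimal standalone sketch of that masking step, using a made-up 10-token vocabulary and made-up special ids (only torch is required):

import torch

def mask_to_specials(lm_logits: torch.Tensor, specials: list) -> torch.Tensor:
    # lm_logits: (batch, seq_len, vocab_size); positions listed in `specials`
    # keep their scores, everything else becomes -inf and can never be decoded
    masks = torch.ones_like(lm_logits, dtype=torch.bool)
    masks[:, :, specials] = False
    return lm_logits.masked_fill(masks, -float('inf'))

logits = torch.randn(1, 3, 10)                      # toy vocab of 10 tokens
constrained = mask_to_specials(logits, [2, 5, 7])   # hypothetical special ids
assert constrained[0, 0, [2, 5, 7]].isfinite().all()
assert torch.isinf(constrained[0, 0, 0])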
import logging import os import sys from transformers import HfArgumentParser, set_seed from transformers import AutoModelForSeq2SeqLM, \ DataCollatorForSeq2Seq, AutoConfig, AutoTokenizer from transformers.integrations import TensorBoardCallback from arguments import DataArguments, ModelArguments, CorefTrainingArguments \ as TrainingArguments from data import CorefDataset, JointDataset from constants import SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, \ COPY, CLUSTER_NEW, CLUSTERS, SENTENCE_START, SENTENCE_END, SPECIAL_IDS, \ NON_INT_SPECIAL_IDS, MARK_SPECIAL_IDS, MENTION_END_NON_INT_SPECIAL_IDS, \ MENTION_ENDS from trainer import CorefTrainer from data import ConstrainedDataCollator from model import ConstrainedT5
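The import block above brings in the special-token constants that main() registers on the tokenizer with add_tokens (see cropped_code below). A short sketch of the conventional follow-up, assuming a reachable t5-small checkpoint; the resize_token_embeddings call is standard transformers API and an assumption here, not quoted from this record:

from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("t5-small")
# add_tokens returns the number of ids that were actually new
num_new_tokens = tokenizer.add_tokens(
    ["<speaker>", "</speaker>", "<m>", "</m>", "<copy>"])
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
# grow the embedding matrix so the freshly added ids have rows to look up
model.resize_token_embeddings(len(tokenizer))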
20,328
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] +
logger = logging.getLogger(__name__) logger.addHandler(logging.StreamHandler(sys.stdout)) def main(): parser = HfArgumentParser( (ModelArguments, DataArguments, TrainingArguments)) if len(sys.argv) == 2 and sys.argv[1].endswith(".json"): model_args, data_args, training_args = parser.parse_json_file( json_file=os.path.abspath(sys.argv[1])) else: model_args, data_args, training_args = parser.parse_args_into_dataclasses() model_args: ModelArguments data_args: DataArguments training_args: TrainingArguments if ( os.path.exists(training_args.output_dir) and os.listdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir ): raise ValueError( f"Output directory ({training_args.output_dir}) already exists and is not empty. Use --overwrite_output_dir to overcome." ) # Setup logging logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN, ) logger.warning( "Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, fp16 training: %s, bf16 training: %s", training_args.local_rank, training_args.device, training_args.n_gpu, bool(training_args.local_rank != -1), training_args.fp16, training_args.bf16, ) logger.info("Training/evaluation parameters %s", training_args) logger.info("MODEL parameters %s", model_args) logger.info("Data arguments %s", data_args) set_seed(training_args.seed) tokenizer = AutoTokenizer.from_pretrained(model_args.model_name_or_path) if training_args.action_type == "integer": num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY]) elif training_args.action_type == "non_integer": if training_args.add_mention_end: num_new_tokens = tokenizer.add_tokens([SPEAKER_START, SPEAKER_END, MENTION_START, MENTION_END, COPY, CLUSTER_NEW] +
CLUSTERS)
11
2023-10-17 17:39:16+00:00
24k
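A note on the record above: its main() adds new special tokens with tokenizer.add_tokens, but the shown crop ends before any model setup. In standard Hugging Face usage the companion step is to resize the embedding matrix so the new ids do not index out of range; a minimal sketch under that assumption ("t5-small" and the marker strings are placeholders, not values from the record):

from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("t5-small")
model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")

num_new_tokens = tokenizer.add_tokens(["<speaker>", "</speaker>", "<m>", "</m>"])
if num_new_tokens > 0:
    # Grow the input/output embeddings to len(tokenizer) rows.
    model.resize_token_embeddings(len(tokenizer))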
giulio98/functional-diffusion-processes
src/functional_diffusion_processes/trainers/trainer.py
[ { "identifier": "AudioDataset", "path": "src/functional_diffusion_processes/datasets/audio_dataset.py", "snippet": "class AudioDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for defining audio datasets.\n\n This class serves as the foundation for defining datasets containing audio data.\n It includes methods for preprocessing, resizing, and normalizing audio data.\n Subclasses may override these methods to implement dataset-specific processing and resizing logic.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initialize an AudioDataset instance.\n\n Args:\n data_config (DictConfig): Configuration for loading the dataset, including paths, audio properties, etc.\n split (str): Specifies which split of the dataset to load (e.g., 'train', 'validation', 'test').\n evaluation (bool, optional): Indicates whether the dataset is for evaluation purposes. Defaults to False.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def normalize_audio(audio_np: np.ndarray, sample_rate: int) -> np.ndarray:\n \"\"\"Normalize the amplitude of the audio data to a standard range.\n\n This method utilizes PyDub's effects module to perform audio normalization.\n\n Args:\n audio_np (np.ndarray): Audio data represented as a NumPy array.\n sample_rate (int): The sample rate of the audio data.\n\n Returns:\n np.ndarray: The normalized audio data as a NumPy array.\n \"\"\"\n # Convert numpy array to AudioSegment\n audio_segment = AudioSegment(audio_np.tobytes(), frame_rate=int(sample_rate), sample_width=2, channels=1)\n\n # Normalize with PyDub\n normalized_audio_segment = effects.normalize(audio_segment)\n\n # Convert back to numpy\n normalized_audio_np = np.array(normalized_audio_segment.get_array_of_samples())\n\n return normalized_audio_np\n\n def _resize_op(self, audio: tf.Tensor, size: int) -> tf.Tensor:\n \"\"\"Resize the input audio to a specified size and normalize its amplitude to the range [0, 1].\n\n If the audio length is less than the specified size, zero padding is applied to reach the desired size.\n If the audio length is greater, it is truncated to the specified size.\n\n Args:\n audio (tf.Tensor): Input audio data as a TensorFlow tensor.\n size (int): The target size for the audio data.\n\n Returns:\n tf.Tensor: The resized and normalized audio data as a TensorFlow tensor.\n \"\"\"\n # Normalize dataset\n pylogger.info(\"Normalizing audio...\")\n audio = tf.cast(audio, dtype=tf.int16)\n # Calculate current length of the audio\n pylogger.info(\"Resizing audio to size {}...\".format(size))\n audio_length = tf.shape(audio)[0]\n audio = tf.cond(\n audio_length < size,\n lambda: tf.concat([audio, tf.zeros(size - audio_length, dtype=audio.dtype)], axis=0),\n lambda: audio[:size],\n )\n audio_np = tf.numpy_function(self.normalize_audio, [audio, self.data_config.audio_sample_rate], tf.int16)\n audio = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n audio = tf.cast(audio, dtype=tf.float32)\n pylogger.info(\"Converting audio to range [-1, 1]...\")\n max_intensity = self.data_config.audio_max_intensity\n audio = audio / max_intensity\n return audio\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocess the input audio data.\n\n This method resizes the audio data to a specified size based on the dataset configuration and normalizes the amplitude to the range [-1, +1].\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input audio data and any associated metadata.\n\n 
Returns:\n Dict[str, Any]: A dictionary containing the preprocessed audio data and any associated metadata.\n \"\"\"\n pylogger.info(\"Preprocessing audios for split {}...\".format(self.split))\n audio = self._resize_op(\n audio=d[\"audio\"], size=int(self.data_config.audio_sample_rate * self.data_config.audio_max_duration)\n )\n audio = tf.reshape(\n tensor=audio,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Audio reshaped to shape {}...\".format(audio.shape))\n return dict(data=audio, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Postprocess the output audio data.\n\n This method applies the inverse of the preprocessing steps to revert the audio data to its original form.\n\n Args:\n batch_data (Any): A batch of audio data to postprocess.\n inverse_scaler (Callable): A function that applies the inverse of the preprocessing steps.\n\n Returns:\n Any: A batch of postprocessed audio data.\n \"\"\"\n max_intensity = self.data_config.audio_max_intensity\n batch_audio = inverse_scaler(batch_data)\n batch_audio = batch_audio * max_intensity\n batch_post_processed = tf.cast(batch_audio, tf.int16)\n audio_np = tf.numpy_function(\n self.normalize_audio, [batch_post_processed, self.data_config.audio_sample_rate], tf.int16\n )\n batch_post_processed = tf.convert_to_tensor(audio_np, dtype=tf.int16)\n return batch_post_processed" }, { "identifier": "ImageDataset", "path": "src/functional_diffusion_processes/datasets/image_dataset.py", "snippet": "class ImageDataset(BaseDataset, abc.ABC):\n \"\"\"Base class for handling image datasets.\n\n Provides a structured way to load, preprocess, and post-process image data.\n This class can be extended to handle specific image datasets as required.\n\n Attributes:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Initializes the ImageDataset object with dataset configurations.\n\n Args:\n data_config (DictConfig): Configuration settings for loading the dataset.\n split (str): Specifies the dataset split to load ('train', 'val', 'test', etc.).\n evaluation (bool): Indicates if the dataset is used for evaluation.\n \"\"\"\n super().__init__(data_config, split, evaluation)\n\n @staticmethod\n def _resize_op(image: Any, size: int) -> Any:\n \"\"\"Resizes the input image to the specified size and normalizes its values to the range [0,1].\n\n Args:\n image (Any): A tensor representing the input image.\n size (int): The target size for each dimension of the output image.\n\n Returns:\n Any: A tensor representing the resized and normalized image.\n \"\"\"\n # convert to range [0,1]\n pylogger.info(\"Converting image to range [0,1]...\")\n image = tf.image.convert_image_dtype(image=image, dtype=tf.float32)\n\n # resize to size\n pylogger.info(\"Resizing image to size {}...\".format(size))\n\n image = tf.image.resize(images=image, size=[size, size])\n\n return image\n\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Preprocesses the input data by resizing, possibly flipping, and applying uniform dequantization.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data with keys 'image' and optionally 'label'.\n\n Returns:\n Dict[str, Any]: A dictionary 
containing the preprocessed data, with keys 'data' and optionally 'label'.\n \"\"\"\n image = self._resize_op(image=d[\"image\"], size=self.data_config.image_width_size)\n\n pylogger.info(\"Preprocessing images for split {}...\".format(self.split))\n\n if self.data_config.random_flip and not self.evaluation:\n pylogger.info(\"Applying random flips...\")\n image = tf.image.random_flip_left_right(image=image, seed=self.data_config.seed)\n\n if self.data_config.uniform_dequantization:\n pylogger.info(\"Applying uniform dequantization...\")\n image = (\n tf.random.uniform(shape=image.shape, dtype=tf.float32, seed=self.data_config.seed) + image * 255.0\n ) / 256.0\n\n image = tf.reshape(\n tensor=image,\n shape=(-1, self.data_config.output_size),\n )\n pylogger.info(\"Image reshaped to shape {}...\".format(image.shape))\n\n return dict(data=image, label=d.get(\"label\", None))\n\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Post-processes the output data by reverting the preprocessing steps.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to invert the scaling applied to the data.\n\n Returns:\n Any: A batch of postprocessed data, arranged in a grid for visualization.\n \"\"\"\n batch_post_processed = make_grid_image(\n ndarray=process_images(images=batch_data),\n inverse_scaler=inverse_scaler,\n )\n return batch_post_processed" }, { "identifier": "BaseDataset", "path": "src/functional_diffusion_processes/datasets/base_dataset.py", "snippet": "class BaseDataset(abc.ABC):\n \"\"\"Abstract base class for defining datasets.\n\n Provides a template for loading, preprocessing, and iterating over datasets.\n It encapsulates common dataset configurations and operations while allowing for dataset-specific\n preprocessing and post-processing through abstract methods.\n\n Attributes:\n dataset_builder: A builder object for loading the dataset.\n data_config (DictConfig): Configuration parameters for the dataset.\n split (str): Specifies which split of the dataset to load, e.g., 'train', 'validation', or 'test'.\n evaluation (bool): Indicates whether the dataset is for evaluation purposes.\n dataset_options: Options for configuring the dataset pipeline.\n \"\"\"\n\n def __init__(self, data_config: DictConfig, split: str, evaluation: bool = False) -> None:\n \"\"\"Abstract base class for defining datasets.\n\n This class provides a skeleton for defining datasets, with abstract methods for\n preprocessing data, generating batches of data, and resizing images. 
Subclasses\n must implement these methods to define their specific datasets.\n\n Args:\n data_config (DictConfig): A dictionary-like object containing the configuration for\n loading the dataset.\n\n split (str): A string specifying which split of the dataset to load.\n\n evaluation (bool): A boolean specifying whether the dataset is for evaluation purposes.\n \"\"\"\n self.dataset_builder = None\n self.data_config = data_config\n self.split = split\n self.evaluation = evaluation\n self.dataset_options = tf.data.Options()\n self.dataset_options.experimental_optimization.map_parallelization = True\n self.dataset_options.experimental_threading.private_threadpool_size = 48\n self.dataset_options.experimental_threading.max_intra_op_parallelism = 1\n\n @abc.abstractmethod\n def preprocess_fn(self, d: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Abstract method for preprocessing input data.\n\n Subclasses should override this method to implement dataset-specific preprocessing.\n\n Args:\n d (Dict[str, Any]): A dictionary containing the input data.\n\n Returns:\n Dict[str, Any]: A dictionary containing the preprocessed data.\n \"\"\"\n raise NotImplementedError(\"Subclasses must implement preprocess_fn method.\")\n\n @abc.abstractmethod\n def postprocess_fn(self, batch_data: Any, inverse_scaler: Callable) -> Any:\n \"\"\"Abstract method for postprocessing output data.\n\n Subclasses should override this method to implement dataset-specific post-processing.\n\n Args:\n batch_data (Any): A batch of data to postprocess.\n inverse_scaler (Callable): A function to inverse the scaling of the data.\n\n Returns:\n Any: A dictionary containing the postprocessed data.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the postprocess_fn method.\")\n\n def _generator(self) -> Iterator[Any]:\n \"\"\"Generate batches of preprocessed data.\n\n Loads the dataset, shuffles the data, applies preprocessing, and batches the data.\n Subclasses might override this method to implement dataset-specific batching logic.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n # load the dataset\n if isinstance(self.dataset_builder, tfds.core.DatasetBuilder):\n read_config = tfds.ReadConfig(options=self.dataset_options)\n if self.data_config.download:\n self.dataset_builder.download_and_prepare()\n ds = self.dataset_builder.as_dataset(\n split=self.split,\n shuffle_files=False,\n read_config=read_config,\n as_supervised=False,\n )\n else:\n ds = self.dataset_builder.with_options(options=self.dataset_options)\n\n ds = ds.shuffle(buffer_size=10000, seed=self.data_config.seed)\n\n # apply the preprocessing function to each element in the dataset\n ds = ds.map(map_func=self.preprocess_fn, num_parallel_calls=tf.data.experimental.AUTOTUNE)\n\n # determine the batch size per device\n ds = ds.batch(batch_size=self.data_config.batch_size, drop_remainder=True)\n ds = ds.batch(batch_size=jax.device_count(), drop_remainder=True)\n\n ds = ds.repeat(count=100000 if not self.evaluation else 1)\n\n return iter(ds.prefetch(buffer_size=tf.data.experimental.AUTOTUNE))\n\n def __iter__(self) -> Iterator[Any]:\n \"\"\"Return an iterator that generates batches of preprocessed data.\n\n Calls the `_generator` method to obtain an iterator for generating preprocessed data batches.\n\n Returns:\n Iterator[Any]: An iterator that generates batches of preprocessed data.\n \"\"\"\n return self._generator()\n\n def __len__(self) -> int:\n \"\"\"Return the number of examples in the 
dataset.\n\n Obtains the total number of examples in the specified dataset split from the dataset builder's info attribute.\n\n Returns:\n int: The number of examples in the dataset.\n \"\"\"\n return self.dataset_builder.info.splits[self.split].num_examples" }, { "identifier": "Loss", "path": "src/functional_diffusion_processes/losses/base_loss.py", "snippet": "class Loss(abc.ABC):\n \"\"\"Abstract class representing a loss function.\n\n Provides a framework for defining custom loss functions by enforcing the implementation\n of `construct_loss_fn` method in any derived classes. This class holds a reference to\n a stochastic differential equation (SDE) object which is used to calculate the weight factor for the loss.\n\n Attributes:\n sde (SDE): The stochastic differential equation instance associated with this loss.\n \"\"\"\n\n def __init__(self, sde: SDE) -> None:\n \"\"\"Initializes the Loss instance with a given SDE.\n\n Args:\n sde (SDE): An SDE instance which might be used in the loss computation.\n \"\"\"\n self.sde = sde\n\n def construct_loss_fn(self, model: Any) -> Callable:\n \"\"\"Abstract method to construct a loss function for a given model.\n\n This method should be implemented by any derived class to define the loss\n computation specific to the type of loss being implemented.\n\n Args:\n model (Any): The model for which to construct the loss function.\n\n Returns:\n Callable: A callable representing the constructed loss function.\n\n Raises:\n NotImplementedError: If the method is not implemented by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the construct_loss_fn method.\")" }, { "identifier": "FIDMetric", "path": "src/functional_diffusion_processes/metrics/fid_metric.py", "snippet": "class FIDMetric:\n \"\"\"Class for computing the Frechet Inception Distance (FID) metric.\n\n This class facilitates the computation of the FID metric, which measures the similarity between two distributions of images.\n It precomputes features for the real dataset using a specified Inception feature extractor and provides methods to compute\n and store features for generated images, and to compute the FID and Inception Score (IS).\n\n Attributes:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n generated_pools (list): List to store features of generated images.\n generated_logits (list): List to store logits of generated images.\n real_features (dict): Dictionary to store precomputed features of real dataset.\n \"\"\"\n\n def __init__(\n self,\n metric_config: DictConfig,\n feature_extractor: InceptionFeatureExtractor,\n dataset: BaseDataset,\n ) -> None:\n \"\"\"Initializes the FIDMetric class with specified configurations, feature extractor, and dataset.\n\n Args:\n metric_config (DictConfig): Configuration parameters for the FID metric.\n feature_extractor (InceptionFeatureExtractor): Inception feature extractor for computing the FID metric.\n dataset (BaseDataset): Dataset object providing real samples for FID computation.\n \"\"\"\n self.metric_config = metric_config\n self.feature_extractor = feature_extractor\n self.dataset = dataset\n self.generated_pools = []\n self.generated_logits = []\n try:\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n 
dataset_name=metric_config.dataset_name,\n )\n except FileNotFoundError:\n self._precompute_features(\n dataset_name=metric_config.dataset_name,\n save_path=metric_config.real_features_path,\n )\n self.real_features = load_dataset_stats(\n save_path=metric_config.real_features_path,\n dataset_name=metric_config.dataset_name,\n )\n\n def _precompute_features(self, dataset_name: str, save_path: str) -> None:\n \"\"\"Precomputes and saves features for the real dataset.\n\n Args:\n dataset_name (str): Name of the dataset.\n save_path (str): Path where the computed features will be saved.\n \"\"\"\n tf.io.gfile.makedirs(path=save_path)\n\n tf.io.gfile.makedirs(os.path.join(save_path, f\"{dataset_name.lower()}_clean\"))\n\n # Use the feature extractor to compute features for the real dataset\n all_pools = self.feature_extractor.extract_features(\n dataset=self.dataset, save_path=save_path, dataset_name=dataset_name\n )\n\n # Save latent represents of the Inception network to disk or Google Cloud Storage\n filename = f\"{dataset_name.lower()}_stats.npz\"\n\n if jax.host_id() == 0:\n pylogger.info(\"Saving real dataset stats to: %s\" % os.path.join(save_path, filename))\n\n with tf.io.gfile.GFile(os.path.join(save_path, filename), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=all_pools)\n f_out.write(io_buffer.getvalue())\n\n def compute_fid(self, eval_dir, num_sampling_round) -> Tuple[float, float]:\n \"\"\"Computes the FID and Inception Score (IS) for the generated and real images.\n\n Args:\n eval_dir (str): Directory path for evaluation.\n num_sampling_round (int): Number of sampling rounds.\n\n Returns:\n Tuple[float, float]: A tuple containing the FID and Inception Score.\n \"\"\"\n real_pools = self.real_features[\"pool_3\"]\n if not self.feature_extractor.inception_v3 and not self.feature_extractor.inception_v3 == \"lenet\":\n if len(self.generated_logits) == 0 or len(self.generated_pools) == 0:\n if jax.host_id() == 0:\n # Load all statistics that have been previously computed and saved for each host\n for host in range(jax.host_count()):\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n wait_message = False\n while len(stats) < num_sampling_round:\n if not wait_message:\n print(\"Waiting for statistics on host %d\" % (host,))\n wait_message = True\n stats = tf.io.gfile.glob(os.path.join(eval_dir, \"statistics_*.npz\"))\n time.sleep(10)\n\n for stat_file in stats:\n with tf.io.gfile.GFile(stat_file, \"rb\") as fin:\n stat = np.load(fin)\n\n self.generated_pools.append(stat[\"pool_3\"])\n self.generated_logits.append(stat[\"logits\"])\n\n all_logits = np.concatenate(self.generated_logits, axis=0)[: self.metric_config.num_samples]\n inception_score = tfgan.eval.classifier_score_from_logits(logits=all_logits)\n else:\n inception_score = -1\n\n all_pools = np.concatenate(self.generated_pools, axis=0)[: self.metric_config.num_samples]\n\n fid = tfgan.eval.frechet_classifier_distance_from_activations(activations1=real_pools, activations2=all_pools)\n\n return fid, inception_score\n\n def compute_and_store_generated_features(self, images: Any, sample_dir: str, round_num: int) -> None:\n \"\"\"Computes features for the generated images and stores them in a specified directory.\n\n Args:\n images (Any): Tensor representing the generated images.\n sample_dir (str): Directory where the features will be stored.\n round_num (int): Round number in the training process.\n \"\"\"\n latents = 
self.feature_extractor.extract_features(images)\n\n self.generated_pools.append(latents[\"pool_3\"])\n\n gc.collect()\n\n if self.feature_extractor.model_name == \"inception\" or self.feature_extractor.inception_v3:\n self.generated_logits.append(latents[\"logits\"])\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(\n io_buffer,\n pool_3=latents[\"pool_3\"],\n logits=latents[\"logits\"],\n )\n\n f_out.write(io_buffer.getvalue())\n\n elif self.feature_extractor.model_name == \"lenet\":\n with tf.io.gfile.GFile(os.path.join(sample_dir, f\"statistics_{round_num}.npz\"), \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, pool_3=latents[\"pool_3\"])\n f_out.write(io_buffer.getvalue())" }, { "identifier": "Sampler", "path": "src/functional_diffusion_processes/samplers/base_sampler.py", "snippet": "class Sampler(abc.ABC):\n \"\"\"Abstract base class for creating sampler objects.\n\n This class serves as a template for creating sampler objects which are\n designed to generate samples of a stochastic process governed by a\n specified stochastic differential equation (SDE). The process of sampling\n is carried out by employing specified predictor and corrector methods.\n\n Attributes:\n predictor (Predictor): The predictor method to be used in the sampling process.\n corrector (Corrector): The corrector method to be used in the sampling process.\n sde (SDE): The stochastic differential equation governing the process to be sampled.\n sampler_config (DictConfig): Configuration settings for the sampler.\n\n Methods:\n make_sampler(predict_fn: Callable) -> Callable:\n Abstract method to create a sampling function based on the specified predictor,\n corrector, and SDE.\n \"\"\"\n\n def __init__(self, predictor: Predictor, corrector: Corrector, sde: SDE, sampler_config: DictConfig) -> None:\n \"\"\"Initializes the Sampler object with specified predictor, corrector, SDE, and configuration.\n\n Args:\n predictor (Predictor): The predictor method for the sampler.\n corrector (Corrector): The corrector method for the sampler.\n sde (SDE): The stochastic differential equation governing the process.\n sampler_config (DictConfig): Configuration settings for the sampler.\n \"\"\"\n super().__init__()\n self.predictor = predictor\n self.corrector = corrector\n self.sampler_config = sampler_config\n self.sde = sde\n\n def make_sampler(self, predict_fn: Callable, auxiliary_fn: Union[Any, Callable]) -> Callable:\n \"\"\"Abstract method to create a sampler function.\n\n This method is intended to be overridden by derived classes to provide\n specific implementations for creating a sampler function. 
The sampler\n function will utilize the specified predictor and corrector methods\n along with the provided SDE to generate samples of the stochastic process.\n\n Args:\n predict_fn (Callable): The model prediction function.\n auxiliary_fn (Callable): The auxiliary prediction function for the model.\n\n Returns:\n Callable: The constructed sampling function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the make_sampler method.\")" }, { "identifier": "SDE", "path": "src/functional_diffusion_processes/sdetools/base_sde.py", "snippet": "class SDE(abc.ABC):\n \"\"\"Abstract base class for representing Stochastic Differential Equations (SDEs).\n\n This class provides a structured way to define and work with SDEs, including computing\n Fourier transforms, discretizing the equations, and defining the drift and diffusion terms.\n\n Attributes:\n sde_config (DictConfig): Configuration object containing SDE settings.\n T (float): Total time duration.\n N (int): Number of time steps.\n eps (float): Small constant for numerical stability.\n is_unidimensional (bool): Flag indicating if the SDE is unidimensional.\n \"\"\"\n\n def __init__(self, sde_config: DictConfig) -> None:\n \"\"\"Initializes the SDE with the given configuration.\n\n Args:\n sde_config (DictConfig): Configuration object containing SDE settings.\n \"\"\"\n super().__init__()\n self.sde_config = sde_config\n self.T = self.sde_config.T\n self.N = self.sde_config.N\n self.eps = self.sde_config.eps\n self.is_unidimensional = True if len(self.sde_config.shape) == 1 else False\n\n def fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.fft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.fft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n def inverse_fourier_transform(self, state: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the inverse Fourier transform of the given state.\n\n This method can handle both vectorized and non-vectorized input states.\n\n Args:\n state (jnp.ndarray): State whose inverse Fourier transform is to be computed.\n\n Returns:\n jnp.ndarray: Inverse Fourier transform of the given state.\n \"\"\"\n return (\n jnp.fft.ifft(state, norm=\"ortho\", axis=1)\n if self.is_unidimensional\n else jnp.fft.ifft2(state, norm=\"ortho\", axes=(1, 2))\n )\n\n @abc.abstractmethod\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Abstract method to compute the drift and diffusion terms of the SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the drift and diffusion terms of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the sde method.\")\n\n @abc.abstractmethod\n def marginal_prob(\n self,\n rng: PRNGKeyArray,\n x: jnp.ndarray,\n t: jnp.ndarray,\n t0: Optional[jnp.ndarray] = None,\n ) -> Tuple[Any, jnp.ndarray | Any]:\n \"\"\"Computes the marginal probability density at a given time.\n\n This is an abstract method that should be overridden by subclasses to\n compute the marginal probability density based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): State of the system.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[Any, jnp.ndarray | Any]: Marginal probability density at the given time.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the marginal_prob method.\")\n\n @abc.abstractmethod\n def diffuse(\n self, rng: PRNGKeyArray, x: jnp.ndarray, t: jnp.ndarray, t0: Optional[jnp.ndarray] = None\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Performs diffusion of the input from time t0 to time t.\n\n This is an abstract method that should be overridden by subclasses to\n implement the diffusion process based on the state and time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n x (jnp.ndarray): Input state.\n t (jnp.ndarray): Current time.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Mean of the corrupted input and the corrupted input.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the diffuse method.\")\n\n @abc.abstractmethod\n def prior_sampling(\n self, rng: PRNGKeyArray, shape: Tuple[int, ...], t0: Optional[jnp.ndarray] = None\n ) -> jnp.ndarray:\n \"\"\"Generates a sample from the prior distribution of the SDE.\n\n This is an abstract method that should be overridden by subclasses to\n implement the prior sampling process based on the shape and initial time.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the sample to be generated.\n t0 (Optional[jnp.ndarray], optional): Initial time. Defaults to None.\n\n Returns:\n jnp.ndarray: A sample from the prior distribution of the SDE.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the prior_sampling method.\")\n\n @abc.abstractmethod\n def score_fn(\n self, y_corrupted: jnp.ndarray, y_reconstructed: jnp.ndarray, t: jnp.ndarray, rng: Optional[PRNGKeyArray] = None\n ) -> jnp.ndarray:\n \"\"\"Computes the score function based on the corrupted and reconstructed states.\n\n This is an abstract method that should be overridden by subclasses to\n compute the score function based on the state and time.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n y_reconstructed (jnp.ndarray): Reconstructed state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n\n Returns:\n jnp.ndarray: The score function.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the score_fn method.\")\n\n @abc.abstractmethod\n def get_psm(self, t: jnp.ndarray) -> jnp.ndarray:\n \"\"\"Computes the Power-Special-Matrix(PSM) used as a weighting factor for the loss.\n\n This is an abstract method that should be overridden by subclasses to\n compute the state-dependent diffusion matrix based on the time.\n\n Args:\n t (jnp.ndarray): Current time.\n\n Returns:\n jnp.ndarray: The state-dependent diffusion matrix.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_psm method.\")\n\n @abc.abstractmethod\n def get_reverse_noise(self, rng: PRNGKeyArray, shape: Tuple[int, ...]) -> jnp.ndarray:\n \"\"\"Generates noise for the reverse SDE.\n\n This is an abstract method that should be overridden by subclasses to\n generate reverse noise based on the shape.\n\n Args:\n rng (PRNGKeyArray): Random number generator.\n shape (Tuple[int, ...]): Shape of the noise to be generated.\n\n Returns:\n jnp.ndarray: The reverse noise.\n\n Raises:\n NotImplementedError: If this method is not overridden by a derived class.\n \"\"\"\n raise NotImplementedError(f\"{self.__class__.__name__} must implement the get_reverse_noise method.\")\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the SDE into an iterative update rule.\n\n This method computes the discrete drift and diffusion terms based on the continuous SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Tuple containing the discrete drift and diffusion terms.\n \"\"\"\n dt = (self.T - self.eps) / self.N\n drift, diffusion = self.sde(y_corrupted, t, y_reconstructed)\n f = drift * dt\n g = diffusion * jnp.sqrt(dt)\n return f, g\n\n def reverse(self):\n \"\"\"Creates a reverse-time version of the current SDE.\n\n This method defines a nested class for the reverse-time SDE and returns an instance of it.\n\n Returns:\n ReverseSDE: An instance of the reverse-time SDE subclass.\n \"\"\"\n num_time_steps = self.N\n end_t = self.T\n sde_fn = self.sde\n discretize_fn = self.discretize\n score_fn = self.score_fn\n sde_config = self.sde_config\n\n class ReverseSDE(self.__class__, abc.ABC):\n \"\"\"Reverse Stochastic Differential Equation abstract base class.\"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize the ReverseSDE class.\n\n Inherits the properties from the original SDE class and overrides the relevant methods for the\n reverse-time SDE.\n \"\"\"\n super().__init__(sde_config)\n self.N = num_time_steps\n self.T = end_t\n self.score_fn = score_fn\n\n def sde(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Return the drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. 
Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the reverse-time SDE.\n \"\"\"\n drift, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = -drift + batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n # Set the diffusion function to zero for ODEs.\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n def discretize(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Discretizes the reverse-time SDE in the form of an iterative update rule.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the discretized reverse-time SDE.\n \"\"\"\n f, g = discretize_fn(y_corrupted, t, y_corrupted)\n rev_f = -f + batch_mul(\n g**2,\n self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n * (0.5 if self.sde_config.probability_flow else 1.0),\n )\n rev_g = jnp.zeros_like(g) if self.sde_config.probability_flow else g\n return rev_f, rev_g\n\n def semi_analytic(\n self,\n y_corrupted: jnp.ndarray,\n t: jnp.ndarray,\n rng: Optional[PRNGKeyArray] = None,\n y_reconstructed: Optional[jnp.ndarray] = None,\n ) -> Tuple[jnp.ndarray, jnp.ndarray]:\n \"\"\"Computes the semi-analytic drift and diffusion terms for the reverse-time SDE.\n\n Args:\n y_corrupted (jnp.ndarray): Corrupted state of the system.\n t (jnp.ndarray): Current time.\n rng (Optional[PRNGKeyArray], optional): Random number generator. Defaults to None.\n y_reconstructed (Optional[jnp.ndarray], optional): Reconstructed state of the system. 
Defaults to None.\n\n Returns:\n Tuple[jnp.ndarray, jnp.ndarray]: Drift and diffusion terms for the semi-analytic reverse-time SDE.\n \"\"\"\n _, diffusion = sde_fn(y_corrupted, t, y_reconstructed)\n score = self.score_fn(y_corrupted, y_reconstructed, t, rng=rng)\n drift = batch_mul(diffusion**2, score * (0.5 if self.sde_config.probability_flow else 1.0))\n diffusion = jnp.zeros_like(diffusion) if self.sde_config.probability_flow else diffusion\n return drift, diffusion\n\n return ReverseSDE()" }, { "identifier": "filter_mask", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def filter_mask(shape, radius):\n device_num, batch_size, rows, cols, n_channels = shape\n crow, ccol = int(rows / 2), int(cols / 2)\n center = [crow, ccol]\n x, y = jnp.ogrid[:rows, :cols]\n mask_area = (x - center[0]) ** 2 + (y - center[1]) ** 2 >= radius * radius\n mask = jnp.ones_like(mask_area)\n mask = jnp.where(mask_area, 0, mask)\n mask = mask.reshape(1, 1, rows, cols, 1)\n mask = jnp.repeat(mask, device_num, axis=0)\n mask = jnp.repeat(mask, batch_size, axis=1)\n mask = jnp.repeat(mask, n_channels, axis=4)\n return mask" }, { "identifier": "make_grid_image", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def make_grid_image(ndarray: Any, inverse_scaler: Callable, padding: int = 2, pad_value: float = 0.0) -> Any:\n \"\"\"Make a grid image from a Numpy Array.\n\n Args:\n ndarray: The Numpy Array.\n inverse_scaler: The inverse scaler.\n padding: The padding.\n pad_value: The padding value.\n\n Returns:\n The grid image.\n \"\"\"\n ndarray = jnp.asarray(ndarray)\n\n if ndarray.ndim == 4 and ndarray.shape[-1] == 1: # single-channel images\n ndarray = jnp.concatenate((ndarray, ndarray, ndarray), -1)\n\n n_row = int(np.sqrt(ndarray.shape[0]))\n # make the mini-batch of images into a grid\n n_maps = ndarray.shape[0]\n x_maps = min(n_row, n_maps)\n ymaps = int(math.ceil(float(n_maps) / x_maps))\n height, width = int(ndarray.shape[1] + padding), int(ndarray.shape[2] + padding)\n num_channels = ndarray.shape[3]\n grid = np.full((height * ymaps + padding, width * x_maps + padding, num_channels), pad_value).astype(np.float32)\n k = 0\n for y in range(ymaps):\n for x in range(x_maps):\n if k >= n_maps:\n break\n grid[\n y * height + padding : (y + 1) * height,\n x * width + padding : (x + 1) * width,\n ] = ndarray[k]\n k = k + 1\n\n ndarr = inverse_scaler(grid)\n ndarr = jnp.clip(ndarr * 255, 0, 255).astype(jnp.uint8)\n return ndarr" }, { "identifier": "process_images", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def process_images(images: Any) -> Any:\n \"\"\"Reshape images to the correct shape.\n\n Args:\n images: Tensor of images to reshape.\n\n Returns:\n A tensor of images with the correct shape.\n \"\"\"\n w = np.sqrt(images.shape[2]).astype(int)\n h = np.sqrt(images.shape[2]).astype(int)\n o = images.shape[3]\n return images.reshape(-1, w, h, o)" }, { "identifier": "save_samples", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "def save_samples(round_num: int, samples: Any, file_path: str) -> None:\n \"\"\"Save samples to a file.\n\n Args:\n round_num: The round number of the evaluation.\n samples: Tensor of samples to save.\n file_path: string of the Path to the file where the samples will be saved.\n \"\"\"\n for i in range(samples.shape[0]):\n clean_path = os.path.join(file_path, f\"clean/samples_{round_num}_{i}.npy\")\n np.save(clean_path, samples[i])\n samples_path = os.path.join(file_path, 
f\"samples_{round_num}.npz\")\n with tf.io.gfile.GFile(samples_path, \"wb\") as f_out:\n io_buffer = io.BytesIO()\n np.savez_compressed(io_buffer, samples=samples)\n f_out.write(io_buffer.getvalue())" }, { "identifier": "to_grayscale", "path": "src/functional_diffusion_processes/utils/common.py", "snippet": "@jax.pmap\ndef to_grayscale(images):\n weights = np.array([0.2989, 0.5870, 0.1140])[None, None, None, :] # Extend dimensions\n grayscale_images = np.sum(images * weights, axis=-1)\n return grayscale_images" }, { "identifier": "get_data_inverse_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_inverse_scaler(is_centered: bool) -> Callable:\n \"\"\"Inverse data normalizer.\n\n Rescale data to original range at the end of the diffusion.\n\n Args:\n is_centered: boolean if True data will rescaled from [-1, 1] to [0, 1].\n \"\"\"\n if is_centered:\n # Rescale [-1, 1] to [0, 1]\n return lambda x: (x + 1.0) / 2.0\n else:\n return lambda x: x" }, { "identifier": "get_data_scaler", "path": "src/functional_diffusion_processes/utils/scaler.py", "snippet": "def get_data_scaler(is_centered: bool) -> Callable:\n \"\"\"Normalize data. Assume data are always in [0, 1].\n\n Args:\n is_centered: boolean if True data will be centered in [-1, 1].\n \"\"\"\n if is_centered:\n # Rescale to [-1, 1]\n return lambda x: x * 2.0 - 1.0\n else:\n return lambda x: x" }, { "identifier": "TrainState", "path": "src/functional_diffusion_processes/utils/training_state.py", "snippet": "class TrainState(train_state.TrainState):\n \"\"\"The training state for the model.\"\"\"\n\n opt_state_params: Any\n ema_params: Any\n rng: jax.random.PRNGKey" }, { "identifier": "colorizing_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def colorizing_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, gray_scale_img: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform colorizing task on a given grayscale image.\n\n Args:\n sample_fn (Callable): The sampling function used for colorization.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for colorization.\n gray_scale_img (jnp.ndarray): The grayscale image to be colorized.\n\n Returns:\n Tuple: The updated state and the colorized image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, gray_scale_img)" }, { "identifier": "construct_sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_sampling_fn(model: flax.linen.Module, sampler: Sampler) -> Callable:\n \"\"\"Construct a sampling function for generating samples from the model.\n\n Args:\n model (flax.linen.Module): The model instance from which to generate samples.\n sampler (Sampler): The sampler instance used for sampling.\n\n Returns:\n Callable: The constructed sampling function.\n \"\"\"\n predict_fn = model.make_predict_fn()\n if isinstance(model, BaseMAML):\n super_resolution_fn = model.make_super_resolution_fn()\n sample_fn = sampler.make_sampler(predict_fn, super_resolution_fn)\n else:\n sample_fn = sampler.make_sampler(predict_fn, None)\n return sample_fn" }, { "identifier": "construct_train_step", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def construct_train_step(optimizer, loss_fn) -> Callable:\n \"\"\"Construct a train step function to be used in the training loop.\n\n This function creates a training step function which, when called, performs\n a single 
step of training including forward pass, loss computation, and\n backward pass for gradient computation and updates.\n\n Args:\n optimizer: The optimizer instance used for updating model parameters.\n loss_fn: The loss function used for computing the loss.\n\n Returns:\n Callable: The constructed train step function.\n \"\"\"\n\n @partial(jax.pmap, axis_name=\"device\")\n def train_fn(\n rng,\n params,\n optim_params,\n step,\n batch_input,\n batch,\n ):\n grad_params, (new_rng, loss, loss_inner, batch_reconstructed, batch_corrupted, target) = loss_fn(\n rng, params, step, batch_input, batch\n )\n\n loss = jax.lax.pmean(loss, axis_name=\"device\")\n grad_params = jax.lax.pmean(grad_params, axis_name=\"device\")\n\n updates, optim_params = optimizer.update(grad_params, optim_params, params)\n\n params = optax.apply_updates(params, updates)\n params = clip_learning_rates(params)\n return new_rng, loss, loss_inner, params, optim_params, batch_reconstructed, batch_corrupted, target\n\n return train_fn" }, { "identifier": "inpainting_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def inpainting_fn(\n sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray, image: jnp.ndarray, mask: jnp.ndarray\n) -> Tuple:\n \"\"\"Perform inpainting task on a given image using a mask.\n\n Args:\n sample_fn (Callable): The sampling function used for inpainting.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for inpainting.\n image (jnp.ndarray): The image to be inpainted.\n mask (jnp.ndarray): The mask used for inpainting.\n\n Returns:\n Tuple: The updated state and the inpainted image.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params, image, mask)" }, { "identifier": "sampling_fn", "path": "src/functional_diffusion_processes/trainers/helpers.py", "snippet": "def sampling_fn(sample_fn: Callable, carry_state: Tuple, batch_input: jnp.ndarray) -> Tuple:\n \"\"\"Perform sampling task using a given sampling function.\n\n Args:\n sample_fn (Callable): The sampling function.\n carry_state (Tuple): The current state of the model.\n batch_input (jnp.ndarray): The input data for sampling.\n\n Returns:\n Tuple: The updated state after performing the sampling.\n \"\"\"\n (rng, state) = carry_state\n return sample_fn(rng, batch_input, state.ema_params)" } ]
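For reference, the ReverseSDE.sde snippet in the context above computes drift = -f + g^2 * score, halving the score term and zeroing the diffusion when probability_flow is set. Up to the sign convention of integrating forward in the reversed time variable, this is the standard reverse-time SDE and its probability-flow ODE; a sketch in LaTeX, with f and g the forward drift and diffusion and p_t the marginal density:

% Reverse-time SDE (Anderson, 1982), matching ReverseSDE.sde:
\mathrm{d}y = \left[ f(y, t) - g(t)^{2}\, \nabla_{y} \log p_{t}(y) \right] \mathrm{d}t + g(t)\, \mathrm{d}\bar{w}
% Probability-flow ODE: same marginals, with the 0.5 factor and zero diffusion:
\mathrm{d}y = \left[ f(y, t) - \tfrac{1}{2}\, g(t)^{2}\, \nabla_{y} \log p_{t}(y) \right] \mathrm{d}t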
import abc import gc import io import logging import os import flax import flax.jax_utils as flax_utils import hydra.utils import jax import numpy as np import tensorflow as tf import wandb from typing import Any, Callable, Tuple, Union from cleanfid import fid from flax import linen, traverse_util from flax.training import checkpoints from flax.training.checkpoints import restore_checkpoint from jax import numpy as jnp from omegaconf import DictConfig, OmegaConf from tqdm.auto import tqdm from wandb.sdk.lib import RunDisabled from wandb.sdk.wandb_run import Run from ..datasets import AudioDataset, ImageDataset from ..datasets.base_dataset import BaseDataset from ..losses.base_loss import Loss from ..metrics import FIDMetric from ..samplers import Sampler from ..sdetools.base_sde import SDE from ..utils.common import filter_mask, make_grid_image, process_images, save_samples, to_grayscale from ..utils.scaler import get_data_inverse_scaler, get_data_scaler from ..utils.training_state import TrainState from .helpers import colorizing_fn, construct_sampling_fn, construct_train_step, inpainting_fn, sampling_fn
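The construct_train_step helper imported above (shown in full in the context list) wraps the step in jax.pmap and averages loss and gradients with jax.lax.pmean before the optax update. A self-contained sketch of that data-parallel pattern with a toy linear model; every name here is illustrative rather than taken from the repo:

from functools import partial

import jax
import jax.numpy as jnp
import optax

optimizer = optax.adam(learning_rate=1e-3)

@partial(jax.pmap, axis_name="device")
def train_step(params, opt_state, batch):
    def loss_fn(p):
        pred = batch["x"] @ p["w"]                 # toy linear model
        return jnp.mean((pred - batch["y"]) ** 2)

    loss, grads = jax.value_and_grad(loss_fn)(params)
    # Average across devices so every replica applies an identical update.
    loss = jax.lax.pmean(loss, axis_name="device")
    grads = jax.lax.pmean(grads, axis_name="device")
    updates, opt_state = optimizer.update(grads, opt_state, params)
    params = optax.apply_updates(params, updates)
    return params, opt_state, loss

As in the trainer below, params and opt_state must first be replicated across devices (e.g. with flax.jax_utils.replicate), and batch must carry a leading device axis.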
14,855
): self.save_checkpoint(step, run, state) # Evaluate the model if self.training_config.sampling and (step % self.training_config.eval_freq == 0): # if step != 0: if jax.host_id() == 0: pylogger.info("Generating samples at step %d." % (step,)) _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1) _, b, g, c = batch.shape sample_rng = jnp.asarray(sample_rng) if self.training_config.sampling_type == "full": batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn( sample_fn, (sample_rng, state), p_batch_input ) elif self.training_config.sampling_type == "colorization": batch_grayscale = to_grayscale(batch) batch_grayscale = batch_grayscale.reshape(-1, b, g, 1) batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn( sample_fn, (sample_rng, state), p_batch_input, batch_grayscale ) elif self.training_config.sampling_type == "inpainting": config_object = OmegaConf.create( { "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset", "data_config": { "seed": 42, "batch_size": ds_train.data_config.batch_size, "image_height_size": ds_train.data_config.image_height_size, "image_width_size": ds_train.data_config.image_width_size, "output_size": 1, "random_flip": False, "uniform_dequantization": False, "data_centered": False, "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets", "download": True, "is_mask": True, }, "split": "train", "evaluation": False, } ) ds_mask = hydra.utils.instantiate(config_object, _recursive_=False) ds_mask_iter = iter(ds_mask) batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"]) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked ) elif self.training_config.sampling_type == "deblurring": n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10) batch_freq = jnp.fft.fftshift( jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)), axes=(2, 3), ) batch_freq = batch_freq * batch_masked batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3))) batch_blurred = batch_blurred.reshape(-1, b, g, c) batch_masked = batch_masked.reshape(-1, b, g, c) batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn( sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked ) if jax.host_id() == 0 and self.logging.use_wandb: if isinstance(ds_train, ImageDataset): this_sample_dir = os.path.join( self.sample_dir, "iter_{}_host_{}".format(step, jax.host_id()), ) tf.io.gfile.makedirs(this_sample_dir) # code below to show the gif of the sampled images # processed_images = [] # for n in range(batch_sampled_all.shape[1]): # batch_sampled_i = batch_sampled_all[:, n, :, :, :] # batch_sampled_i = ds_train.postprocess_fn( # batch_data=batch_sampled_i, inverse_scaler=inverse_scaler # ) # processed_images.append(np.asarray(batch_sampled_i)) # # # Log the sampled images as a GIF # imageio.mimwrite( # os.path.join(this_sample_dir, "image_sequence.gif"), # processed_images, # fps=10, # ) # gif_wandb = wandb.Image( # os.path.join(this_sample_dir, "image_sequence.gif"), # caption="Sampled_all_gif", # ) # wandb.log({"Sampled_all_gif": gif_wandb}, step=step) batch_sampled = ds_train.postprocess_fn(batch_data=batch_sampled, inverse_scaler=inverse_scaler) batch_sampled_last = ds_train.postprocess_fn( 
batch_data=batch_sampled_last, inverse_scaler=inverse_scaler ) batch_real = ds_train.postprocess_fn( batch_data=batch.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if not self.training_config.sampling_only: batch_target = ds_train.postprocess_fn( batch_data=target.reshape(-1, b, g, c), inverse_scaler=inverse_scaler ) if isinstance(ds_train, ImageDataset): data_sampled = wandb.Image(np.asarray(batch_sampled), caption="Sampled") data_sampled_rec = wandb.Image(np.asarray(batch_sampled_last), caption="Sampled Rec") data_real = wandb.Image(np.asarray(batch_real), caption="Real") if not self.training_config.sampling_only: data_target = wandb.Image(np.asarray(batch_target), caption="Target")
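The "deblurring" branch in the loop above corrupts a batch by keeping only the low-frequency band of each image before asking the sampler to restore it. A minimal standalone sketch of that frequency-domain masking (the function name, shapes, and radius are illustrative; the repo's own filter_mask builds the equivalent mask):

import jax.numpy as jnp

def low_pass(images: jnp.ndarray, radius: float) -> jnp.ndarray:
    # images: (batch, rows, cols, channels); zero all frequencies outside a
    # centered circle of the given radius, then transform back to pixel space.
    _, rows, cols, _ = images.shape
    y, x = jnp.ogrid[:rows, :cols]
    keep = ((y - rows // 2) ** 2 + (x - cols // 2) ** 2) < radius * radius
    freq = jnp.fft.fftshift(jnp.fft.fft2(images, axes=(1, 2)), axes=(1, 2))
    freq = freq * keep[None, :, :, None]           # broadcast over batch/channels
    return jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(freq, axes=(1, 2)), axes=(1, 2)))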
# import imageio
pylogger = logging.getLogger(__name__)


class Trainer(abc.ABC):
    """Class for training a model."""

    def __init__(
        self,
        mode: str,
        model_name: str,
        training_config: DictConfig,
        optimizer,
        evaluation_config: DictConfig,
        trainer_logging: DictConfig,
        sampler: Sampler,
        loss_obj: Loss,
    ) -> None:
        """Initialize a Trainer instance with configurations and core components.

        Args:
            mode (str): Specifies the mode of the trainer, which can be either "train" or "eval".
            model_name (str): The name identifier for the model.
            training_config (DictConfig): A configuration dictionary for training settings.
            optimizer: The optimizer instance used for training.
            evaluation_config (DictConfig): A configuration dictionary for evaluation settings.
            trainer_logging (DictConfig): A configuration dictionary for logging settings.
            sampler (Sampler): A sampler instance for sampling from the model.
            loss_obj (Loss): A loss object used for computing the loss during training.
        """
        self.mode = mode
        self.model_name = model_name
        self.training_config = training_config
        self.optimizer = hydra.utils.instantiate(optimizer)
        self.evaluation_config = evaluation_config
        self.logging = trainer_logging
        self.sampler = sampler
        self.loss_obj = loss_obj
        self.checkpoint_dir = os.path.join(self.training_config.save_dir, self.training_config.checkpoint_dir)
        self.sample_dir = os.path.join(self.training_config.save_dir, self.training_config.sample_dir)
        self.eval_dir = os.path.join(self.training_config.save_dir, self.evaluation_config.eval_dir)

        # Create the directories for saving samples and checkpoints
        tf.io.gfile.makedirs(self.checkpoint_dir)
        tf.io.gfile.makedirs(self.sample_dir)
        tf.io.gfile.makedirs(self.eval_dir)
        tf.io.gfile.makedirs(os.path.join(self.eval_dir, "clean"))

    def initialize_wandb(
        self, dataset_config: DictConfig, sde_config: DictConfig, model_config: DictConfig
    ) -> Union[Run, RunDisabled, None]:
        """Initialize wandb if logging is enabled."""
        if self.logging.use_wandb:
            run = wandb.init(
                name=os.path.basename(self.logging.wandb_init.name),
                project=self.logging.wandb_init.project,
                entity=self.logging.wandb_init.entity,
                save_code=self.logging.wandb_init.save_code,
                config={
                    **self.training_config,
                    **dataset_config,
                    **sde_config,
                    **model_config,
                },
            )
        else:
            run = None
        return run

    def initialize_run(self, model, ds_train, sde):
        """Perform all initialization steps required for training."""
        run = self.initialize_wandb(ds_train.data_config, sde.sde_config, model.model_config)
        scaler = get_data_scaler(is_centered=ds_train.data_config.data_centered)
        inverse_scaler = get_data_inverse_scaler(is_centered=ds_train.data_config.data_centered)
        rng = jax.random.PRNGKey(seed=self.training_config.seed)
        rng, step_rng = jax.random.split(rng)
        batch_input = model.initialize_input(
            (ds_train.data_config.batch_size, *sde.sde_config.shape, ds_train.data_config.output_size)
        )
        params = jax.jit(model.initialize_model, backend="cpu")(step_rng, batch_input)
        flat_params = traverse_util.flatten_dict(params).values()
        tot_params = sum([jnp.size(p) for p in flat_params])
        pylogger.info("Total number of parameters: {:.2f}M".format(tot_params / 1e6))
        state = TrainState.create(
            apply_fn=model.apply,
            params=params,
            tx=self.optimizer,
            opt_state_params=self.optimizer.init(params),
            rng=rng,
            ema_params=params,
        )
        train_step_fn = construct_train_step(self.optimizer, self.loss_obj.construct_loss_fn(model))
        sample_fn = construct_sampling_fn(model, self.sampler)

        # Resume training when intermediate checkpoints are detected
        if self.training_config.resume_training:
            pylogger.warning("Resuming training from the latest checkpoint.")
            if self.logging.use_wandb and self.model_name != "local":
                model_file = wandb.use_artifact(self.model_name).download()
                state = restore_checkpoint(ckpt_dir=model_file, prefix="checkpoint_", target=state)
            else:
                state = checkpoints.restore_checkpoint(ckpt_dir=self.checkpoint_dir, target=state)
        return run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input

    def train_step(
        self,
        train_step_fn: Callable,
        carry_state: Tuple,
        batch: jnp.ndarray,
        batch_input: jnp.ndarray,
    ) -> Tuple:
        """Perform a single training step, updating the model parameters.

        Args:
            train_step_fn (Callable): The train step function.
            carry_state (Tuple): The current state of the model and optimizer.
            batch (jnp.ndarray): The batch of data used for training.
            batch_input (jnp.ndarray): The input data to the model.

        Returns:
            Tuple: The updated state after performing the training step.
        """
        (rng, state) = carry_state
        (
            new_rng,
            loss,
            loss_inner,
            new_params,
            new_optim_state,
            batch_reconstructed,
            batch_corrupted,
            target,
        ) = train_step_fn(
            rng,
            state.params,
            state.opt_state_params,
            state.step,
            batch_input,
            batch,
        )
        ema_rate = self.training_config.ema_rate
        new_params_ema = jax.tree_map(
            lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
            state.ema_params,
            new_params,
        )
        # update the state
        new_state = state.replace(
            rng=flax.jax_utils.unreplicate(new_rng),
            step=state.step + 1,
            opt_state_params=new_optim_state,
            params=new_params,
            ema_params=new_params_ema,
        )
        new_carry_state = (new_rng, new_state)
        loss = flax.jax_utils.unreplicate(loss)
        step = int(flax_utils.unreplicate(state.step))

        # Log the training progress
        if jax.host_id() == 0 and step % self.training_config.log_freq == 0:
            pylogger.info("step: %d, training_loss: %.5e" % (step, loss))
            if self.logging.use_wandb:
                wandb.log({"step": step, "loss": loss}, step=step)
            if loss_inner is not None:
                loss_inner = flax.jax_utils.unreplicate(loss_inner)
                for inner_step, loss in enumerate(loss_inner):
                    pylogger.info("step: %d, training_loss_inner: %.5e" % (step, loss))
                    if self.logging.use_wandb:
                        wandb.log({"step": step, f"loss inner step {inner_step}": loss}, step=step)
        return new_carry_state, batch_reconstructed, batch_corrupted, target

    def save_checkpoint(self, step, run, state):
        pylogger.info("Saving the model at step %d." % (step,))
        # Log the evaluation progress
        # Save the model parameters
        (
            params,
            opt_state_params,
            step_,
            ema_params,
        ) = flax_utils.unreplicate(
            (
                state.params,
                state.opt_state_params,
                state.step,
                state.ema_params,
            )
        )
        saved_state = state.replace(
            step=step_,
            opt_state_params=opt_state_params,
            params=params,
            ema_params=ema_params,
        )
        checkpoint_file = checkpoints.save_checkpoint(
            self.checkpoint_dir,
            saved_state,
            step=step_ // self.training_config.eval_freq,
            keep=np.inf,
        )
        if self.logging.use_wandb:
            wandb_model_artifact_name = str(step_) + "_" + run.id
            wandb_model = wandb.Artifact(wandb_model_artifact_name, type="model")
            wandb_model.add_file(checkpoint_file)
            run.log_artifact(wandb_model)

    # noinspection PyProtectedMember
    def train(self, model: linen.Module, ds_train: BaseDataset, sde: SDE) -> None:
        """Train the model with optional evaluation and logging.

        This method encapsulates the entire training process, including initialization,
        the training loop, checkpointing, evaluation, and logging. It supports different
        sampling types such as colorization, inpainting, super resolution, and deblurring.

        Args:
            model (linen.Module): The model to be trained.
            ds_train (BaseDataset): The training dataset.
            sde (SDE): Stochastic differential equation object governing the dynamics for sampling.

        Raises:
            ValueError: If an unsupported dataset type is provided.

        Note:
            The method leverages the Weights & Biases (wandb) platform for logging and
            checkpointing; make sure it is configured properly if logging is enabled.
        """
        run, scaler, inverse_scaler, rng, state, train_step_fn, sample_fn, batch_input = self.initialize_run(
            model, ds_train, sde
        )
        # `state.step` is a JAX integer on the GPU/TPU devices
        start_step = int(state.step)
        rng = state.rng

        # Replicate the train state on all devices
        (
            p_params,
            p_opt_state_params,
            p_step,
            p_ema_params,
            p_batch_input,
        ) = flax_utils.replicate(
            (
                state.params,
                state.opt_state_params,
                state.step,
                state.ema_params,
                batch_input,
            )
        )
        # update the TrainState with replicated parameters and optimizer state
        state = state.replace(
            params=p_params,
            opt_state_params=p_opt_state_params,
            step=p_step,
            ema_params=p_ema_params,
        )
        if jax.host_id() == 0:
            pylogger.info("Starting training loop at step %d." % (start_step,))
        rng = jax.random.fold_in(rng, jax.host_id())
        assert (
            self.training_config.log_freq % self.training_config.n_jitted_steps == 0
            and self.training_config.eval_freq % self.training_config.n_jitted_steps == 0
        ), "Missing logs or checkpoints!"
        ds_train_iter = iter(ds_train)
        with tqdm(
            total=self.training_config.total_steps + 1,
            initial=start_step,
            position=0,
            leave=True,
        ) as pbar:
            for step in range(
                start_step,
                self.training_config.total_steps + 1,
                self.training_config.n_jitted_steps,
            ):
                # Get the next batch of data and scale it
                batch = jax.tree_map(f=lambda x: scaler(x._numpy()), tree=next(ds_train_iter)["data"])
                if not self.training_config.sampling_only:
                    # Split the random number generator for the current step
                    rng, *next_rng = jax.random.split(key=rng, num=jax.local_device_count() + 1)
                    next_rng = jnp.asarray(next_rng)
                    ((_, state), batch_reconstructed, batch_corrupted, target) = self.train_step(
                        train_step_fn=train_step_fn,
                        carry_state=(next_rng, state),
                        batch=batch,
                        batch_input=p_batch_input,
                    )
                if not self.training_config.sampling_only and (
                    jax.host_id() == 0 and step % self.training_config.checkpoint_freq == 0 and step != 0
                ):
                    self.save_checkpoint(step, run, state)
                # Evaluate the model
                if self.training_config.sampling and (step % self.training_config.eval_freq == 0):
                    # if step != 0:
                    if jax.host_id() == 0:
                        pylogger.info("Generating samples at step %d." % (step,))
                    _, *sample_rng = jax.random.split(rng, jax.local_device_count() + 1)
                    _, b, g, c = batch.shape
                    sample_rng = jnp.asarray(sample_rng)
                    if self.training_config.sampling_type == "full":
                        batch_sampled, batch_sampled_last, batch_sampled_all = sampling_fn(
                            sample_fn, (sample_rng, state), p_batch_input
                        )
                    elif self.training_config.sampling_type == "colorization":
                        batch_grayscale = to_grayscale(batch)
                        batch_grayscale = batch_grayscale.reshape(-1, b, g, 1)
                        batch_sampled, batch_sampled_last, batch_sampled_all = colorizing_fn(
                            sample_fn, (sample_rng, state), p_batch_input, batch_grayscale
                        )
                    elif self.training_config.sampling_type == "inpainting":
                        config_object = OmegaConf.create(
                            {
                                "_target_": "functional_diffusion_processes.datasets.mnist_dataset.MNISTDataset",
                                "data_config": {
                                    "seed": 42,
                                    "batch_size": ds_train.data_config.batch_size,
                                    "image_height_size": ds_train.data_config.image_height_size,
                                    "image_width_size": ds_train.data_config.image_width_size,
                                    "output_size": 1,
                                    "random_flip": False,
                                    "uniform_dequantization": False,
                                    "data_centered": False,
                                    "data_dir": "${oc.env:DATA_ROOT}/tensorflow_datasets",
                                    "download": True,
                                    "is_mask": True,
                                },
                                "split": "train",
                                "evaluation": False,
                            }
                        )
                        ds_mask = hydra.utils.instantiate(config_object, _recursive_=False)
                        ds_mask_iter = iter(ds_mask)
                        batch_masked = jax.tree_map(f=lambda x: x._numpy(), tree=next(ds_mask_iter)["data"])
                        batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn(
                            sample_fn, (sample_rng, state), p_batch_input, (batch * batch_masked), batch_masked
                        )
                    elif self.training_config.sampling_type == "deblurring":
                        n_rows, n_cols = ds_train.data_config.image_height_size, ds_train.data_config.image_width_size
                        batch_masked = filter_mask(batch.reshape(-1, b, n_rows, n_cols, c).shape, radius=10)
                        batch_freq = jnp.fft.fftshift(
                            jnp.fft.fft2(batch.reshape(-1, b, n_rows, n_cols, c), axes=(2, 3)),
                            axes=(2, 3),
                        )
                        batch_freq = batch_freq * batch_masked
                        batch_blurred = jnp.real(jnp.fft.ifft2(jnp.fft.ifftshift(batch_freq, axes=(2, 3)), axes=(2, 3)))
                        batch_blurred = batch_blurred.reshape(-1, b, g, c)
                        batch_masked = batch_masked.reshape(-1, b, g, c)
                        batch_sampled, batch_sampled_last, batch_sampled_all = inpainting_fn(
                            sample_fn, (sample_rng, state), p_batch_input, batch_blurred, batch_masked
                        )
                    if jax.host_id() == 0 and self.logging.use_wandb:
                        if isinstance(ds_train, ImageDataset):
                            this_sample_dir = os.path.join(
                                self.sample_dir,
                                "iter_{}_host_{}".format(step, jax.host_id()),
                            )
                            tf.io.gfile.makedirs(this_sample_dir)
                        # code below to show the gif of the sampled images
                        # processed_images = []
                        # for n in range(batch_sampled_all.shape[1]):
                        #     batch_sampled_i = batch_sampled_all[:, n, :, :, :]
                        #     batch_sampled_i = ds_train.postprocess_fn(
                        #         batch_data=batch_sampled_i, inverse_scaler=inverse_scaler
                        #     )
                        #     processed_images.append(np.asarray(batch_sampled_i))
                        #
                        # # Log the sampled images as a GIF
                        # imageio.mimwrite(
                        #     os.path.join(this_sample_dir, "image_sequence.gif"),
                        #     processed_images,
                        #     fps=10,
                        # )
                        # gif_wandb = wandb.Image(
                        #     os.path.join(this_sample_dir, "image_sequence.gif"),
                        #     caption="Sampled_all_gif",
                        # )
                        # wandb.log({"Sampled_all_gif": gif_wandb}, step=step)
                        batch_sampled = ds_train.postprocess_fn(batch_data=batch_sampled, inverse_scaler=inverse_scaler)
                        batch_sampled_last = ds_train.postprocess_fn(
                            batch_data=batch_sampled_last, inverse_scaler=inverse_scaler
                        )
                        batch_real = ds_train.postprocess_fn(
                            batch_data=batch.reshape(-1, b, g, c), inverse_scaler=inverse_scaler
                        )
                        if not self.training_config.sampling_only:
                            batch_target = ds_train.postprocess_fn(
                                batch_data=target.reshape(-1, b, g, c), inverse_scaler=inverse_scaler
                            )
                        if isinstance(ds_train, ImageDataset):
                            data_sampled = wandb.Image(np.asarray(batch_sampled), caption="Sampled")
                            data_sampled_rec = wandb.Image(np.asarray(batch_sampled_last), caption="Sampled Rec")
                            data_real = wandb.Image(np.asarray(batch_real), caption="Real")
                            if not self.training_config.sampling_only:
                                data_target = wandb.Image(np.asarray(batch_target), caption="Target")
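The ema_params update in train_step above is the standard exponential moving average applied leaf-wise over the parameter pytree. A minimal standalone sketch of the same rule; the toy pytree and ema_rate value are illustrative, not taken from the record, and jax.tree_util.tree_map is the non-deprecated spelling of the jax.tree_map call used above:

import jax.numpy as jnp
from jax import tree_util

def ema_update(ema_params, new_params, ema_rate=0.999):
    # Blend each leaf: keep most of the old average, mix in a little of the new value.
    return tree_util.tree_map(
        lambda p_ema, p: p_ema * ema_rate + p * (1.0 - ema_rate),
        ema_params,
        new_params,
    )

params = {"w": jnp.ones(3), "b": jnp.full(3, 2.0)}
ema = {"w": jnp.zeros(3), "b": jnp.zeros(3)}
ema = ema_update(ema, params, ema_rate=0.9)  # each leaf moves 10% toward params
print(ema["w"])  # [0.1 0.1 0.1]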
elif isinstance(ds_train, AudioDataset):
0
2023-10-24 22:01:35+00:00
24k
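The "deblurring" branch of the trainer above corrupts images by keeping only low frequencies: it shifts the 2D spectrum, multiplies by a mask from filter_mask, and inverts the transform. A small NumPy sketch of that corruption, assuming a simple centered circular mask as a stand-in for the record's filter_mask helper (whose exact shape is not shown):

import numpy as np

def circular_lowpass_mask(h, w, radius=10):
    # 1 inside a centered disk of the given radius, 0 elsewhere (assumed stand-in
    # for filter_mask; the real helper may differ).
    yy, xx = np.mgrid[:h, :w]
    dist = np.sqrt((yy - h / 2.0) ** 2 + (xx - w / 2.0) ** 2)
    return (dist <= radius).astype(np.float64)

def blur_via_fft(batch, radius=10):
    # batch: (n, h, w); zero out high frequencies around the shifted spectrum center.
    mask = circular_lowpass_mask(batch.shape[1], batch.shape[2], radius)
    freq = np.fft.fftshift(np.fft.fft2(batch, axes=(1, 2)), axes=(1, 2))
    freq = freq * mask  # keep only low frequencies
    return np.real(np.fft.ifft2(np.fft.ifftshift(freq, axes=(1, 2)), axes=(1, 2)))

images = np.random.rand(4, 32, 32)
print(blur_via_fft(images).shape)  # (4, 32, 32)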
violet-sto/HN-GFN
main_mobo.py
[ { "identifier": "Dataset", "path": "dataset.py", "snippet": "class Dataset:\n\n def __init__(self, args, bpath, oracle, device):\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.train_mols = []\n self.test_mols = []\n self.all_mols = []\n self.train_mols_map = {}\n\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.proxy_repr_type, include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = torch.double\n else:\n self.mdp.floatX = torch.float\n self.mdp._cue_max_blocks = args.max_blocks\n self.max_blocks = args.max_blocks\n self.oracle = oracle\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n\n self.target_norm = [-8.6, 1.10] # for dockerscore\n\n self.hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))\n\n def load_h5(self, path, test_ratio=0.1, num_init_examples=None):\n import json\n columns = [\"smiles\", \"dockscore\",\"blockidxs\", \"slices\", \"jbonds\", \"stems\"]\n store = pd.HDFStore(path, 'r')\n df = store.select('df')\n # Pandas has problem with calculating some stuff on float16\n df.dockscore = df.dockscore.astype(\"float64\")\n for cl_mame in columns[2:]:\n df.loc[:, cl_mame] = df[cl_mame].apply(json.loads)\n\n test_idxs = self.test_split_rng.choice(\n len(df), int(test_ratio * len(df)), replace=False)\n\n split_bool = np.zeros(len(df), dtype=np.bool)\n split_bool[test_idxs] = True\n self.scores = []\n self.smis = []\n for i in tqdm(range(len(df))):\n m = BlockMoleculeDataExtended()\n for c in range(1, len(columns)):\n setattr(m, columns[c], df.iloc[i, c - 1])\n m.blocks = [self.mdp.block_mols[i] for i in m.blockidxs]\n if len(m.blocks) > self.max_blocks:\n continue\n m.numblocks = len(m.blocks)\n m.score = self.oracle.get_score([m])\n self.scores.append(m.score)\n self.smis.append(m.smiles)\n self.all_mols.append(m)\n if split_bool[i]: \n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n if len(self.train_mols)+len(self.test_mols) >= num_init_examples:\n break\n store.close()\n\n print(\"Sampling initial {} molecules from all {} molecules...\".format(\n num_init_examples, len(split_bool)))\n print(len(self.train_mols), 'train mols')\n print(len(self.test_mols), 'test mols')\n\n def r2r(self, dockscore=None, normscore=None):\n if dockscore is not None:\n normscore = 4-(min(0, dockscore) -\n self.target_norm[0])/self.target_norm[1]\n normscore = max(0.1, normscore)\n return (normscore/1) ** 1\n\n def _get(self, i, dset):\n return [(dset[i], dset[i].score)]\n\n def sample(self, n):\n eidx = np.random.randint(0, len(self.train_mols), n)\n samples = sum((self._get(i, self.train_mols) for i in eidx), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n s, r = mb\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n r = torch.tensor(pd.DataFrame.from_dict(\n r).values, device=self._device).float()\n return (s, r)\n\n def iterset(self, n, mode):\n if mode == 'test':\n dset = self.test_mols\n elif mode == 'train':\n dset = self.train_mols\n\n N = len(dset)\n for i in range(int(np.ceil(N/n))):\n samples = sum((self._get(j, dset)\n for j in range(i*n, min(N, (i+1)*n))), [])\n yield self.sample2batch(zip(*samples))\n\n def add_samples(self, batch):\n picked_mols, scores, picked_smis = batch\n\n for m in picked_mols:\n if np.random.uniform() < (1/10):\n self.test_mols.append(m)\n else:\n self.train_mols.append(m)\n 
self.all_mols.append(m)\n \n self.scores += scores\n self.smis += [smis[-1] for smis in picked_smis]\n \n self.stop_event.clear()\n\n def compute_hypervolume(self):\n scores = torch.tensor(pd.DataFrame.from_dict(self.scores).values)\n volume = self.hypervolume.compute(scores)\n\n return volume\n \n def start_samplers(self, n, mbsize):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(self.sample(mbsize))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n self.sampler_threads = [threading.Thread(target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "MolMDPExtended", "path": "mol_mdp_ext.py", "snippet": "class MolMDPExtended(MolMDP):\n\n def build_translation_table(self):\n \"\"\"build a symmetry mapping for blocks. Necessary to compute parent transitions\"\"\"\n self.translation_table = {}\n for blockidx in range(len(self.block_mols)):\n # Blocks have multiple ways of being attached. By default,\n # a new block is attached to the target stem by attaching\n # it's kth atom, where k = block_rs[new_block_idx][0].\n # When computing a reverse action (from a parent), we may\n # wish to attach the new block to a different atom. In\n # the blocks library, there are duplicates of the same\n # block but with block_rs[block][0] set to a different\n # atom. Thus, for the reverse action we have to find out\n # which duplicate this corresponds to.\n\n # Here, we compute, for block blockidx, what is the index\n # of the duplicate block, if someone wants to attach to\n # atom x of the block.\n # So atom_map[x] == bidx, such that block_rs[bidx][0] == x\n atom_map = {}\n for j in range(len(self.block_mols)):\n if self.block_smi[blockidx] == self.block_smi[j]:\n atom_map[self.block_rs[j][0]] = j\n self.translation_table[blockidx] = atom_map\n\n # We're still missing some \"duplicates\", as some might be\n # symmetric versions of each other. For example, block CC with\n # block_rs == [0,1] has no duplicate, because the duplicate\n # with block_rs [1,0] would be a symmetric version (both C\n # atoms are the \"same\").\n\n # To test this, let's create nonsense molecules by attaching\n # duplicate blocks to a Gold atom, and testing whether they\n # are the same.\n gold = Chem.MolFromSmiles('[Au]')\n # If we find that two molecules are the same when attaching\n # them with two different atoms, then that means the atom\n # numbers are symmetries. 
We can add those to the table.\n for blockidx in range(len(self.block_mols)):\n for j in self.block_rs[blockidx]:\n if j not in self.translation_table[blockidx]:\n symmetric_duplicate = None\n for atom, block_duplicate in self.translation_table[blockidx].items():\n molA, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,j]],\n frags=[gold, self.block_mols[blockidx]])\n molB, _ = chem.mol_from_frag(\n jun_bonds=[[0,1,0,atom]],\n frags=[gold, self.block_mols[blockidx]])\n if (Chem.MolToSmiles(molA) == Chem.MolToSmiles(molB) or\n molA.HasSubstructMatch(molB)):\n symmetric_duplicate = block_duplicate\n break\n if symmetric_duplicate is None:\n raise ValueError('block', blockidx, self.block_smi[blockidx],\n 'has no duplicate for atom', j,\n 'in position 0, and no symmetrical correspondance')\n self.translation_table[blockidx][j] = symmetric_duplicate\n #print('block', blockidx, '+ atom', j,\n # 'in position 0 is a symmetric duplicate of',\n # symmetric_duplicate)\n\n def parents(self, mol=None):\n \"\"\"returns all the possible parents of molecule mol (or the current\n molecule if mol is None.\n\n Returns a list of (BlockMoleculeDataExtended, (block_idx, stem_idx)) pairs such that\n for a pair (m, (b, s)), MolMDPExtended.add_block_to(m, b, s) == mol.\n \"\"\"\n if len(mol.blockidxs) == 1:\n # If there's just a single block, then the only parent is\n # the empty block with the action that recreates that block\n return [(BlockMoleculeDataExtended(), (mol.blockidxs[0], 0))]\n\n # Compute the how many blocks each block is connected to\n blocks_degree = defaultdict(int)\n for a,b,_,_ in mol.jbonds:\n blocks_degree[a] += 1\n blocks_degree[b] += 1\n # Keep only blocks of degree 1 (those are the ones that could\n # have just been added)\n blocks_degree_1 = [i for i, d in blocks_degree.items() if d == 1]\n # Form new molecules without these blocks\n parent_mols = []\n\n for rblockidx in blocks_degree_1:\n new_mol = mol.copy()\n # find which bond we're removing\n removed_bonds = [(jbidx, bond) for jbidx, bond in enumerate(new_mol.jbonds)\n if rblockidx in bond[:2]]\n assert len(removed_bonds) == 1\n rjbidx, rbond = removed_bonds[0]\n # Pop the bond\n new_mol.jbonds.pop(rjbidx)\n # Remove the block\n mask = np.ones(len(new_mol.blockidxs), dtype=np.bool)\n mask[rblockidx] = 0\n reindex = new_mol.delete_blocks(mask)\n # reindex maps old blockidx to new blockidx, since the\n # block the removed block was attached to might have its\n # index shifted by 1.\n\n # Compute which stem the bond was using\n stem = ([reindex[rbond[0]], rbond[2]] if rblockidx == rbond[1] else\n [reindex[rbond[1]], rbond[3]])\n # and add it back\n new_mol.stems = [list(i) for i in new_mol.stems] + [stem]\n #new_mol.stems.append(stem)\n # and we have a parent. 
The stem idx to recreate mol is\n # the last stem, since we appended `stem` in the back of\n # the stem list.\n # We also have to translate the block id to match the bond\n # we broke, see build_translation_table().\n removed_stem_atom = (\n rbond[3] if rblockidx == rbond[1] else rbond[2])\n blockid = mol.blockidxs[rblockidx]\n if removed_stem_atom not in self.translation_table[blockid]:\n raise ValueError('Could not translate removed stem to duplicate or symmetric block.')\n parent_mols.append([new_mol,\n # action = (block_idx, stem_idx)\n (self.translation_table[blockid][removed_stem_atom],\n len(new_mol.stems) - 1)])\n if not len(parent_mols):\n raise ValueError('Could not find any parents')\n return parent_mols\n\n\n def add_block_to(self, mol, block_idx, stem_idx=None, atmidx=None):\n '''out-of-place version of add_block'''\n #assert (block_idx >= 0) and (block_idx <= len(self.block_mols)), \"unknown block\"\n if mol.numblocks == 0:\n stem_idx = None\n new_mol = mol.copy()\n new_mol.add_block(block_idx,\n block=self.block_mols[block_idx],\n block_r=self.block_rs[block_idx],\n stem_idx=stem_idx, atmidx=atmidx)\n return new_mol\n\n def remove_jbond_from(self, mol, jbond_idx=None, atmidx=None):\n new_mol = mol.copy()\n new_mol.remove_jbond(jbond_idx, atmidx)\n return new_mol\n\n def a2mol(self, acts):\n mol = BlockMoleculeDataExtended()\n for i in acts:\n if i[0] >= 0:\n mol = self.add_block_to(mol, *i)\n return mol\n\n def reset(self):\n self.molecule = BlockMoleculeDataExtended()\n return None\n\n\n def post_init(self, device, repr_type, include_bonds=False, include_nblocks=False):\n self.device = device\n self.repr_type = repr_type\n #self.max_bond_atmidx = max([max(i) for i in self.block_rs])\n self.max_num_atm = max(self.block_natm)\n # see model_block.mol2graph\n self.true_block_set = sorted(set(self.block_smi))\n self.stem_type_offset = np.int32([0] + list(np.cumsum([\n max(self.block_rs[self.block_smi.index(i)])+1 for i in self.true_block_set])))\n self.num_stem_types = self.stem_type_offset[-1]\n self.true_blockidx = [self.true_block_set.index(i) for i in self.block_smi]\n self.num_true_blocks = len(self.true_block_set)\n self.include_nblocks = include_nblocks\n self.include_bonds = include_bonds\n #print(self.max_num_atm, self.num_stem_types)\n self.molcache = {}\n\n def mols2batch(self, mols):\n if self.repr_type == 'block_graph':\n return model_block.mols2batch(mols, self)\n elif self.repr_type == 'atom_graph':\n return model_atom.mols2batch(mols, self)\n elif self.repr_type == 'morgan_fingerprint':\n return model_fingerprint.mols2batch(mols, self)\n\n def mol2repr(self, mol=None):\n if mol is None:\n mol = self.molecule\n #molhash = str(mol.blockidxs)+':'+str(mol.stems)+':'+str(mol.jbonds)\n #if molhash in self.molcache:\n # return self.molcache[molhash]\n if self.repr_type == 'block_graph':\n r = model_block.mol2graph(mol, self, self.floatX)\n elif self.repr_type == 'atom_graph':\n r = model_atom.mol2graph(mol, self, self.floatX,\n bonds=self.include_bonds,\n nblocks=self.include_nblocks)\n elif self.repr_type == 'morgan_fingerprint':\n r = model_fingerprint.mol2fp(mol, self, self.floatX)\n #self.molcache[molhash] = r\n return r\n\n def get_nx_graph(self, mol: BlockMoleculeData, true_block=False):\n true_blockidx = self.true_blockidx\n\n G = nx.DiGraph()\n blockidxs = [true_blockidx[xx] for xx in mol.blockidxs] if true_block else mol.blockidxs\n\n G.add_nodes_from([(ix, {\"block\": blockidxs[ix]}) for ix in range(len(blockidxs))])\n\n if len(mol.jbonds) > 0:\n edges = []\n 
for jbond in mol.jbonds:\n edges.append((jbond[0], jbond[1],\n {\"bond\": [jbond[2], jbond[3]]}))\n edges.append((jbond[1], jbond[0],\n {\"bond\": [jbond[3], jbond[2]]}))\n G.add_edges_from(edges)\n return G\n\n def graphs_are_isomorphic(self, g1, g2):\n return nx.algorithms.is_isomorphic(g1, g2, node_match=node_match, edge_match=edge_match)" }, { "identifier": "BlockMoleculeDataExtended", "path": "mol_mdp_ext.py", "snippet": "class BlockMoleculeDataExtended(BlockMoleculeData):\n\n @property\n def mol(self):\n return chem.mol_from_frag(jun_bonds=self.jbonds, frags=self.blocks)[0]\n\n @property\n def smiles(self):\n return Chem.MolToSmiles(self.mol)\n\n def copy(self): # shallow copy\n o = BlockMoleculeDataExtended()\n o.blockidxs = list(self.blockidxs)\n o.blocks = list(self.blocks)\n o.slices = list(self.slices)\n o.numblocks = self.numblocks\n o.jbonds = list(self.jbonds)\n o.stems = list(self.stems)\n return o\n\n def as_dict(self):\n return {'blockidxs': self.blockidxs,\n 'slices': self.slices,\n 'numblocks': self.numblocks,\n 'jbonds': self.jbonds,\n 'stems': self.stems}" }, { "identifier": "Oracle", "path": "oracle/oracle.py", "snippet": "class Oracle():\n def __init__(self, args, mols_ref=None):\n '''\n @params:\n args (dict): argsurations\n '''\n self.objectives = args.objectives\n self.fps_ref = [AllChem.GetMorganFingerprintAsBitVect(x, 3, 2048) \n for x in mols_ref] if mols_ref else None\n self.device = torch.device(args.device)\n\n def batch_get_scores(self, mols):\n '''\n @params:\n mols: molecules to estimate score\n @return:\n dicts (list): list of score dictionaries\n '''\n dicts = [{} for _ in mols]\n for obj in self.objectives:\n scores = get_scores(obj, mols, device=self.device)\n for i, mol in enumerate(mols):\n dicts[i][obj] = scores[i]\n return dicts\n \n def get_score(self, mol):\n scores = {}\n for obj in self.objectives:\n score = get_scores(obj, mol, device=self.device)\n scores[obj] = score[0]\n \n return scores" }, { "identifier": "get_proxy", "path": "proxy/proxy.py", "snippet": "def get_proxy(args, bpath, oracle):\n if args.acq_fn.lower() == 'none':\n return NoAF(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ucb':\n return UCB(args, bpath, oracle)\n \n elif args.acq_fn.lower() == 'ucb_chebyshev':\n return UCB_chebyshev(args, bpath, oracle)\n\n elif args.acq_fn.lower() == 'ei':\n return EI(args, bpath, oracle)" }, { "identifier": "FMGFlowNet", "path": "generator/gfn.py", "snippet": "class FMGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n mdp = MolMDPExtended(bpath)\n mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n mdp.build_translation_table()\n self.model = make_model(args, mdp, is_proxy=False)\n self.opt = torch.optim.Adam(self.model.parameters(\n ), args.learning_rate, weight_decay=args.weight_decay)\n\n self.loginf = 1000 # to prevent nans\n self.log_reg_c = args.log_reg_c\n self.balanced_loss = args.balanced_loss\n self.do_nblocks_reg = False\n self.max_blocks = args.max_blocks\n self.leaf_coef = args.leaf_coef\n self.clip_grad = args.clip_grad\n # self.score_criterion = nn.MSELoss(reduction='none')\n self.score_criterion = nn.MSELoss()\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss, term_loss, flow_loss = self.FMLoss(p, pb, a, pw, w, r, s, d)\n\n self.opt.zero_grad()\n loss.backward()\n if self.clip_grad > 0:\n 
torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.clip_grad)\n self.opt.step()\n self.model.training_steps = i+1\n \n return (loss.item(), term_loss.item(), flow_loss.item())\n\n def FMLoss(self, p, pb, a, pw, w, r, s, d):\n # Since we sampled 'mbsize' trajectories, we're going to get\n # roughly mbsize * H (H is variable) transitions\n ntransitions = r.shape[0]\n # state outputs\n stem_out_s, mol_out_s = self.model(s, w) # log(F)\n # parents of the state outputs\n stem_out_p, mol_out_p = self.model(p, pw)\n # index parents by their corresponding actions\n qsa_p = self.model.index_output_by_action(\n p, stem_out_p, mol_out_p[:, 0], a)\n # then sum the parents' contribution, this is the inflow\n exp_inflow = (torch.zeros((ntransitions,), device=qsa_p.device, dtype=qsa_p.dtype)\n .index_add_(0, pb, torch.exp(qsa_p))) # pb is the parents' batch index\n inflow = torch.log(exp_inflow + self.log_reg_c)\n # sum the state's Q(s,a), this is the outflow\n exp_outflow = self.model.sum_output(s, torch.exp(\n stem_out_s), torch.exp(mol_out_s[:, 0]))\n # include reward and done multiplier, then take the log\n # we're guarenteed that r > 0 iff d = 1, so the log always works\n outflow_plus_r = torch.log(self.log_reg_c + r + exp_outflow * (1-d))\n if self.do_nblocks_reg:\n losses = _losses = ((inflow - outflow_plus_r) /\n (s.nblocks * self.max_blocks)).pow(2)\n else:\n losses = _losses = (inflow - outflow_plus_r).pow(2)\n\n term_loss = (losses * d).sum() / (d.sum() + 1e-20) # terminal nodes\n flow_loss = (losses * (1-d)).sum() / \\\n ((1-d).sum() + 1e-20) # non-terminal nodes\n \n if self.balanced_loss:\n loss = term_loss * self.leaf_coef + flow_loss\n else:\n loss = losses.mean()\n\n return loss, term_loss, flow_loss" }, { "identifier": "TBGFlowNet", "path": "generator/gfn.py", "snippet": "class TBGFlowNet(nn.Module):\n def __init__(self, args, bpath):\n super().__init__()\n self.args = args\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(args.device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n self.model = make_model(args, self.mdp, is_proxy=False)\n self.Z = nn.Sequential(nn.Linear(len(args.objectives), args.nemb//2), nn.LeakyReLU(),\n nn.Linear(args.nemb//2, 1))\n self.Z.to(args.device)\n self.opt = torch.optim.Adam(self.model.parameters(), args.learning_rate, weight_decay=args.weight_decay)\n self.opt_Z = torch.optim.Adam(self.Z.parameters(), args.Z_learning_rate, weight_decay=args.weight_decay)\n\n def forward(self, graph_data, vec_data=None, do_stems=True):\n return self.model(graph_data, vec_data, do_stems)\n\n def train_step(self, p, pb, a, pw, w, r, s, d, mols, i):\n loss = self.TBLoss(p, a, w, r, d, mols)\n self.opt.zero_grad()\n self.opt_Z.zero_grad()\n loss.backward()\n if self.args.clip_grad > 0:\n torch.nn.utils.clip_grad_norm_(\n self.model.parameters(), self.args.clip_grad)\n self.opt.step()\n self.opt_Z.step()\n\n return (loss.item(),)\n\n @property\n def Z(self):\n return self.model.Z\n\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], 
device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n backward_ll = scatter(torch.log(1/n), b, reduce='sum')\n\n losses = ((self.Z(w[d==1.]) + forward_ll) - (torch.log(r[d == 1.]) + backward_ll)).pow(2) \n loss = losses.mean()\n\n return loss" }, { "identifier": "MOReinforce", "path": "generator/gfn.py", "snippet": "class MOReinforce(TBGFlowNet):\n def TBLoss(self, p, a, w, r, d, mols):\n # logit\n stem_out_p, mol_out_p = self.model(p, w)\n # index parents by their corresponding actions\n logits = -self.model.action_negloglikelihood(\n p, a, stem_out_p, mol_out_p)\n\n b = torch.cat([torch.tensor([0], device=logits.device),\n torch.cumsum(d.long(), 0)[:-1]], dim=0)\n n = torch.tensor([len(self.mdp.parents(mol)) if a[idx, 0].item() != -1 else 1.\n for idx, mol in enumerate(mols[1])], device=logits.device)\n # n = torch.tensor([len(self.mdp.parents(mol)) for mol in mols[1]], device=logits.device)\n forward_ll = scatter(logits, b, reduce='sum')\n\n rewards = r[d == 1.]\n losses = forward_ll * (-rewards - (-1) * rewards.mean())\n loss = losses.mean()\n\n return loss" }, { "identifier": "set_random_seed", "path": "utils/utils.py", "snippet": "def set_random_seed(seed, deterministic=True):\n \"\"\"Set random seed.\"\"\"\n random.seed(seed)\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n torch.cuda.manual_seed_all(seed)\n if deterministic:\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = False" }, { "identifier": "compute_success", "path": "utils/metrics.py", "snippet": "def compute_success(mols, scores, objectives, score_succ):\n print(\"Computing successful rate...\")\n positive_mols = []\n success_dict = {k: 0. for k in objectives}\n\n for mol, score in zip(mols, scores):\n all_success = True\n for k, v in score.items():\n if v >= score_succ[k]:\n success_dict[k] += 1\n else:\n all_success = False\n if all_success:\n positive_mols.append(mol)\n\n success = 1.*len(positive_mols)/len(mols)\n\n return success, positive_mols" }, { "identifier": "compute_diversity", "path": "utils/metrics.py", "snippet": "def compute_diversity(mols):\n print(\"Computing diversity...\")\n\n if len(mols) == 0:\n return 0\n\n sims = []\n fps = [AllChem.GetMorganFingerprintAsBitVect(x.mol, 3, 2048) for x in mols]\n for i in range(len(fps)):\n sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])\n\n return 1 - np.mean(sims)" }, { "identifier": "compute_novelty", "path": "utils/metrics.py", "snippet": "def compute_novelty(mols, ref_mols):\n print(\"Computing novelty...\")\n positive_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x.mol, 3, 2048) for x in mols]\n ref_fps = [AllChem.GetMorganFingerprintAsBitVect(\n x, 3, 2048) for x in ref_mols]\n\n n_sim = 0.\n for i in range(len(positive_fps)):\n sims = DataStructs.BulkTanimotoSimilarity(positive_fps[i], ref_fps)\n if max(sims) >= 0.4:\n n_sim += 1\n novelty = 1. - 1. 
* n_sim / (len(positive_fps)+1e-6)\n\n return novelty" }, { "identifier": "compute_correlation", "path": "utils/metrics.py", "snippet": "def compute_correlation(args, model, rollout_worker, test_mols):\n\n mdp = rollout_worker.mdp\n device = args.device\n def tf(x): return torch.tensor(x, device=device).to(torch.float)\n def tint(x): return torch.tensor(x, device=device).long()\n\n # test_mols = pickle.load(gzip.open('data/some_mols_U_1k.pkl.gz'))\n logsoftmax = nn.LogSoftmax(0)\n corrs = []\n numblocks = []\n\n start_time = time.time()\n if args.n_objectives == 3:\n test_weights = rollout_worker.test_weights[::2]\n elif args.n_objectives == 4:\n test_weights = rollout_worker.test_weights[1:-2:4]\n else:\n test_weights = rollout_worker.test_weights\n \n for weights in test_weights:\n print(\"Computing correlation w.r.t test weights {}\".format(weights))\n weights = torch.tensor(weights).to(args.device)\n logp = []\n rewards = []\n for m in tqdm(test_mols):\n try:\n agraph = get_mol_path_graph(m, mdp)\n except:\n continue\n # rewards.append(np.log(moli[0][0]))\n reward = rollout_worker._get_reward(m, weights)[0].item()\n rewards.append(np.log(reward))\n s = mdp.mols2batch([mdp.mol2repr(agraph.nodes[i]['mol'])\n for i in agraph.nodes])\n numblocks.append(len(m.blocks))\n with torch.no_grad():\n # get the mols_out_s for ALL molecules not just the end one.\n if args.condition_type == 'Hyper_scorepred':\n stem_out_s, mol_out_s, _ = model(\n s, weights.repeat(s.num_graphs, 1))\n else:\n stem_out_s, mol_out_s = model(\n s, weights.repeat(s.num_graphs, 1))\n per_mol_out = []\n # Compute pi(a|s)\n for j in range(len(agraph.nodes)):\n a, b = s._slice_dict['stems'][j:j+2]\n\n stop_allowed = len(\n agraph.nodes[j]['mol'].blocks) >= args.min_blocks\n mp = logsoftmax(torch.cat([\n stem_out_s[a:b].reshape(-1),\n # If num_blocks < min_blocks, the model is not allowed to stop\n mol_out_s[j, :1] if stop_allowed else tf([-1000])]))\n per_mol_out.append(\n (mp[:-1].reshape((-1, stem_out_s.shape[1])), mp[-1]))\n\n # When the model reaches 8 blocks, it is stopped automatically. 
If instead it stops before\n # that, we need to take into account the STOP action's logprob\n if len(m.blocks) < 8:\n if args.condition_type == 'Hyper_scorepred':\n stem_out_last, mol_out_last, _ = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0))\n else:\n stem_out_last, mol_out_last = model(\n mdp.mols2batch([mdp.mol2repr(m)]), weights.unsqueeze(0)) \n mplast = logsoftmax(\n torch.cat([stem_out_last.reshape(-1), mol_out_last[0, :1]]))\n MSTOP = mplast[-1]\n\n # assign logprob to edges\n for u, v in agraph.edges:\n a = agraph.edges[u, v]['action']\n if a[0] == -1:\n agraph.edges[u, v]['logprob'] = per_mol_out[v][1]\n else:\n agraph.edges[u,\n v]['logprob'] = per_mol_out[v][0][a[1], a[0]]\n\n # propagate logprobs through the graph\n for n in list(nx.topological_sort(agraph))[::-1]:\n for c in agraph.predecessors(n):\n if len(m.blocks) < 8 and c == 0:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0) + MSTOP)\n else:\n agraph.nodes[c]['logprob'] = torch.logaddexp(\n agraph.nodes[c].get('logprob', tf(-1000)),\n agraph.edges[c, n]['logprob'] + agraph.nodes[n].get('logprob', 0))\n\n # add the first item\n # logp.append((moli, agraph.nodes[n]['logprob'].item()))\n logp.append(agraph.nodes[n]['logprob'].item())\n corrs.append(stats.spearmanr(rewards, logp).correlation)\n\n print('Spearmanr: {}, mean: {}, Time: {}'.format(corrs, np.mean(corrs), time.time()-start_time))\n return corrs" }, { "identifier": "circle_points", "path": "utils/metrics.py", "snippet": "def circle_points(K, min_angle=None, max_angle=None):\n # generate evenly distributed preference vector\n ang0 = 1e-6 if min_angle is None else min_angle\n ang1 = np.pi / 2 - ang0 if max_angle is None else max_angle\n angles = np.linspace(ang0, ang1, K, endpoint=True)\n x = np.cos(angles)\n y = np.sin(angles)\n weights = np.c_[x, y]\n normalized_weights = weights/weights.sum(1, keepdims=True)\n\n return normalized_weights.astype(np.float32)" }, { "identifier": "get_logger", "path": "utils/logging.py", "snippet": "def get_logger(args):\n if args.enable_tensorboard:\n return TensorboardLogger(args)\n else:\n return Logger(args)" }, { "identifier": "RolloutWorker", "path": "main.py", "snippet": "class RolloutWorker:\n def __init__(self, args, bpath, proxy, device):\n self.args = args\n self.test_split_rng = np.random.RandomState(142857)\n self.train_rng = np.random.RandomState(int(time.time()))\n self.mdp = MolMDPExtended(bpath)\n self.mdp.post_init(device, args.repr_type,\n include_nblocks=args.include_nblocks)\n self.mdp.build_translation_table()\n if args.floatX == 'float64':\n self.mdp.floatX = self.floatX = torch.double\n else:\n self.mdp.floatX = self.floatX = torch.float\n self.proxy = proxy\n self._device = device\n self.seen_molecules = set()\n self.stop_event = threading.Event()\n #######\n # This is the \"result\", here a list of (reward, BlockMolDataExt, info...) 
tuples\n self.sampled_mols = []\n self.online_mols = []\n self.hindsight_mols = []\n self.max_online_mols = 1000\n self.max_hindsight_mols = 1000\n\n self.min_blocks = args.min_blocks\n self.max_blocks = args.max_blocks\n self.mdp._cue_max_blocks = self.max_blocks\n self.reward_exp = args.reward_exp\n self.reward_min = args.reward_min\n self.reward_norm = args.reward_norm\n self.reward_exp_ramping = args.reward_exp_ramping\n self.random_action_prob = args.random_action_prob\n\n # If True this basically implements Buesing et al's TreeSample Q,\n # samples uniformly from it though, no MTCS involved\n if args.criterion == 'TB' or args.criterion == \"Reinforce\":\n self.ignore_parents = True\n elif args.criterion == 'FM':\n self.ignore_parents = False\n\n def rollout(self, generator, use_rand_policy=True, weights=None, replay=False):\n weights = Dirichlet(torch.ones(len(self.args.objectives))*self.args.alpha).sample_n(1).to(\n self.args.device) if weights is None else weights\n\n m = BlockMoleculeDataExtended()\n samples = []\n max_blocks = self.max_blocks\n trajectory_stats = []\n for t in range(max_blocks):\n s = self.mdp.mols2batch([self.mdp.mol2repr(m)])\n s_o, m_o = generator(s, vec_data=weights, do_stems=True)\n # fix from run 330 onwards\n if t < self.min_blocks:\n m_o = m_o*0 - 1000 # prevent assigning prob to stop\n # when we can't stop\n ##\n logits = torch.cat([m_o.reshape(-1), s_o.reshape(-1)])\n cat = torch.distributions.Categorical(\n logits=logits) \n action = cat.sample().item()\n\n if use_rand_policy and self.random_action_prob > 0: # just for training\n if self.train_rng.uniform() < self.random_action_prob:\n action = self.train_rng.randint(\n int(t < self.min_blocks), logits.shape[0])\n\n q = torch.cat([m_o.reshape(-1), s_o.reshape(-1)])\n trajectory_stats.append(\n (q[action].item(), action, torch.logsumexp(q, 0).item()))\n\n if t >= self.min_blocks and action == 0:\n r, raw_r = self._get_reward(m, weights) # r: reward, raw_r: scores for the objectives\n samples.append(((m,), ((-1, 0),), weights, weights, r, m, 1))\n break\n else:\n action = max(0, action-1)\n action = (action % self.mdp.num_blocks,\n action // self.mdp.num_blocks)\n m_old = m\n m = self.mdp.add_block_to(m, *action)\n if len(m.blocks) and not len(m.stems) or t == max_blocks - 1:\n # can't add anything more to this mol so let's make it\n # terminal. 
Note that this node's parent isn't just m,\n # because this is a sink for all parent transitions\n r, raw_r = self._get_reward(m, weights)\n if self.ignore_parents:\n samples.append(\n ((m_old,), (action,), weights, weights, r, m, 1))\n else:\n parents, actions = zip(*self.mdp.parents(m))\n samples.append((parents, actions, weights.repeat(\n len(parents), 1), weights, r, m, 1))\n break\n else:\n if self.ignore_parents:\n samples.append(\n ((m_old,), (action,), weights, weights, 0, m, 0))\n else:\n parents, actions = zip(*self.mdp.parents(m))\n samples.append(\n (parents, actions, weights.repeat(len(parents), 1), weights, 0, m, 0))\n\n p = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in samples[-1][0]])\n qp = generator(p, weights.repeat(p.num_graphs, 1))\n qsa_p = generator.model.index_output_by_action(\n p, qp[0], qp[1][:, 0],\n torch.tensor(samples[-1][1], device=self._device).long())\n inflow = torch.logsumexp(qsa_p.flatten(), 0).item()\n self.sampled_mols.append(\n ([i.cpu().numpy() for i in raw_r], weights.cpu().numpy(), m, trajectory_stats, inflow))\n\n if replay and self.args.hindsight_prob > 0.0:\n self._add_mol_to_replay(m)\n\n return samples\n\n def _get_reward(self, m, weights=None):\n rdmol = m.mol\n if rdmol is None:\n return self.reward_min\n \n # get scores from oracle\n score = self.proxy.get_score([m])\n score = torch.tensor(list(score.values())).to(self.args.device)\n \n if self.args.scalar == 'WeightedSum':\n raw_reward = (weights*score).sum()\n \n elif self.args.scalar == 'Tchebycheff':\n raw_reward = (weights*score).min() + 0.1 * (weights*score).sum()\n \n reward = self.l2r(raw_reward.clip(self.reward_min))\n return reward, (raw_reward, score)\n\n def execute_train_episode_batch(self, generator, dataset=None, use_rand_policy=True):\n if self.args.condition_type is None:\n weights = self.test_weights # train specific model\n else:\n weights = Dirichlet(torch.tensor(self.args.alpha_vector)*self.args.alpha).sample_n(1).to(self.args.device) #* sample weights per batch, seem better\n samples = sum((self.rollout(generator, use_rand_policy, weights)\n for i in range(self.args.trajectories_mbsize)), [])\n\n return zip(*samples)\n\n def sample2batch(self, mb):\n p, a, p_weights, weights, r, s, d, *o = mb\n mols = (p, s)\n # The batch index of each parent\n p_batch = torch.tensor(sum([[i]*len(p) for i, p in enumerate(p)], []),\n device=self._device).long()\n # Convert all parents and states to repr. 
Note that this\n # concatenates all the parent lists, which is why we need\n # p_batch\n p = self.mdp.mols2batch(list(map(self.mdp.mol2repr, sum(p, ()))))\n s = self.mdp.mols2batch([self.mdp.mol2repr(i) for i in s])\n # Concatenate all the actions (one per parent per sample)\n a = torch.tensor(sum(a, ()), device=self._device).long()\n # rewards and dones\n r = torch.tensor(r, device=self._device).to(self.floatX)\n d = torch.tensor(d, device=self._device).to(self.floatX)\n # weights\n p_w = torch.cat(p_weights, 0)\n w = torch.cat(weights, 0)\n return (p, p_batch, a, p_w, w, r, s, d, mols, *o)\n\n def l2r(self, raw_reward, t=0):\n if self.reward_exp_ramping > 0:\n reward_exp = 1 + (self.reward_exp - 1) * \\\n (1 - 1/(1 + t / self.reward_exp_ramping))\n # when t=0, exp = 1; t->∞, exp = self.reward_exp\n else:\n reward_exp = self.reward_exp\n\n reward = (raw_reward/self.reward_norm)**reward_exp\n\n return reward\n\n def start_samplers(self, generator, n, dataset):\n self.ready_events = [threading.Event() for i in range(n)]\n self.resume_events = [threading.Event() for i in range(n)]\n self.results = [None] * n\n\n def f(idx):\n while not self.stop_event.is_set():\n try:\n self.results[idx] = self.sample2batch(\n self.execute_train_episode_batch(generator, dataset, use_rand_policy=True))\n except Exception as e:\n print(\"Exception while sampling:\")\n print(e)\n self.sampler_threads[idx].failed = True\n self.sampler_threads[idx].exception = e\n self.ready_events[idx].set()\n break\n self.ready_events[idx].set()\n self.resume_events[idx].clear()\n self.resume_events[idx].wait()\n\n self.sampler_threads = [threading.Thread(\n target=f, args=(i,)) for i in range(n)]\n [setattr(i, 'failed', False) for i in self.sampler_threads]\n [i.start() for i in self.sampler_threads]\n round_robin_idx = [0]\n\n def get():\n while True:\n idx = round_robin_idx[0]\n round_robin_idx[0] = (round_robin_idx[0] + 1) % n\n if self.ready_events[idx].is_set():\n r = self.results[idx]\n self.ready_events[idx].clear()\n self.resume_events[idx].set()\n return r\n elif round_robin_idx[0] == 0:\n time.sleep(0.001)\n return get\n\n def stop_samplers_and_join(self):\n self.stop_event.set()\n if hasattr(self, 'sampler_threads'):\n while any([i.is_alive() for i in self.sampler_threads]):\n [i.set() for i in self.resume_events]\n [i.join(0.05) for i in self.sampler_threads]" }, { "identifier": "get_test_mols", "path": "main.py", "snippet": "def get_test_mols(args, mdp, num):\n samples = []\n fps = []\n early_stops = []\n while len(samples) < num:\n if len(samples) % 5000 == 0:\n print(f'{len(samples)}/{num} mols have been sampled')\n m = BlockMoleculeDataExtended()\n min_blocks = args.min_blocks\n max_blocks = args.max_blocks\n early_stop_at = np.random.randint(min_blocks, max_blocks + 1)\n early_stops.append(early_stop_at)\n for t in range(max_blocks):\n if t == 0:\n length = mdp.num_blocks+1\n else:\n length = len(m.stems)*mdp.num_blocks+1\n\n action = np.random.randint(1, length)\n\n if t == early_stop_at:\n action = 0\n\n if t >= min_blocks and action == 0:\n fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048)\n if len(samples)==0:\n samples.append(m)\n fps.append(fp)\n else:\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n if max(sims) < 0.7:\n samples.append(m)\n fps.append(fp)\n break\n else:\n action = max(0, action-1)\n action = (action % mdp.num_blocks, action // mdp.num_blocks)\n #print('..', action)\n m = mdp.add_block_to(m, *action)\n if len(m.blocks) and not len(m.stems) or t == max_blocks - 1:\n # 
can't add anything more to this mol so let's make it\n # terminal. Note that this node's parent isn't just m,\n # because this is a sink for all parent transitions\n fp = AllChem.GetMorganFingerprintAsBitVect(m.mol, 3, 2048)\n if len(samples)==0:\n samples.append(m)\n fps.append(fp)\n else:\n sims = DataStructs.BulkTanimotoSimilarity(fp, fps)\n if max(sims) < 0.7:\n samples.append(m)\n fps.append(fp)\n break\n \n return samples" } ]
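The compute_diversity snippet in the context above defines diversity as one minus the mean pairwise Tanimoto similarity over Morgan fingerprints. A self-contained RDKit sketch of the same computation on SMILES strings; the example molecules are arbitrary:

import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

def tanimoto_diversity(smiles_list):
    mols = [Chem.MolFromSmiles(s) for s in smiles_list]
    fps = [AllChem.GetMorganFingerprintAsBitVect(m, 3, 2048) for m in mols]
    sims = []
    for i in range(len(fps)):
        # Compare fingerprint i against all earlier ones (upper triangle of the matrix).
        sims += DataStructs.BulkTanimotoSimilarity(fps[i], fps[:i])
    return 1 - np.mean(sims)

print(tanimoto_diversity(["CCO", "c1ccccc1", "CC(=O)O"]))  # near 1 for dissimilar mols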
from collections import defaultdict
from dataset import Dataset
from mol_mdp_ext import MolMDPExtended, BlockMoleculeDataExtended
from oracle.oracle import Oracle
from proxy import get_proxy
from generator import TBGFlowNet, FMGFlowNet, MOReinforce
from utils.utils import set_random_seed
from utils.metrics import compute_success, compute_diversity, compute_novelty, compute_correlation, circle_points
from utils.logging import get_logger
from datetime import datetime
from botorch.utils.multi_objective.hypervolume import Hypervolume
from botorch.utils.sampling import sample_simplex
from botorch.utils.transforms import normalize, unnormalize
from torch.distributions.dirichlet import Dirichlet
from main import RolloutWorker, get_test_mols
from pymoo.util.ref_dirs import get_reference_directions
from copy import deepcopy
import random
import os
import re
import argparse
import json
import time
import threading
import pdb
import pickle
import gzip
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch
import pandas as pd
import numpy as np
import warnings
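Several routines in this file draw preference weights over the objectives from a Dirichlet distribution, which is why Dirichlet is imported above. A minimal sketch of sampling a batch of normalized weight vectors; the concentration vector here is illustrative, not the file's configured alpha_vector:

import torch
from torch.distributions.dirichlet import Dirichlet

alpha = torch.ones(4)                    # illustrative concentration, one entry per objective
weights = Dirichlet(alpha).sample((8,))  # shape (8, 4); entries are nonnegative
print(weights.sum(dim=1))                # every row sums to 1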
15557
sampled_mols = []
sampled_raw_rewards = []
sampled_means = []
sampled_smis = []
while len(sampled_mols) < args.num_samples:
    rollout_worker.rollout(generator, use_rand_policy=False,
                           weights=torch.tensor(weights).unsqueeze(0).to(args.device))
    (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1]
    sampled_mols.append(m)
    sampled_raw_rewards.append(raw_r[0].item())
    sampled_means.append(raw_r[1])
    sampled_smis.append(m.smiles)
idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples/len(rollout_worker.test_weights))]
picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist())
means.extend(np.array(sampled_means)[idx_pick].tolist())
smis.extend(np.array(sampled_smis)[idx_pick].tolist())
raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist())
raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean()

raw_rewards_mean = np.mean(list(raw_rewards_weight.values()))
assert len(picked_mols) == args.num_samples

top_means = torch.tensor(means)
scores_dict = oracle.batch_get_scores(picked_mols)
scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values)
test_loss = F.mse_loss(top_means, scores)

hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))
volume = hypervolume.compute(top_means)
volume_oracle = hypervolume.compute(scores)

diversity = compute_diversity(picked_mols)

batch_metrics = {'Hypervolume_reward': volume,
                 'Hypervolume_oracle': volume_oracle,
                 'Reward_mean': raw_rewards_mean,
                 'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(),
                 'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(),
                 'Test_loss': test_loss,
                 'Diversity': diversity}
print(batch_metrics)
print('Time: {}'.format(time.time()-time_start))

if not compute_multi_objective_metric:
    return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity
else:
    for i in range(len(picked_mols)):
        picked_mols[i].score = scores_dict[i]

    # success/diversity/novelty is computed among the top mols.
    success, positive_mols = compute_success(
        picked_mols, scores_dict, args.objectives, score_succ)
    succ_diversity = compute_diversity(positive_mols)
    if ref_mols:
        novelty = compute_novelty(positive_mols, ref_mols)
    else:
        novelty = 1.

    mo_metrics = {'success': success, 'novelty': novelty, 'succ_diversity': succ_diversity}

    picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))]
    print(mo_metrics)

    return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics


def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None):
    volume = dataset.compute_hypervolume()
    print("Hypervolume for {}: {}".format(args.logger.context, volume))
    args.logger.add_scalar('Metric/hypervolume', volume, use_context=False)
    args.logger.add_object('scores', dataset.scores)
    args.logger.add_object('smis', dataset.smis)

    if batch_infos:
        args.logger.add_scalar(
            'Metric/test_loss', batch_infos['Test_loss'], use_context=False)
        args.logger.add_object('collected_info', batch_infos)
    if MultiObjective_metrics:
        args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False)


def get_test_rays():
    if args.n_objectives == 3:
        n_partitions = 6
    elif args.n_objectives == 4:
        n_partitions = 7
    test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32)
    test_rays = test_rays[[(r > 0).all() for r in test_rays]]
    print(f"initialize {len(test_rays)} test rays")
    return test_rays


def main(args):
    set_random_seed(args.seed)
    args.logger.set_context('iter_0')
    bpath = "./data/blocks_105.json"
    dpath = "./data/docked_mols.h5"

    # Initialize oracle and dataset (for training surrogate function)
    oracle = Oracle(args)
    dataset = Dataset(args, bpath, oracle, args.device)
    dataset.load_h5(dpath, num_init_examples=args.num_init_examples)
    log_overall_metrics(args, dataset)
    args.n_objectives = len(args.objectives)

    # Initialize surrogate function
    proxy = get_proxy(args, bpath, oracle)
    proxy.update(dataset, 0, reset=False)

    for i in range(1, args.num_outer_loop_iters+1):
        print(f"====== Starting round {i} ======")
        args.logger.set_context('iter_{}'.format(i))
        test_weights = np.random.dirichlet(args.alpha_vector, 5*(2**(args.n_objectives-2))).astype(np.float32)

        if args.criterion == 'TB':
            generator = TBGFlowNet(args, bpath)
        elif args.criterion == 'FM':
            generator = FMGFlowNet(args, bpath)
        elif args.criterion == 'Reinforce':
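sample_batch in the cropped code above scores a candidate set by the hypervolume it dominates relative to a zero reference point, using the botorch Hypervolume class imported earlier. A small sketch of that metric with a made-up score matrix; rows are molecules, columns are objectives in [0, 1], higher is better:

import torch
from botorch.utils.multi_objective.hypervolume import Hypervolume

scores = torch.tensor([
    [0.9, 0.2],
    [0.5, 0.6],
    [0.1, 0.8],
])  # hypothetical, mutually non-dominated points
hv = Hypervolume(ref_point=torch.zeros(2))
print(hv.compute(scores))  # volume enclosed between the reference point and the front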
warnings.filterwarnings('ignore') def arg_parse(): parser = argparse.ArgumentParser() parser.add_argument("--device", type=str, default='cuda') parser.add_argument('--seed', type=int, default=42, help='seed') parser.add_argument("--run", default=0, help="run", type=int) parser.add_argument('--save', action='store_true', default=False, help='Save model.') parser.add_argument('--debug',action='store_true', default=False, help='debug mode, no multi thread') parser.add_argument("--enable_tensorboard", action='store_true', default=False) parser.add_argument("--log_dir", default='runs/mobo') parser.add_argument("--include_nblocks", default=False) parser.add_argument("--num_init_examples", default=200, type=int) parser.add_argument("--num_outer_loop_iters", default=8, type=int) parser.add_argument("--num_samples", default=100, type=int) parser.add_argument("--floatX", default='float32') parser.add_argument('--sample_iterations', type=int, default=1000, help='sample mols and compute metrics') parser.add_argument("--log_weight_score", action='store_true', default=False) # objectives parser.add_argument("--objectives", type=str, default='gsk3b,jnk3,qed,sa') parser.add_argument("--acq_fn", default='UCB', type=str) parser.add_argument("--beta", default=0.1, type=float) parser.add_argument("--scalar", default='WeightedSum', type=str) parser.add_argument("--alpha", default=1., type=float, help='dirichlet distribution') parser.add_argument("--alpha_vector", default='1,1,1,1', type=str) # Proxy parser.add_argument("--proxy_normalize", action='store_true', default=False, help='normalize Y') parser.add_argument("--proxy_num_iterations", default=10000, type=int) parser.add_argument("--proxy_learning_rate", default=2.5e-4, help="Learning rate", type=float) parser.add_argument("--proxy_mbsize", default=64, help="Minibatch size", type=int) parser.add_argument("--proxy_early_stop_tol", default=10, type=int) parser.add_argument("--proxy_repr_type", default='atom_graph') parser.add_argument("--proxy_model_version", default='v2') parser.add_argument("--proxy_num_conv_steps", default=12, type=int) parser.add_argument("--proxy_nemb", default=64, help="#hidden", type=int) parser.add_argument("--proxy_weight_decay", default=1e-6, help="Weight Decay in Proxy", type=float) parser.add_argument("--proxy_uncertainty", default="evidential", type=str) # deep ensemble and GP parser.add_argument("--proxy_dropout", default=0.1, help="MC Dropout in Proxy", type=float) parser.add_argument("--proxy_num_dropout_samples", default=5, type=int) parser.add_argument("--evidential_lam", default=0.1, type=float) parser.add_argument( "--fp_radius", type=int, default=2, help="Morgan fingerprint radius." ) parser.add_argument( "--fp_nbits", type=int, default=1024, help="Morgan fingerprint nBits." 
) # GFlowNet parser.add_argument("--min_blocks", default=2, type=int) parser.add_argument("--max_blocks", default=8, type=int) parser.add_argument("--num_iterations", default=5000, type=int) parser.add_argument("--criterion", default="FM", type=str) parser.add_argument("--learning_rate", default=5e-4, help="Learning rate", type=float) parser.add_argument("--Z_learning_rate", default=5e-3, help="Learning rate", type=float) parser.add_argument("--clip_grad", default=0, type=float) parser.add_argument("--trajectories_mbsize", default=8, type=int) parser.add_argument("--offline_mbsize", default=8, type=int) parser.add_argument("--hindsight_prob", default=0.2, type=float) parser.add_argument("--hindsight_buffer_mbsize", default=8, type=int) parser.add_argument("--hindsight_trajectories_mbsize", default=8, type=int) parser.add_argument("--reward_min", default=1e-2, type=float) parser.add_argument("--reward_norm", default=1, type=float) parser.add_argument("--reward_exp", default=8, type=float) parser.add_argument("--reward_exp_ramping", default=0, type=float) parser.add_argument("--logit_clipping", default=0., type=float) # Hyperparameters for TB parser.add_argument("--partition_init", default=1, type=float) # Hyperparameters for FM parser.add_argument("--log_reg_c", default=(0.1/8)**4, type=float) parser.add_argument("--balanced_loss", default=True) parser.add_argument("--leaf_coef", default=10, type=float) # Architecture parser.add_argument("--repr_type", default='block_graph') parser.add_argument("--model_version", default='v4') parser.add_argument("--num_conv_steps", default=10, type=int) parser.add_argument("--nemb", default=256, help="#hidden", type=int) parser.add_argument("--weight_decay", default=0, type=float) parser.add_argument("--random_action_prob", default=0.05, type=float) parser.add_argument("--bootstrap_tau", default=0, type=float) parser.add_argument("--condition_type", type=str, default='HN') parser.add_argument("--ray_hidden_dim", default=100, type=int) return parser.parse_args() class BoRolloutWorker(RolloutWorker): def __init__(self, args, bpath, proxy, device): super(BoRolloutWorker, self).__init__(args, bpath, proxy, device) self.hindsight_prob = args.hindsight_prob self.hindsight_mols = defaultdict(list) self.hindsight_smiles = defaultdict(list) self.replay_threshold = 0.9 def _get(self, i, dset, weights=None): # Sample trajectories by walking backwards from the molecules in our dataset # Handle possible multithreading issues when independent threads # add/substract from dset: m = dset[i] if not isinstance(m, BlockMoleculeDataExtended): m = m[-1] r, raw_r = self._get_reward(m, weights) done = 1 samples = [] # a sample is a tuple (parents(s), parent actions, reward(s), s, done) # an action is (blockidx, stemidx) or (-1, x) for 'stop' # so we start with the stop action, unless the molecule is already # a "terminal" node (if it has no stems, no actions). 
        if len(m.stems) and len(m.blocks) < self.max_blocks:
            samples.append(((m,), ((-1, 0),), weights, weights, r, m, done))
            r = done = 0
        while len(m.blocks):  # and go backwards
            if self.ignore_parents:
                parents = self.mdp.parents(m)
                parent, action = parents[self.train_rng.randint(len(parents))]
                samples.append(((parent,), (action,), weights, weights, r, m, done))
                r = done = 0
                m = parent
            else:
                parents, actions = zip(*self.mdp.parents(m))
                samples.append((parents, actions, weights.repeat(len(parents), 1), weights, r, m, done))
                r = done = 0
                m = parents[self.train_rng.randint(len(parents))]
        return samples[::-1]

    def _add_mol_to_replay(self, m):
        for i, weights in enumerate(self.test_weights):
            r, raw_r = self._get_reward(m, weights)
            if len(self.hindsight_mols[i]) < self.max_hindsight_mols or raw_r[0] > self.hindsight_mols[i][0][0]:
                if m.smiles not in self.hindsight_smiles[i]:
                    self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m))
                    self.hindsight_smiles[i].append(m.smiles)
                    if len(self.hindsight_mols[i]) > self.max_hindsight_mols:
                        self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x: (x[0]))[
                            max(int(0.05 * self.max_hindsight_mols), 1):]
                        self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]]

    def _add_mol_to_online(self, r, m, inflow):
        if self.replay_mode == 'online':
            r = r + self.train_rng.normal() * 0.01
            if len(self.online_mols) < self.max_online_mols or r > self.online_mols[0][0]:
                self.online_mols.append((r, m))
            if len(self.online_mols) > self.max_online_mols:
                self.online_mols = sorted(self.online_mols)[max(int(0.05 * self.max_online_mols), 1):]
        elif self.replay_mode == 'prioritized':
            self.online_mols.append((abs(inflow - np.log(r)), m))
            if len(self.online_mols) > self.max_online_mols * 1.1:
                self.online_mols = self.online_mols[-self.max_online_mols:]

    def _get_reward(self, m, weights=None):
        rdmol = m.mol
        if rdmol is None:
            # invalid molecule: fall back to the minimum reward, returned in the same
            # (reward, (raw_reward, score)) layout that all callers unpack
            return self.reward_min, (torch.tensor([self.reward_min]), None)
        # get reward from proxy
        raw_reward, score = self.proxy(m, weights)
        raw_reward = raw_reward.clip(self.reward_min)
        reward = self.l2r(raw_reward)
        return reward, (raw_reward, score)

    def execute_train_episode_batch(self, generator, dataset=None, Y_bounds=None, use_rand_policy=True):
        if self.train_rng.uniform() < self.hindsight_prob:
            idx = self.train_rng.randint(self.test_weights.shape[0])
            weights = self.test_weights[idx].unsqueeze(0)
            samples = sum((self.rollout(generator, use_rand_policy, weights)
                           for i in range(self.args.hindsight_trajectories_mbsize)), [])
            if self.args.hindsight_buffer_mbsize > 0:
                buffer = deepcopy(self.hindsight_mols[idx])
                reward = np.array([x[0] for x in buffer])
                prob = reward / sum(reward)
                eidx = np.random.choice(list(range(len(buffer))), self.args.hindsight_buffer_mbsize,
                                        replace=False, p=prob)
                offline_samples = sum((self._get(i, buffer, weights) for i in eidx), [])
                samples += offline_samples
        else:
            # sample weights per batch; this seems to work better
            weights = Dirichlet(torch.tensor(self.args.alpha_vector) * self.args.alpha).sample_n(1).to(self.args.device)
            samples = sum((self.rollout(generator, use_rand_policy, weights, replay=True)
                           for i in range(self.args.trajectories_mbsize)), [])
            # offline sampling from the dataset, using the oracle reward
            if self.args.offline_mbsize > 0 and dataset is not None:
                scores = torch.tensor(pd.DataFrame.from_dict(dataset.scores).values,
                                      dtype=torch.float32).to(self.args.device)
                if Y_bounds is not None:
                    scores = normalize(scores, Y_bounds)
                reward = torch.matmul(scores, weights.reshape(-1, 1))
                prob = (reward / sum(reward)).squeeze(1).cpu().numpy()
                eidx = np.random.choice(list(range(len(dataset.all_mols))),
                                        self.args.offline_mbsize, replace=False, p=prob)
                offline_samples = sum((self._get(i, dataset.all_mols, weights) for i in eidx), [])
                samples += offline_samples
        return zip(*samples)

    def initialize_hindsight_mols(self, dataset):
        for m in dataset.all_mols:
            for i, weights in enumerate(self.test_weights):
                r, raw_r = self._get_reward(m, weights)
                self.hindsight_mols[i].append((raw_r[0].item(), m.smiles, m))
        for i, weights in enumerate(self.test_weights):
            self.hindsight_mols[i] = sorted(self.hindsight_mols[i], key=lambda x: (x[0]))
            self.hindsight_smiles[i] = [x[1] for x in self.hindsight_mols[i]]


def train_generative_model(args, generator, bpath, proxy, oracle, dataset, test_weights, round_idx, do_save=False):
    print("Training generator...")
    os.makedirs(os.path.join(args.log_dir, f'round_{round_idx}'), exist_ok=True)
    device = args.device
    rollout_worker = BoRolloutWorker(args, bpath, proxy, device)
    rollout_worker.test_weights = torch.tensor(test_weights).to(device)
    rollout_worker.initialize_hindsight_mols(dataset)
    Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values,
                            proxy.partitioning.Y.max(dim=-2).values])

    def save_stuff(round_idx, iter):
        torch.save(generator.state_dict(), os.path.join(
            args.log_dir, 'round_{}/{}_generator_checkpoint.pth'.format(round_idx, iter)))
        pickle.dump(rollout_worker.sampled_mols,
                    gzip.open(f'{args.log_dir}/sampled_mols.pkl.gz', 'wb'))

    multi_thread = not args.debug
    if multi_thread:
        sampler = rollout_worker.start_samplers(generator, 8, dataset)

    def stop_everything():
        print('joining')
        rollout_worker.stop_samplers_and_join()

    last_losses = []
    train_losses = []
    test_losses = []
    test_infos = []
    train_infos = []
    time_last_check = time.time()

    for i in range(args.num_iterations + 1):
        if multi_thread:
            r = sampler()
            for thread in rollout_worker.sampler_threads:
                if thread.failed:
                    stop_everything()
                    pdb.post_mortem(thread.exception.__traceback__)
                    return
            p, pb, a, pw, w, r, s, d, mols = r
        else:
            p, pb, a, pw, w, r, s, d, mols = rollout_worker.sample2batch(
                rollout_worker.execute_train_episode_batch(generator, dataset, Y_bounds, use_rand_policy=True))
        loss = generator.train_step(p, pb, a, pw, w, r, s, d, mols, i)
        last_losses.append(loss)

        if not i % 100:
            train_loss = [np.round(np.mean(loss_part), 3) for loss_part in zip(*last_losses)]
            train_losses.append(train_loss)
            args.logger.add_scalar(
                'Loss/round{}/train'.format(round_idx), train_loss[0], use_context=False)
            print('Iter {}: Loss {}, Time {}'.format(
                i, train_loss, round(time.time() - time_last_check, 3)))
            time_last_check = time.time()
            last_losses = []

        if not i % args.sample_iterations and i != 0:
            volume, volume_oracle, reward_weight, reward_mean, test_loss, diversity = sample_batch(
                args, generator, rollout_worker, oracle, proxy,
                Y_bounds=Y_bounds, compute_multi_objective_metric=False)
            args.logger.add_scalar(
                'round{}/Top-100-sampled/volumes'.format(round_idx), volume, use_context=False)
            args.logger.add_scalar(
                'round{}/Top-100-sampled/volumes_oracle'.format(round_idx), volume_oracle, use_context=False)
            # reward_weight is a dict; the keys are the test_weights
            args.logger.add_scalars(
                'round{}/Top-100-sampled/reward_weight'.format(round_idx), reward_weight, use_context=False)
            args.logger.add_scalar(
                'round{}/Top-100-sampled/reward_mean'.format(round_idx), reward_mean, use_context=False)
            args.logger.add_scalar(
                'round{}/Top-100-sampled/test_loss'.format(round_idx), test_loss, use_context=False)
            args.logger.add_scalar(
                'round{}/Top-100-sampled/dists'.format(round_idx), diversity, use_context=False)
            if do_save:
                save_stuff(round_idx, i)

    stop_everything()
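    # Illustrative gloss (added comment, not from the original source): after the
    # sampler threads join, the weights saved at the final checkpoint are reloaded
    # below, so the returned generator matches what was written to disk; `i` here
    # is the last index left over from the training loop above.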
    if do_save:
        save_stuff(round_idx, i)
        checkpoint_path = os.path.join(args.log_dir, f'round_{round_idx}/{i}_generator_checkpoint.pth')
        generator.load_state_dict(torch.load(checkpoint_path))
    return rollout_worker, {'train_losses': train_losses,
                            'test_losses': test_losses,
                            'test_infos': test_infos,
                            'train_infos': train_infos}


def sample_batch(args, generator, rollout_worker, oracle=None, proxy=None, ref_mols=None, Y_bounds=None,
                 compute_multi_objective_metric=False):
    score_succ = {'gsk3b': 0.5, 'jnk3': 0.5, 'drd2': 0.5, 'chemprop_sars': 0.5,
                  'chemprop_hiv': 0.5, 'seh': 0.5, 'qed': 0.6, 'sa': 0.67}
    if Y_bounds is None:
        Y_bounds = torch.stack([proxy.partitioning.Y.min(dim=-2).values,
                                proxy.partitioning.Y.max(dim=-2).values])

    time_start = time.time()
    print("Sampling molecules...")
    raw_rewards = []
    raw_rewards_weight = {}
    means = []
    picked_mols = []
    smis = []
    for i, weights in enumerate(rollout_worker.test_weights):
        sampled_mols = []
        sampled_raw_rewards = []
        sampled_means = []
        sampled_smis = []
        while len(sampled_mols) < args.num_samples:
            rollout_worker.rollout(generator, use_rand_policy=False,
                                   weights=torch.tensor(weights).unsqueeze(0).to(args.device))
            (raw_r, _, m, trajectory_stats, inflow) = rollout_worker.sampled_mols[-1]
            sampled_mols.append(m)
            sampled_raw_rewards.append(raw_r[0].item())
            sampled_means.append(raw_r[1])
            sampled_smis.append(m.smiles)
        # keep only the top molecules for this weight vector
        idx_pick = np.argsort(sampled_raw_rewards)[::-1][:int(args.num_samples / len(rollout_worker.test_weights))]
        picked_mols.extend(np.array(sampled_mols)[idx_pick].tolist())
        means.extend(np.array(sampled_means)[idx_pick].tolist())
        smis.extend(np.array(sampled_smis)[idx_pick].tolist())
        raw_rewards.extend(np.array(sampled_raw_rewards)[idx_pick].tolist())
        raw_rewards_weight[str(weights.cpu())] = np.array(sampled_raw_rewards)[idx_pick].mean()
    raw_rewards_mean = np.mean(list(raw_rewards_weight.values()))

    assert len(picked_mols) == args.num_samples
    top_means = torch.tensor(means)
    scores_dict = oracle.batch_get_scores(picked_mols)
    scores = torch.tensor(pd.DataFrame.from_dict(scores_dict).values)
    test_loss = F.mse_loss(top_means, scores)
    hypervolume = Hypervolume(ref_point=torch.zeros(len(args.objectives)))
    volume = hypervolume.compute(top_means)
    volume_oracle = hypervolume.compute(scores)
    diversity = compute_diversity(picked_mols)

    batch_metrics = {'Hypervolume_reward': volume,
                     'Hypervolume_oracle': volume_oracle,
                     'Reward_mean': raw_rewards_mean,
                     'scores_max': pd.DataFrame.from_dict(scores_dict).max().to_dict(),
                     'scores_mean': pd.DataFrame.from_dict(scores_dict).mean().to_dict(),
                     'Test_loss': test_loss,
                     'Diversity': diversity}
    print(batch_metrics)
    print('Time: {}'.format(time.time() - time_start))

    if not compute_multi_objective_metric:
        return volume, volume_oracle, raw_rewards_weight, raw_rewards_mean, test_loss, diversity
    else:
        for i in range(len(picked_mols)):
            picked_mols[i].score = scores_dict[i]
        # success/diversity/novelty is computed among the top mols.
        success, positive_mols = compute_success(
            picked_mols, scores_dict, args.objectives, score_succ)
        succ_diversity = compute_diversity(positive_mols)
        if ref_mols:
            novelty = compute_novelty(positive_mols, ref_mols)
        else:
            novelty = 1.
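        # Illustrative example (added comment, an assumption rather than code from
        # the source): with objectives (gsk3b, jnk3, qed, sa), a molecule counts as
        # a "success" only if it clears every per-objective threshold in
        # `score_succ` above, i.e. gsk3b >= 0.5, jnk3 >= 0.5, qed >= 0.6, sa >= 0.67;
        # novelty defaults to 1.0 when no reference set `ref_mols` is given.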
        mo_metrics = {'success': success,
                      'novelty': novelty,
                      'succ_diversity': succ_diversity}
        picked_smis = [(raw_rewards[i], picked_mols[i].score, smis[i]) for i in range(len(raw_rewards))]
        print(mo_metrics)
        return (picked_mols, scores_dict, picked_smis), batch_metrics, mo_metrics


def log_overall_metrics(args, dataset, batch_infos=None, MultiObjective_metrics=None):
    volume = dataset.compute_hypervolume()
    print("Hypervolume for {}: {}".format(args.logger.context, volume))
    args.logger.add_scalar('Metric/hypervolume', volume, use_context=False)
    args.logger.add_object('scores', dataset.scores)
    args.logger.add_object('smis', dataset.smis)
    if batch_infos:
        args.logger.add_scalar(
            'Metric/test_loss', batch_infos['Test_loss'], use_context=False)
        args.logger.add_object('collected_info', batch_infos)
    if MultiObjective_metrics:
        args.logger.add_scalars('Metric/MultiObjective', MultiObjective_metrics, use_context=False)


def get_test_rays():
    if args.n_objectives == 3:
        n_partitions = 6
    elif args.n_objectives == 4:
        n_partitions = 7
    test_rays = get_reference_directions("das-dennis", args.n_objectives, n_partitions=n_partitions).astype(np.float32)
    test_rays = test_rays[[(r > 0).all() for r in test_rays]]
    print(f"initialize {len(test_rays)} test rays")
    return test_rays


def main(args):
    set_random_seed(args.seed)
    args.logger.set_context('iter_0')
    bpath = "./data/blocks_105.json"
    dpath = "./data/docked_mols.h5"

    # Initialize oracle and dataset (for training surrogate function)
    oracle = Oracle(args)
    dataset = Dataset(args, bpath, oracle, args.device)
    dataset.load_h5(dpath, num_init_examples=args.num_init_examples)
    log_overall_metrics(args, dataset)
    args.n_objectives = len(args.objectives)

    # Initialize surrogate function
    proxy = get_proxy(args, bpath, oracle)
    proxy.update(dataset, 0, reset=False)

    for i in range(1, args.num_outer_loop_iters + 1):
        print(f"====== Starting round {i} ======")
        args.logger.set_context('iter_{}'.format(i))
        test_weights = np.random.dirichlet(args.alpha_vector,
                                           5 * (2 ** (args.n_objectives - 2))).astype(np.float32)
        if args.criterion == 'TB':
            generator = TBGFlowNet(args, bpath)
        elif args.criterion == 'FM':
            generator = FMGFlowNet(args, bpath)
        elif args.criterion == 'Reinforce':
            generator = MOReinforce(args, bpath)
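        # Illustrative gloss (added comment, an assumption not stated in the source):
        # `criterion` selects the training objective for the generator -- trajectory
        # balance (TB), flow matching (FM), or a multi-objective REINFORCE baseline.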
7
2023-10-24 14:10:35+00:00
24k
caglarkucuk/earthformer-satellite-to-radar
ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_unet_dec.py
[ { "identifier": "Upsample3DLayer", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class Upsample3DLayer(nn.Module):\n \"\"\"Upsampling based on nn.UpSampling and Conv3x3.\n\n If the temporal dimension remains the same:\n x --> interpolation-2d (nearest) --> conv3x3(dim, out_dim)\n Else:\n x --> interpolation-3d (nearest) --> conv3x3x3(dim, out_dim)\n\n \"\"\"\n def __init__(self,\n dim,\n out_dim,\n target_size,\n temporal_upsample=False,\n kernel_size=3,\n layout='THWC',\n conv_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n dim\n out_dim\n target_size\n Size of the output tensor. Will be a tuple/list that contains T_new, H_new, W_new\n temporal_upsample\n Whether the temporal axis will go through upsampling.\n kernel_size\n The kernel size of the Conv2D layer\n layout\n The layout of the inputs\n \"\"\"\n super(Upsample3DLayer, self).__init__()\n self.conv_init_mode = conv_init_mode\n self.target_size = target_size\n self.out_dim = out_dim\n self.temporal_upsample = temporal_upsample\n if temporal_upsample:\n self.up = nn.Upsample(size=target_size, mode='nearest') # 3D upsampling\n else:\n self.up = nn.Upsample(size=(target_size[1], target_size[2]), mode='nearest') # 2D upsampling\n self.conv = nn.Conv2d(in_channels=dim, out_channels=out_dim, kernel_size=(kernel_size, kernel_size),\n padding=(kernel_size // 2, kernel_size // 2))\n assert layout in ['THWC', 'CTHW']\n self.layout = layout\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C) or (B, C, T, H, W)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_out, C_out) or (B, C, T, H_out, W_out)\n \"\"\"\n if self.layout == 'THWC':\n B, T, H, W, C = x.shape\n if self.temporal_upsample:\n x = x.permute(0, 4, 1, 2, 3) # (B, C, T, H, W)\n return self.conv(self.up(x)).permute(0, 2, 3, 4, 1)\n else:\n assert self.target_size[0] == T\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2) # (B * T, C, H, W)\n x = self.up(x)\n return self.conv(x).permute(0, 2, 3, 1).reshape((B,) + self.target_size + (self.out_dim,))\n elif self.layout == 'CTHW':\n B, C, T, H, W = x.shape\n if self.temporal_upsample:\n return self.conv(self.up(x))\n else:\n assert self.output_size[0] == T\n x = x.permute(0, 2, 1, 3, 4) # (B, T, C, H, W)\n x = x.reshape(B * T, C, H, W)\n return self.conv(self.up(x)).reshape(B, self.target_size[0], self.out_dim, self.target_size[1],\n self.target_size[2]).permute(0, 2, 1, 3, 4)" }, { "identifier": "PatchMerging3D", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class PatchMerging3D(nn.Module):\n \"\"\" Patch Merging Layer\"\"\"\n def __init__(self,\n dim,\n out_dim=None,\n downsample=(1, 2, 2),\n norm_layer='layer_norm',\n padding_type='nearest',\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n dim\n Number of input channels.\n downsample\n downsample factor\n norm_layer\n The normalization layer\n \"\"\"\n super().__init__()\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n self.dim = dim\n if out_dim is None:\n out_dim = max(downsample) * dim\n self.out_dim = out_dim\n self.downsample = downsample\n self.padding_type = padding_type\n self.reduction = nn.Linear(downsample[0] * downsample[1] * downsample[2] * dim,\n out_dim, bias=False)\n self.norm = get_norm_layer(norm_layer, 
in_channels=downsample[0] * downsample[1] * downsample[2] * dim)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def get_out_shape(self, data_shape):\n T, H, W, C_in = data_shape\n pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]\n pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]\n pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]\n return (T + pad_t) // self.downsample[0], (H + pad_h) // self.downsample[1], (W + pad_w) // self.downsample[2],\\\n self.out_dim\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Input feature, tensor size (B, T, H, W, C).\n\n Returns\n -------\n out\n Shape (B, T // downsample[0], H // downsample[1], W // downsample[2], out_dim)\n \"\"\"\n B, T, H, W, C = x.shape\n\n # padding\n pad_t = (self.downsample[0] - T % self.downsample[0]) % self.downsample[0]\n pad_h = (self.downsample[1] - H % self.downsample[1]) % self.downsample[1]\n pad_w = (self.downsample[2] - W % self.downsample[2]) % self.downsample[2]\n if pad_h or pad_h or pad_w:\n T += pad_t\n H += pad_h\n W += pad_w\n x = _generalize_padding(x, pad_t, pad_w, pad_h, padding_type=self.padding_type)\n\n x = x.reshape((B,\n T // self.downsample[0], self.downsample[0],\n H // self.downsample[1], self.downsample[1],\n W // self.downsample[2], self.downsample[2], C)) \\\n .permute(0, 1, 3, 5, 2, 4, 6, 7) \\\n .reshape(B, T // self.downsample[0], H // self.downsample[1], W // self.downsample[2],\n self.downsample[0] * self.downsample[1] * self.downsample[2] * C)\n x = self.norm(x)\n x = self.reduction(x)\n\n return x" }, { "identifier": "PosEmbed", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class PosEmbed(nn.Module):\n\n def __init__(self, embed_dim, maxT, maxH, maxW, typ='t+h+w'):\n r\"\"\"\n Parameters\n ----------\n embed_dim\n maxT\n maxH\n maxW\n typ\n The type of the positional embedding.\n - t+h+w:\n Embed the spatial position to embeddings\n - t+hw:\n Embed the spatial position to embeddings\n \"\"\"\n super(PosEmbed, self).__init__()\n self.typ = typ\n\n assert self.typ in ['t+h+w', 't+hw']\n self.maxT = maxT\n self.maxH = maxH\n self.maxW = maxW\n self.embed_dim = embed_dim\n # spatiotemporal learned positional embedding\n if self.typ == 't+h+w':\n self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)\n self.H_embed = nn.Embedding(num_embeddings=maxH, embedding_dim=embed_dim)\n self.W_embed = nn.Embedding(num_embeddings=maxW, embedding_dim=embed_dim)\n\n # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.H_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.W_embed.weight, std=0.02)\n elif self.typ == 't+hw':\n self.T_embed = nn.Embedding(num_embeddings=maxT, embedding_dim=embed_dim)\n self.HW_embed = nn.Embedding(num_embeddings=maxH * maxW, embedding_dim=embed_dim)\n # nn.init.trunc_normal_(self.T_embed.weight, std=0.02)\n # nn.init.trunc_normal_(self.HW_embed.weight, std=0.02)\n else:\n raise NotImplementedError\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m, embed_mode=\"0\")\n\n def forward(self, x):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Return the x + positional embeddings\n \"\"\"\n _, T, H, W, _ = x.shape\n t_idx = torch.arange(T, device=x.device) 
# (T, C)\n h_idx = torch.arange(H, device=x.device) # (H, C)\n w_idx = torch.arange(W, device=x.device) # (W, C)\n if self.typ == 't+h+w':\n return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim)\\\n + self.H_embed(h_idx).reshape(1, H, 1, self.embed_dim)\\\n + self.W_embed(w_idx).reshape(1, 1, W, self.embed_dim)\n elif self.typ == 't+hw':\n spatial_idx = h_idx.unsqueeze(-1) * self.maxW + w_idx\n return x + self.T_embed(t_idx).reshape(T, 1, 1, self.embed_dim) + self.HW_embed(spatial_idx)\n else:\n raise NotImplementedError" }, { "identifier": "InitialEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class InitialEncoder(nn.Module):\n def __init__(self,\n dim,\n out_dim,\n downsample_scale: Union[int, Sequence[int]],\n num_conv_layers=2,\n activation='leaky',\n padding_type='nearest',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(InitialEncoder, self).__init__()\n\n self.num_conv_layers = num_conv_layers\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n conv_block = []\n for i in range(num_conv_layers):\n if i == 0:\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(16, out_dim))\n conv_block.append(get_activation(activation))\n else:\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=out_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(16, out_dim))\n conv_block.append(get_activation(activation))\n\n self.conv_block = nn.Sequential(*conv_block)\n if isinstance(downsample_scale, int):\n patch_merge_downsample = (1, downsample_scale, downsample_scale)\n elif len(downsample_scale) == 2:\n patch_merge_downsample = (1, *downsample_scale)\n elif len(downsample_scale) == 3:\n patch_merge_downsample = tuple(downsample_scale)\n else:\n raise NotImplementedError(f\"downsample_scale {downsample_scale} format not supported!\")\n self.patch_merge = PatchMerging3D(\n dim=out_dim, out_dim=out_dim,\n padding_type=padding_type,\n downsample=patch_merge_downsample,\n linear_init_mode=linear_init_mode,\n norm_init_mode=norm_init_mode)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n x --> [K x Conv2D] --> PatchMerge\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C_out)\n \"\"\"\n B, T, H, W, C = x.shape\n if self.num_conv_layers > 0:\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = self.conv_block(x).permute(0, 2, 3, 1) # (B * T, H, W, C_new)\n x = self.patch_merge(x.reshape(B, T, H, W, -1))\n else:\n x = self.patch_merge(x)\n return x" }, { "identifier": "FinalDecoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class FinalDecoder(nn.Module):\n\n def __init__(self,\n target_thw,\n dim,\n num_conv_layers=2,\n activation='leaky',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(FinalDecoder, self).__init__()\n self.target_thw = target_thw\n self.dim = dim\n self.num_conv_layers = num_conv_layers\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n conv_block = []\n for i in 
range(num_conv_layers):\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1), in_channels=dim, out_channels=dim))\n conv_block.append(nn.GroupNorm(16, dim))\n conv_block.append(get_activation(activation))\n self.conv_block = nn.Sequential(*conv_block)\n self.upsample = Upsample3DLayer(\n dim=dim, out_dim=dim,\n target_size=target_thw, kernel_size=3,\n conv_init_mode=conv_init_mode)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def forward(self, x):\n \"\"\"\n\n x --> Upsample --> [K x Conv2D]\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C)\n \"\"\"\n x = self.upsample(x)\n if self.num_conv_layers > 0:\n B, T, H, W, C = x.shape\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = self.conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n return x" }, { "identifier": "InitialStackPatchMergingEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class InitialStackPatchMergingEncoder(nn.Module):\n\n def __init__(self,\n num_merge: int,\n in_dim,\n out_dim_list,\n downsample_scale_list,\n num_conv_per_merge_list=None,\n activation='leaky',\n padding_type='nearest',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(InitialStackPatchMergingEncoder, self).__init__()\n\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.num_merge = num_merge\n self.in_dim = in_dim\n self.out_dim_list = out_dim_list[:num_merge]\n self.downsample_scale_list = downsample_scale_list[:num_merge]\n self.num_conv_per_merge_list = num_conv_per_merge_list\n self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list]\n\n self.conv_block_list = nn.ModuleList()\n self.patch_merge_list = nn.ModuleList()\n for i in range(num_merge):\n if i == 0:\n in_dim = in_dim\n else:\n in_dim = self.out_dim_list[i - 1]\n out_dim = self.out_dim_list[i]\n downsample_scale = self.downsample_scale_list[i]\n\n conv_block = []\n for j in range(self.num_conv_per_merge_list[i]):\n if j == 0:\n conv_in_dim = in_dim\n else:\n conv_in_dim = out_dim\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=conv_in_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(self.num_group_list[i], out_dim))\n conv_block.append(get_activation(activation))\n\n conv_block = nn.Sequential(*conv_block)\n self.conv_block_list.append(conv_block)\n patch_merge = PatchMerging3D(\n dim=out_dim, out_dim=out_dim,\n padding_type=padding_type,\n downsample=(1, downsample_scale, downsample_scale),\n linear_init_mode=linear_init_mode,\n norm_init_mode=norm_init_mode)\n self.patch_merge_list.append(patch_merge)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n def get_out_shape_list(self, input_shape):\n \"\"\"\n T, H, W, C\n \"\"\"\n out_shape_list = []\n for patch_merge in self.patch_merge_list:\n input_shape = patch_merge.get_out_shape(input_shape)\n out_shape_list.append(input_shape)\n return out_shape_list\n\n def forward(self, x):\n \"\"\"\n\n x --> [K x Conv2D] --> PatchMerge --> ... 
--> [K x Conv2D] --> PatchMerge\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C_out)\n \"\"\"\n for i, (conv_block, patch_merge) in \\\n enumerate(zip(self.conv_block_list, self.patch_merge_list)):\n B, T, H, W, C = x.shape\n if self.num_conv_per_merge_list[i] > 0:\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n x = patch_merge(x)\n return x" }, { "identifier": "FinalStackUpsamplingDecoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class FinalStackUpsamplingDecoder(nn.Module):\n\n def __init__(self,\n target_shape_list,\n in_dim,\n num_conv_per_up_list=None,\n activation='leaky',\n conv_init_mode=\"0\",\n linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n Parameters\n ----------\n target_shape_list:\n list of (T, H ,W ,C)\n \"\"\"\n super(FinalStackUpsamplingDecoder, self).__init__()\n self.conv_init_mode = conv_init_mode\n self.linear_init_mode = linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.target_shape_list = target_shape_list\n self.out_dim_list = [target_shape[-1] for target_shape in self.target_shape_list]\n self.num_upsample = len(target_shape_list)\n self.in_dim = in_dim\n self.num_conv_per_up_list = num_conv_per_up_list\n self.num_group_list = [max(1, out_dim // 4) for out_dim in self.out_dim_list]\n\n self.conv_block_list = nn.ModuleList()\n self.upsample_list = nn.ModuleList()\n for i in range(self.num_upsample):\n if i == 0:\n in_dim = in_dim\n else:\n in_dim = self.out_dim_list[i - 1]\n out_dim = self.out_dim_list[i]\n\n upsample = Upsample3DLayer(\n dim=in_dim, out_dim=in_dim,\n target_size=target_shape_list[i][:-1], kernel_size=3,\n conv_init_mode=conv_init_mode)\n self.upsample_list.append(upsample)\n conv_block = []\n for j in range(num_conv_per_up_list[i]):\n if j == 0:\n conv_in_dim = in_dim\n else:\n conv_in_dim = out_dim\n conv_block.append(nn.Conv2d(kernel_size=(3, 3), padding=(1, 1),\n in_channels=conv_in_dim, out_channels=out_dim))\n conv_block.append(nn.GroupNorm(self.num_group_list[i], out_dim))\n conv_block.append(get_activation(activation))\n conv_block = nn.Sequential(*conv_block)\n self.conv_block_list.append(conv_block)\n self.reset_parameters()\n\n def reset_parameters(self):\n for m in self.children():\n apply_initialization(m,\n conv_mode=self.conv_init_mode,\n linear_mode=self.linear_init_mode,\n norm_mode=self.norm_init_mode)\n\n @staticmethod\n def get_init_params(enc_input_shape, enc_out_shape_list, large_channel=False):\n dec_target_shape_list = list(enc_out_shape_list[:-1])[::-1] + [tuple(enc_input_shape), ]\n if large_channel:\n dec_target_shape_list_large_channel = []\n for i, enc_out_shape in enumerate(enc_out_shape_list[::-1]):\n dec_target_shape_large_channel = list(dec_target_shape_list[i])\n dec_target_shape_large_channel[-1] = enc_out_shape[-1]\n dec_target_shape_list_large_channel.append(tuple(dec_target_shape_large_channel))\n dec_target_shape_list = dec_target_shape_list_large_channel\n dec_in_dim = enc_out_shape_list[-1][-1]\n return dec_target_shape_list, dec_in_dim\n\n def forward(self, x):\n \"\"\"\n\n x --> Upsample --> [K x Conv2D] --> ... 
--> Upsample --> [K x Conv2D]\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n Shape (B, T, H_new, W_new, C)\n \"\"\"\n for i, (conv_block, upsample) in \\\n enumerate(zip(self.conv_block_list, self.upsample_list)):\n x = upsample(x)\n if self.num_conv_per_up_list[i] > 0:\n B, T, H, W, C = x.shape\n x = x.reshape(B * T, H, W, C).permute(0, 3, 1, 2)\n x = conv_block(x).permute(0, 2, 3, 1).reshape(B, T, H, W, -1)\n return x" }, { "identifier": "StackCuboidSelfAttentionBlock", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class StackCuboidSelfAttentionBlock(nn.Module):\n \"\"\"\n\n - \"use_inter_ffn\" is True\n x --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out\n | ^ | ^\n | | | |\n |-------------| |-------------|\n - \"use_inter_ffn\" is False\n x --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out\n | ^ | ^ ^ | ^\n | | | | | | |\n |-------------| |------------| ----------| |-----------|\n If we have enabled global memory vectors, each attention will be a\n\n \"\"\"\n def __init__(self,\n dim,\n num_heads,\n block_cuboid_size=[(4, 4, 4), (4, 4, 4)],\n block_shift_size=[(0, 0, 0), (2, 2, 2)],\n block_strategy=[('d', 'd', 'd'),\n ('l', 'l', 'l')],\n padding_type='ignore',\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=False,\n use_global_vector=False,\n use_global_vector_ffn=True,\n use_global_self_attn=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n checkpoint_level=True,\n use_relative_pos=True,\n use_final_proj=True,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(StackCuboidSelfAttentionBlock, self).__init__()\n # initialization\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n assert len(block_cuboid_size[0]) > 0 and len(block_shift_size) > 0 and len(block_strategy) > 0,\\\n f'Format of the block cuboid size is not correct.' 
\\\n f' block_cuboid_size={block_cuboid_size}'\n assert len(block_cuboid_size) == len(block_shift_size) == len(block_strategy)\n self.num_attn = len(block_cuboid_size)\n self.checkpoint_level = checkpoint_level\n self.use_inter_ffn = use_inter_ffn\n # global vectors\n self.use_global_vector = use_global_vector\n self.use_global_vector_ffn = use_global_vector_ffn\n self.use_global_self_attn = use_global_self_attn\n self.global_dim_ratio = global_dim_ratio\n\n if self.use_inter_ffn:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n if self.use_global_vector_ffn and self.use_global_vector:\n self.global_ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=global_dim_ratio * dim,\n hidden_size=global_dim_ratio * 4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n else:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim, hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn, activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n if self.use_global_vector_ffn and self.use_global_vector:\n self.global_ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=global_dim_ratio * dim,\n hidden_size=global_dim_ratio * 4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn, activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n self.attn_l = nn.ModuleList(\n [CuboidSelfAttentionLayer(\n dim=dim, num_heads=num_heads,\n cuboid_size=ele_cuboid_size,\n shift_size=ele_shift_size,\n strategy=ele_strategy,\n padding_type=padding_type,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n use_global_vector=use_global_vector,\n use_global_self_attn=use_global_self_attn,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n use_final_proj=use_final_proj,\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for ele_cuboid_size, ele_shift_size, ele_strategy\n in zip(block_cuboid_size, block_shift_size, block_strategy)])\n\n def reset_parameters(self):\n for m in self.ffn_l:\n m.reset_parameters()\n if self.use_global_vector_ffn and self.use_global_vector:\n for m in self.global_ffn_l:\n m.reset_parameters()\n for m in self.attn_l:\n m.reset_parameters()\n\n def forward(self, x, global_vectors=None):\n if self.use_inter_ffn:\n if self.use_global_vector:\n for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):\n if self.checkpoint_level >= 2 and self.training:\n x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)\n else:\n x_out, global_vectors_out = attn(x, global_vectors)\n x = x + x_out\n global_vectors = global_vectors + global_vectors_out\n\n if self.checkpoint_level >= 1 and self.training:\n x = 
checkpoint.checkpoint(ffn, x)\n if self.use_global_vector_ffn:\n global_vectors = checkpoint.checkpoint(self.global_ffn_l[idx], global_vectors)\n else:\n x = ffn(x)\n if self.use_global_vector_ffn:\n global_vectors = self.global_ffn_l[idx](global_vectors)\n return x, global_vectors\n else:\n for idx, (attn, ffn) in enumerate(zip(self.attn_l, self.ffn_l)):\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x)\n else:\n x = x + attn(x)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(ffn, x)\n else:\n x = ffn(x)\n return x\n else:\n if self.use_global_vector:\n for idx, attn in enumerate(self.attn_l):\n if self.checkpoint_level >= 2 and self.training:\n x_out, global_vectors_out = checkpoint.checkpoint(attn, x, global_vectors)\n else:\n x_out, global_vectors_out = attn(x, global_vectors)\n x = x + x_out\n global_vectors = global_vectors + global_vectors_out\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n if self.use_global_vector_ffn:\n global_vectors = checkpoint.checkpoint(self.global_ffn_l[0], global_vectors)\n else:\n x = self.ffn_l[0](x)\n if self.use_global_vector_ffn:\n global_vectors = self.global_ffn_l[0](global_vectors)\n return x, global_vectors\n else:\n for idx, attn in enumerate(self.attn_l):\n if self.checkpoint_level >= 2 and self.training:\n out = checkpoint.checkpoint(attn, x)\n else:\n out = attn(x)\n x = x + out\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n else:\n x = self.ffn_l[0](x)\n return x" }, { "identifier": "StackCuboidCrossAttentionBlock", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class StackCuboidCrossAttentionBlock(nn.Module):\n \"\"\"A stack of cuboid cross attention layers.\n\n The advantage of cuboid attention is that we can combine cuboid attention building blocks with different\n hyper-parameters to mimic a broad range of space-time correlation patterns.\n\n - \"use_inter_ffn\" is True\n x, mem --> attn1 -----+-------> ffn1 ---+---> attn2 --> ... --> ffn_k --> out\n | ^ | ^\n | | | |\n |-------------|----|-------------|\n - \"use_inter_ffn\" is False\n x, mem --> attn1 -----+------> attn2 --> ... attnk --+----> ffnk ---+---> out, mem\n | ^ | ^ ^ | ^\n | | | | | | |\n |-------------|----|------------|-- ----------|--|-----------|\n \"\"\"\n def __init__(self,\n dim,\n num_heads,\n block_cuboid_hw=[(4, 4), (4, 4)],\n block_shift_hw=[(0, 0), (2, 2)],\n block_n_temporal=[1, 2],\n block_strategy=[('d', 'd', 'd'),\n ('l', 'l', 'l')],\n padding_type='ignore',\n cross_last_n_frames=None,\n qkv_bias=False,\n qk_scale=None,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=True,\n max_temporal_relative=50,\n checkpoint_level=1,\n use_relative_pos=True,\n # global vectors\n use_global_vector=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n super(StackCuboidCrossAttentionBlock, self).__init__()\n # initialization\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n assert len(block_cuboid_hw[0]) > 0 and len(block_shift_hw) > 0 and len(block_strategy) > 0,\\\n f'Incorrect format.' 
\\\n f' block_cuboid_hw={block_cuboid_hw}, block_shift_hw={block_shift_hw}, block_strategy={block_strategy}'\n assert len(block_cuboid_hw) == len(block_shift_hw) == len(block_strategy)\n self.num_attn = len(block_cuboid_hw)\n self.checkpoint_level = checkpoint_level\n self.use_inter_ffn = use_inter_ffn\n self.use_global_vector = use_global_vector\n if self.use_inter_ffn:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for _ in range(self.num_attn)])\n else:\n self.ffn_l = nn.ModuleList(\n [PositionwiseFFN(\n units=dim,\n hidden_size=4 * dim,\n activation_dropout=ffn_drop,\n dropout=ffn_drop,\n gated_proj=gated_ffn,\n activation=activation,\n normalization=norm_layer,\n pre_norm=True,\n linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)])\n self.attn_l = nn.ModuleList(\n [CuboidCrossAttentionLayer(\n dim=dim,\n num_heads=num_heads,\n cuboid_hw=ele_cuboid_hw,\n shift_hw=ele_shift_hw,\n strategy=ele_strategy,\n n_temporal=ele_n_temporal,\n cross_last_n_frames=cross_last_n_frames,\n padding_type=padding_type,\n qkv_bias=qkv_bias,\n qk_scale=qk_scale,\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n norm_layer=norm_layer,\n max_temporal_relative=max_temporal_relative,\n use_global_vector=use_global_vector,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,)\n for ele_cuboid_hw, ele_shift_hw, ele_strategy, ele_n_temporal\n in zip(block_cuboid_hw, block_shift_hw, block_strategy, block_n_temporal)])\n\n def reset_parameters(self):\n for m in self.ffn_l:\n m.reset_parameters()\n for m in self.attn_l:\n m.reset_parameters()\n\n def forward(self, x, mem, mem_global_vector=None):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T_x, H, W, C)\n mem\n Shape (B, T_mem, H, W, C)\n mem_global_vector\n Shape (B, N_global, C)\n\n Returns\n -------\n out\n Shape (B, T_x, H, W, C_out)\n \"\"\"\n if self.use_inter_ffn:\n for attn, ffn in zip(self.attn_l, self.ffn_l):\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)\n else:\n x = x + attn(x, mem, mem_global_vector)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(ffn, x)\n else:\n x = ffn(x)\n return x\n else:\n for attn in self.attn_l:\n if self.checkpoint_level >= 2 and self.training:\n x = x + checkpoint.checkpoint(attn, x, mem, mem_global_vector)\n else:\n x = x + attn(x, mem, mem_global_vector)\n if self.checkpoint_level >= 1 and self.training:\n x = checkpoint.checkpoint(self.ffn_l[0], x)\n else:\n x = self.ffn_l[0](x)\n return x" }, { "identifier": "CuboidTransformerEncoder", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer.py", "snippet": "class CuboidTransformerEncoder(nn.Module):\n \"\"\"Encoder of the CuboidTransformer\n\n x --> attn_block --> patch_merge --> attn_block --> patch_merge --> ... 
--> out\n\n \"\"\"\n def __init__(self,\n input_shape,\n base_units=128,\n block_units=None,\n scale_alpha=1.0,\n depth=[4, 4, 4],\n downsample=2,\n downsample_type='patch_merge',\n block_attn_patterns=None,\n block_cuboid_size=[(4, 4, 4),\n (4, 4, 4)],\n block_strategy=[('l', 'l', 'l'),\n ('d', 'd', 'd')],\n block_shift_size=[(0, 0, 0),\n (0, 0, 0)],\n num_heads=4,\n attn_drop=0.0,\n proj_drop=0.0,\n ffn_drop=0.0,\n activation=\"leaky\",\n ffn_activation='leaky',\n gated_ffn=False,\n norm_layer='layer_norm',\n use_inter_ffn=True,\n padding_type='ignore',\n checkpoint_level=True,\n use_relative_pos=True,\n self_attn_use_final_proj=True,\n # global vectors\n use_global_vector=False,\n use_global_vector_ffn=True,\n use_global_self_attn=False,\n separate_global_qkv=False,\n global_dim_ratio=1,\n # initialization\n attn_linear_init_mode=\"0\",\n ffn_linear_init_mode=\"0\",\n conv_init_mode=\"0\",\n down_linear_init_mode=\"0\",\n norm_init_mode=\"0\",\n ):\n \"\"\"\n\n Parameters\n ----------\n input_shape\n The shape of the input. Contains T, H, W, C\n initial_data_thw\n The shape of the first layer\n base_units\n The number of units\n scale_alpha\n We scale up the channels based on the formula:\n - round_to(base_units * max(downsample_scale) ** units_alpha, 4)\n depth\n The number of layers for each block\n downsample\n The downsample ratio\n downsample_type\n Type of the downsampling layer\n block_attn_patterns\n Attention pattern for the cuboid attention for each block.\n block_cuboid_size\n A list of cuboid size parameters\n block_strategy\n A list of cuboid strategies\n block_shift_size\n A list of shift sizes\n num_global\n The number of global vectors\n num_heads\n The number of heads.\n attn_drop\n proj_drop\n ffn_drop\n gated_ffn\n Whether to enable gated ffn or not\n norm_layer\n The normalization layer\n use_inter_ffn\n Whether to use intermediate FFN\n padding_type\n \"\"\"\n super(CuboidTransformerEncoder, self).__init__()\n # initialization mode\n self.attn_linear_init_mode = attn_linear_init_mode\n self.ffn_linear_init_mode = ffn_linear_init_mode\n self.conv_init_mode = conv_init_mode\n self.down_linear_init_mode = down_linear_init_mode\n self.norm_init_mode = norm_init_mode\n\n self.input_shape = input_shape\n self.depth = depth\n self.num_blocks = len(depth)\n self.base_units = base_units\n self.scale_alpha = scale_alpha\n if not isinstance(downsample, (tuple, list)):\n downsample = (1, downsample, downsample)\n self.downsample = downsample\n self.downsample_type = downsample_type\n self.num_heads = num_heads\n self.use_global_vector = use_global_vector\n self.checkpoint_level = checkpoint_level\n if block_units is None:\n block_units = [round_to(base_units * int((max(downsample) ** scale_alpha) ** i), 4)\n for i in range(self.num_blocks)]\n else:\n assert len(block_units) == self.num_blocks and block_units[0] == base_units\n self.block_units = block_units\n\n if self.num_blocks > 1:\n if downsample_type == 'patch_merge':\n self.down_layers = nn.ModuleList(\n [PatchMerging3D(dim=self.block_units[i],\n downsample=downsample,\n # downsample=(1, 1, 1),\n padding_type=padding_type,\n out_dim=self.block_units[i + 1],\n linear_init_mode=down_linear_init_mode,\n norm_init_mode=norm_init_mode)\n for i in range(self.num_blocks - 1)])\n else:\n raise NotImplementedError\n if self.use_global_vector:\n self.down_layer_global_proj = nn.ModuleList(\n [nn.Linear(in_features=global_dim_ratio*self.block_units[i],\n out_features=global_dim_ratio*self.block_units[i + 1])\n for i in 
range(self.num_blocks - 1)])\n\n if block_attn_patterns is not None:\n mem_shapes = self.get_mem_shapes()\n if isinstance(block_attn_patterns, (tuple, list)):\n assert len(block_attn_patterns) == self.num_blocks\n else:\n block_attn_patterns = [block_attn_patterns for _ in range(self.num_blocks)]\n block_cuboid_size = []\n block_strategy = []\n block_shift_size = []\n for idx, key in enumerate(block_attn_patterns):\n func = CuboidSelfAttentionPatterns.get(key)\n cuboid_size, strategy, shift_size = func(mem_shapes[idx])\n block_cuboid_size.append(cuboid_size)\n block_strategy.append(strategy)\n block_shift_size.append(shift_size)\n else:\n if not isinstance(block_cuboid_size[0][0], (list, tuple)):\n block_cuboid_size = [block_cuboid_size for _ in range(self.num_blocks)]\n else:\n assert len(block_cuboid_size) == self.num_blocks,\\\n f'Incorrect input format! Received block_cuboid_size={block_cuboid_size}'\n\n if not isinstance(block_strategy[0][0], (list, tuple)):\n block_strategy = [block_strategy for _ in range(self.num_blocks)]\n else:\n assert len(block_strategy) == self.num_blocks,\\\n f'Incorrect input format! Received block_strategy={block_strategy}'\n\n if not isinstance(block_shift_size[0][0], (list, tuple)):\n block_shift_size = [block_shift_size for _ in range(self.num_blocks)]\n else:\n assert len(block_shift_size) == self.num_blocks,\\\n f'Incorrect input format! Received block_shift_size={block_shift_size}'\n self.block_cuboid_size = block_cuboid_size\n self.block_strategy = block_strategy\n self.block_shift_size = block_shift_size\n\n self.blocks = nn.ModuleList([nn.Sequential(\n *[StackCuboidSelfAttentionBlock(\n dim=self.block_units[i],\n num_heads=num_heads,\n block_cuboid_size=block_cuboid_size[i],\n block_strategy=block_strategy[i],\n block_shift_size=block_shift_size[i],\n attn_drop=attn_drop,\n proj_drop=proj_drop,\n ffn_drop=ffn_drop,\n activation=ffn_activation,\n gated_ffn=gated_ffn,\n norm_layer=norm_layer,\n use_inter_ffn=use_inter_ffn,\n padding_type=padding_type,\n use_global_vector=use_global_vector,\n use_global_vector_ffn=use_global_vector_ffn,\n use_global_self_attn=use_global_self_attn,\n separate_global_qkv=separate_global_qkv,\n global_dim_ratio=global_dim_ratio,\n checkpoint_level=checkpoint_level,\n use_relative_pos=use_relative_pos,\n use_final_proj=self_attn_use_final_proj,\n # initialization\n attn_linear_init_mode=attn_linear_init_mode,\n ffn_linear_init_mode=ffn_linear_init_mode,\n norm_init_mode=norm_init_mode,\n ) for _ in range(depth[i])])\n for i in range(self.num_blocks)])\n self.reset_parameters()\n\n def reset_parameters(self):\n if self.num_blocks > 1:\n for m in self.down_layers:\n m.reset_parameters()\n if self.use_global_vector:\n apply_initialization(self.down_layer_global_proj,\n linear_mode=self.down_linear_init_mode)\n for ms in self.blocks:\n for m in ms:\n m.reset_parameters()\n\n def get_mem_shapes(self):\n \"\"\"Get the shape of the output memory based on the input shape. 
This can be used for constructing the decoder.\n\n Returns\n -------\n mem_shapes\n A list of shapes of the output memory\n \"\"\"\n\n if self.num_blocks == 1:\n return [self.input_shape]\n else:\n mem_shapes = [self.input_shape]\n curr_shape = self.input_shape\n for down_layer in self.down_layers:\n curr_shape = down_layer.get_out_shape(curr_shape)\n mem_shapes.append(curr_shape)\n return mem_shapes\n\n def forward(self, x, global_vectors=None):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n\n Returns\n -------\n out\n A list of tensors from the bottom layer to the top layer of the encoder. For example, it can have shape\n - (B, T, H, W, C1)\n - (B, T, H // 2, W // 2, 2 * C1)\n - (B, T, H // 4, W // 4, 4 * C1)\n ...\n global_mem_out\n Optional\n \"\"\"\n B, T, H, W, C_in = x.shape\n assert (T, H, W, C_in) == self.input_shape \n\n if self.use_global_vector:\n out = []\n global_mem_out = []\n for i in range(self.num_blocks):\n for l in self.blocks[i]:\n x, global_vectors = l(x, global_vectors)\n out.append(x)\n global_mem_out.append(global_vectors)\n if self.num_blocks > 1 and i < self.num_blocks - 1:\n x = self.down_layers[i](x)\n global_vectors = self.down_layer_global_proj[i](global_vectors)\n return out, global_mem_out\n else:\n out = []\n for i in range(self.num_blocks):\n x = self.blocks[i](x)\n out.append(x)\n if self.num_blocks > 1 and i < self.num_blocks - 1:\n x = self.down_layers[i](x)\n return out" }, { "identifier": "CuboidSelfAttentionPatterns", "path": "ef-sat2rad/earthformer/cuboid_transformer/cuboid_transformer_patterns.py", "snippet": "def full_attention(input_shape):\ndef self_axial(input_shape):\ndef self_video_swin(input_shape, P=2, M=4):\ndef self_divided_space_time(input_shape):\ndef self_spatial_lg_v1(input_shape, M=4):\ndef self_axial_space_dilate_K(input_shape, K=2):\ndef cross_KxK(mem_shape, K):\ndef cross_KxK_lg(mem_shape, K):\ndef cross_KxK_heter(mem_shape, K):\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n P = min(P, T)\n M = min(M, H, W)\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n T, H, W, _ = input_shape\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)\n K = min(K, H, W)" }, { "identifier": "get_activation", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def get_activation(act, inplace=False, **kwargs):\n \"\"\"\n\n Parameters\n ----------\n act\n Name of the activation\n inplace\n Whether to perform inplace activation\n\n Returns\n -------\n activation_layer\n The activation\n \"\"\"\n if act is None:\n return lambda x: x\n if isinstance(act, str):\n if act == 'leaky':\n negative_slope = kwargs.get(\"negative_slope\", 0.1)\n return nn.LeakyReLU(negative_slope, inplace=inplace)\n elif act == 'identity':\n return nn.Identity()\n elif act == 'elu':\n return nn.ELU(inplace=inplace)\n elif act == 'gelu':\n return nn.GELU()\n elif act == 'relu':\n return nn.ReLU()\n elif act == 'sigmoid':\n return nn.Sigmoid()\n elif act == 'tanh':\n return nn.Tanh()\n elif act == 'softrelu' or act == 'softplus':\n return nn.Softplus()\n elif act == 'softsign':\n return nn.Softsign()\n else:\n raise NotImplementedError('act=\"{}\" is not supported. 
'\n 'Try to include it if you can find that in '\n 'https://pytorch.org/docs/stable/nn.html'.format(act))\n else:\n return act" }, { "identifier": "get_norm_layer", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def get_norm_layer(normalization: str = 'layer_norm',\n axis: int = -1,\n epsilon: float = 1e-5,\n in_channels: int = 0, **kwargs):\n \"\"\"Get the normalization layer based on the provided type\n\n Parameters\n ----------\n normalization\n The type of the layer normalization from ['layer_norm']\n axis\n The axis to normalize the\n epsilon\n The epsilon of the normalization layer\n in_channels\n Input channel\n\n Returns\n -------\n norm_layer\n The layer normalization layer\n \"\"\"\n if isinstance(normalization, str):\n if normalization == 'layer_norm':\n assert in_channels > 0\n assert axis == -1\n norm_layer = nn.LayerNorm(normalized_shape=in_channels, eps=epsilon, **kwargs)\n elif normalization == 'rms_norm':\n assert axis == -1\n norm_layer = RMSNorm(d=in_channels, eps=epsilon, **kwargs)\n else:\n raise NotImplementedError('normalization={} is not supported'.format(normalization))\n return norm_layer\n elif normalization is None:\n return nn.Identity()\n else:\n raise NotImplementedError('The type of normalization must be str')" }, { "identifier": "_generalize_padding", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def _generalize_padding(x, pad_t, pad_h, pad_w, padding_type, t_pad_left=False):\n \"\"\"\n\n Parameters\n ----------\n x\n Shape (B, T, H, W, C)\n pad_t\n pad_h\n pad_w\n padding_type\n t_pad_left\n\n Returns\n -------\n out\n The result after padding the x. Shape will be (B, T + pad_t, H + pad_h, W + pad_w, C)\n \"\"\"\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n assert padding_type in ['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T + pad_t, H + pad_h, W + pad_w)).permute(0, 2, 3, 4, 1)\n else:\n if t_pad_left:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, pad_t, 0))\n else:\n return F.pad(x, (0, 0, 0, pad_w, 0, pad_h, 0, pad_t))" }, { "identifier": "_generalize_unpadding", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def _generalize_unpadding(x, pad_t, pad_h, pad_w, padding_type):\n assert padding_type in['zeros', 'ignore', 'nearest']\n B, T, H, W, C = x.shape\n if pad_t == 0 and pad_h == 0 and pad_w == 0:\n return x\n\n if padding_type == 'nearest':\n return F.interpolate(x.permute(0, 4, 1, 2, 3), size=(T - pad_t, H - pad_h, W - pad_w)).permute(0, 2, 3, 4, 1)\n else:\n return x[:, :(T - pad_t), :(H - pad_h), :(W - pad_w), :].contiguous()" }, { "identifier": "apply_initialization", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def apply_initialization(m,\n linear_mode=\"0\",\n conv_mode=\"0\",\n norm_mode=\"0\",\n embed_mode=\"0\"):\n if isinstance(m, nn.Linear):\n\n if linear_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n mode='fan_in', nonlinearity=\"linear\")\n elif linear_mode in (\"1\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, (nn.Conv2d, nn.Conv3d, nn.ConvTranspose2d, nn.ConvTranspose3d)):\n if conv_mode in (\"0\", ):\n nn.init.kaiming_normal_(m.weight,\n a=0.1,\n mode='fan_out',\n nonlinearity=\"leaky_relu\")\n else:\n raise 
NotImplementedError\n if hasattr(m, 'bias') and m.bias is not None:\n nn.init.zeros_(m.bias)\n elif isinstance(m, nn.LayerNorm):\n if norm_mode in (\"0\", ):\n if m.elementwise_affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n elif isinstance(m, nn.GroupNorm):\n if norm_mode in (\"0\", ):\n if m.affine:\n nn.init.ones_(m.weight)\n nn.init.zeros_(m.bias)\n else:\n raise NotImplementedError\n # # pos_embed already initialized when created\n elif isinstance(m, nn.Embedding):\n if embed_mode in (\"0\", ):\n nn.init.trunc_normal_(m.weight.data, std=0.02)\n else:\n raise NotImplementedError\n else:\n pass" }, { "identifier": "round_to", "path": "ef-sat2rad/earthformer/cuboid_transformer/utils.py", "snippet": "def round_to(dat, c):\n return dat + (dat - dat % c) % c" } ]
from typing import Sequence, Union
from functools import lru_cache
from collections import OrderedDict
from torch import nn
from einops import rearrange
from .cuboid_transformer import (
    Upsample3DLayer, PatchMerging3D, PosEmbed,
    InitialEncoder, FinalDecoder,
    InitialStackPatchMergingEncoder, FinalStackUpsamplingDecoder,
    StackCuboidSelfAttentionBlock, StackCuboidCrossAttentionBlock,
    CuboidTransformerEncoder)
from .cuboid_transformer_patterns import CuboidSelfAttentionPatterns, CuboidCrossAttentionPatterns
from .utils import (
    get_activation, get_norm_layer,
    _generalize_padding, _generalize_unpadding,
    apply_initialization, round_to)
import warnings
import torch
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
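# (Descriptive note, added and inferred from the names above rather than stated in
#  the source: these imports pull in the building blocks reused by the U-Net style
#  decoder below -- upsampling and patch-merging layers, positional embeddings, and
#  the stacked cuboid self-/cross-attention blocks of the CuboidTransformer.)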
15903
cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList(
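A small sketch of the shape bookkeeping this `__init__` performs, with illustrative (assumed) values: one (T, H, W, C) entry per hierarchy level in `mem_shapes`, channel counts lifted into `block_units`, and a scalar `downsample` normalized to a per-axis tuple that leaves the temporal axis untouched:

mem_shapes = [(8, 32, 32, 128), (8, 16, 16, 256)]   # hypothetical (T, H, W, C) per level
depth = [2, 2]
assert len(depth) == len(mem_shapes)                 # one depth entry per hierarchy level

num_blocks = len(mem_shapes)                         # 2
block_units = tuple(ms[-1] for ms in mem_shapes)     # (128, 256)

downsample = 2
if not isinstance(downsample, (tuple, list)):
    downsample = (1, downsample, downsample)         # (1, 2, 2): keep T, halve H and W
print(num_blocks, block_units, downsample)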
"""CuboidTransformer adapted for auxiliary inputs in decoder""" class CuboidTransformerUNetDecoder(nn.Module): """U-Net style Decoder of the CuboidTransformer. For each block, we first apply the StackCuboidSelfAttention and then apply the StackCuboidCrossAttention We add cross attention following 3 modes: cross_mode == "down": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "up": x --> attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ | | | | mem mem cross_mode == "both": x --> attn --> cross_attn --> downscale --> ... --> z --> attn --> cross_attn --> upscale --> ... --> out ^ ^ ^ ^ | | | | | | | | mem mem mem mem """ def __init__(self, target_temporal_length, mem_shapes, cross_start=0, depth=[2, 2], upsample_type="upsample", upsample_kernel_size=3, block_self_attn_patterns=None, block_self_cuboid_size=[(4, 4, 4), (4, 4, 4)], block_self_cuboid_strategy=[('l', 'l', 'l'), ('d', 'd', 'd')], block_self_shift_size=[(1, 1, 1), (0, 0, 0)], block_cross_attn_patterns=None, block_cross_cuboid_hw=[(4, 4), (4, 4)], block_cross_cuboid_strategy=[('l', 'l', 'l'), ('d', 'l', 'l')], block_cross_shift_hw=[(0, 0), (0, 0)], block_cross_n_temporal=[1, 2], cross_last_n_frames=None, num_heads=4, attn_drop=0.0, proj_drop=0.0, ffn_drop=0.0, ffn_activation='leaky', gated_ffn=False, norm_layer='layer_norm', use_inter_ffn=False, hierarchical_pos_embed=False, pos_embed_type='t+hw', max_temporal_relative=50, padding_type='ignore', checkpoint_level=True, use_relative_pos=True, self_attn_use_final_proj=True, # global vectors use_self_global=False, self_update_global=True, use_cross_global=False, use_global_vector_ffn=True, use_global_self_attn=False, separate_global_qkv=False, global_dim_ratio=1, # initialization attn_linear_init_mode="0", ffn_linear_init_mode="0", conv_init_mode="0", up_linear_init_mode="0", norm_init_mode="0", # different from `CuboidTransformerDecoder`, no arg `use_first_self_attn=False` downsample=2, downsample_type='patch_merge', cross_mode="up", down_linear_init_mode="0", ): """ Parameters ---------- target_temporal_length mem_shapes cross_start The block to start cross attention depth Depth of each block downsample The downsample ratio downsample_type Type of the downsampling layer upsample_type The type of the upsampling layers upsample_kernel_size block_self_attn_patterns Pattern of the block self attentions block_self_cuboid_size block_self_cuboid_strategy block_self_shift_size block_cross_attn_patterns block_cross_cuboid_hw block_cross_cuboid_strategy block_cross_shift_hw block_cross_n_temporal cross_last_n_frames cross_mode Must be one of ("up", "down", "both") Control whether the upsampling/downsampling/both phases cross attend to the encoded latent features num_heads attn_drop proj_drop ffn_drop ffn_activation gated_ffn Whether to enable gated ffn or not norm_layer The normalization layer use_inter_ffn Whether to use intermediate FFN hierarchical_pos_embed Whether to add pos embedding for each hierarchy. 
max_temporal_relative padding_type checkpoint_level """ super(CuboidTransformerUNetDecoder, self).__init__() # initialization mode self.attn_linear_init_mode = attn_linear_init_mode self.ffn_linear_init_mode = ffn_linear_init_mode self.conv_init_mode = conv_init_mode self.up_linear_init_mode = up_linear_init_mode self.norm_init_mode = norm_init_mode assert len(depth) == len(mem_shapes) self.target_temporal_length = target_temporal_length self.num_blocks = len(mem_shapes) self.cross_start = cross_start self.mem_shapes = mem_shapes self.block_units = tuple(mem_shape[-1] for mem_shape in self.mem_shapes) self.depth = depth if not isinstance(downsample, (tuple, list)): downsample = (1, downsample, downsample) self.downsample = downsample self.downsample_type = downsample_type self.upsample_type = upsample_type self.hierarchical_pos_embed = hierarchical_pos_embed self.checkpoint_level = checkpoint_level self.use_self_global = use_self_global self.self_update_global = self_update_global self.use_cross_global = use_cross_global self.use_global_vector_ffn = use_global_vector_ffn assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!" self.cross_mode = cross_mode self.up_use_cross = self.cross_mode in ["up", "both"] self.down_use_cross = self.cross_mode in ["down", "both"] if self.num_blocks > 1: # Construct downsampling layers if downsample_type == 'patch_merge': self.downsample_layers = nn.ModuleList(
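The docstring's `cross_mode` contract reduces to two booleans; a standalone restatement of the check, with names taken directly from the code above:

def resolve_cross_mode(cross_mode: str):
    assert cross_mode in ["up", "down", "both"], f"Invalid cross_mode {cross_mode}!"
    up_use_cross = cross_mode in ["up", "both"]      # up path cross-attends to memory
    down_use_cross = cross_mode in ["down", "both"]  # down path cross-attends to memory
    return up_use_cross, down_use_cross

print(resolve_cross_mode("up"))    # (True, False)
print(resolve_cross_mode("down"))  # (False, True)
print(resolve_cross_mode("both"))  # (True, True)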
[PatchMerging3D(dim=self.block_units[i],
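The gold `next_line` begins a `PatchMerging3D` construction. Conceptually (a hedged sketch, not the repo's implementation), 3D patch merging folds a (1, 2, 2) spatial neighborhood into the channel dimension and projects back down, which is why `block_units` grows across hierarchy levels:

import torch
from torch import nn
from einops import rearrange

x = torch.randn(2, 8, 32, 32, 128)   # (B, T, H, W, C), illustrative
merged = rearrange(x, 'b t (h p1) (w p2) c -> b t h w (p1 p2 c)', p1=2, p2=2)
proj = nn.Linear(2 * 2 * 128, 256)   # project merged channels to the next level's width
y = proj(merged)
assert y.shape == (2, 8, 16, 16, 256)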
1
2023-10-23 11:45:50+00:00
24k
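That closes one record: `cropped_code` ends exactly where the model must predict, `next_line` is the gold continuation, and `gold_snippet_index` points at the supporting `context` entry. If the dump were materialized as JSONL (a hypothetical serialization; the file name is made up), consuming a record looks like:

import json

with open("completion_records.jsonl") as f:   # hypothetical path
    for raw in f:
        rec = json.loads(raw)
        prompt = rec["cropped_code"]                           # code up to the cut point
        target = rec["next_line"]                              # gold completion
        gold_ctx = rec["context"][rec["gold_snippet_index"]]   # retrieved API snippet
        print(gold_ctx["identifier"], "->", target)
        break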
IBM/VillanDiffusion
make_latent_dataset.py
[ { "identifier": "DiffuserModelSched", "path": "model.py", "snippet": "class DiffuserModelSched():\n LR_SCHED_CKPT: str = \"lr_sched.pth\"\n OPTIM_CKPT: str = \"optim.pth\"\n \n SDE_VP: str = \"SDE-VP\"\n SDE_VE: str = \"SDE-VE\"\n SDE_LDM: str = \"SDE-LDM\"\n CLIP_SAMPLE_DEFAULT = False\n MODEL_DEFAULT: str = \"DEFAULT\"\n DDPM_32_DEFAULT: str = \"DDPM-32-DEFAULT\"\n DDPM_256_DEFAULT: str = \"DDPM-256-DEFAULT\"\n NCSNPP_32_DEFAULT: str = \"NCSNPP-32-DEFAULT\"\n NCSNPP_256_DEFAULT: str = \"NCSNPP-256-DEFAULT\"\n DDPM_CIFAR10_DEFAULT: str = \"DDPM-CIFAR10-DEFAULT\"\n DDPM_CELEBA_HQ_DEFAULT: str = \"DDPM-CELEBA-HQ-DEFAULT\"\n DDPM_CHURCH_DEFAULT: str = \"DDPM-CHURCH-DEFAULT\"\n DDPM_BEDROOM_DEFAULT: str = \"DDPM-BEDROOM-DEFAULT\"\n LDM_CELEBA_HQ_DEFAULT: str = \"LDM-CELEBA-HQ-DEFAULT\"\n NCSNPP_CIFAR10_DEFAULT: str = \"NCSNPP-CIFAR10-DEFAULT\"\n NCSNPP_CELEBA_HQ_DEFAULT: str = \"NCSNPP-CELEBA-HQ-DEFAULT\"\n NCSNPP_CHURCH_DEFAULT: str = \"NCSNPP-CHURCH-DEFAULT\"\n \n DDPM_CIFAR10_32 = \"DDPM-CIFAR10-32\"\n DDPM_CELEBA_HQ_256 = \"DDPM-CELEBA-HQ-256\"\n DDPM_CHURCH_256 = \"DDPM-CHURCH-256\"\n DDPM_BEDROOM_256 = \"DDPM-BEDROOM-256\"\n LDM_CELEBA_HQ_256 = \"LDM-CELEBA-HQ-256\"\n NCSNPP_CIFAR10_32 = \"NCSNPP-CIFAR10-32\"\n NCSNPP_CELEBA_HQ_256 = \"NCSNPP-CELEBA-HQ-256\"\n NCSNPP_CHURCH_256 = \"NCSNPP-CHURCH-256\"\n\n DDPM_SCHED = \"DDPM-SCHED\"\n DDIM_SCHED = \"DDIM-SCHED\"\n DPM_SOLVER_PP_O1_SCHED = \"DPM_SOLVER_PP_O1-SCHED\"\n DPM_SOLVER_O1_SCHED = \"DPM_SOLVER_O1-SCHED\"\n DPM_SOLVER_PP_O2_SCHED = \"DPM_SOLVER_PP_O2-SCHED\"\n DPM_SOLVER_O2_SCHED = \"DPM_SOLVER_O2-SCHED\"\n DPM_SOLVER_PP_O3_SCHED = \"DPM_SOLVER_PP_O3-SCHED\"\n DPM_SOLVER_O3_SCHED = \"DPM_SOLVER_O3-SCHED\"\n UNIPC_SCHED = \"UNIPC-SCHED\"\n PNDM_SCHED = \"PNDM-SCHED\"\n DEIS_SCHED = \"DEIS-SCHED\"\n HEUN_SCHED = \"HEUN-SCHED\"\n LMSD_SCHED = \"LMSD-SCHED\"\n LDM_SCHED = \"LDM-SCHED\"\n SCORE_SDE_VE_SCHED = \"SCORE-SDE-VE-SCHED\"\n EDM_VE_SCHED = \"EDM-VE-SCHED\"\n EDM_VE_ODE_SCHED = \"EDM-VE-ODE-SCHED\"\n EDM_VE_SDE_SCHED = \"EDM-VE-SDE-SCHED\"\n \n @staticmethod\n def get_sample_clip(clip_sample: bool, clip_sample_default: bool):\n if clip_sample is not None:\n return clip_sample\n return clip_sample_default\n @staticmethod\n def __get_pipeline_generator(unet, scheduler, pipeline):\n def get_pipeline(unet, scheduler):\n return pipeline(unet, scheduler)\n return get_pipeline\n @staticmethod\n def __get_ldm_pipeline_generator(pipeline):\n def get_pipeline(accelerate, unet, vae, scheduler):\n unet = accelerate.unwrap_model(unet)\n if vae != None:\n vae = accelerate.unwrap_model(vae)\n return pipeline(vqvae=vae, unet=unet, scheduler=scheduler)\n return pipeline(unet=unet, scheduler=scheduler)\n return get_pipeline\n @staticmethod\n def __get_model_sched_vp(ckpt_id: str, clip_sample: bool, noise_sched_type: str=None, clip_sample_range: float=None):\n # Clip option\n clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=DiffuserModelSched.CLIP_SAMPLE_DEFAULT)\n # Pipeline\n pipline: DDPMPipeline = DDPMPipeline.from_pretrained(ckpt_id)\n \n model: UNet2DModel = pipline.unet\n num_train_timesteps: int = 1000\n beta_start: float = 0.0001\n beta_end: float = 0.02\n \n if clip_sample_range is None:\n clip_sample_range: float = 1.0\n PNDMPipeline_used = partial(PNDMPipeline, clip_sample=clip_sample_used, clip_sample_range=clip_sample_range)\n\n if noise_sched_type == DiffuserModelSched.DDPM_SCHED:\n noise_sched = DDPMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, 
beta_end=beta_end, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=DDPMPipeline)\n elif noise_sched_type == DiffuserModelSched.DDIM_SCHED:\n noise_sched = DDIMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=DDIMPipeline)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=1, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=1, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=2, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=2, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=3, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, solver_order=3, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.UNIPC_SCHED:\n noise_sched = UniPCMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.PNDM_SCHED:\n noise_sched = PNDMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DEIS_SCHED:\n noise_sched = DEISMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.HEUN_SCHED:\n noise_sched = HeunDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == 
DiffuserModelSched.LMSD_SCHED:\n noise_sched = LMSDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=PNDMPipeline_used)\n elif noise_sched_type == None:\n noise_sched = pipline.scheduler\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=DDPMPipeline)\n # noise_sched = DDPMScheduler.from_pretrained(ckpt_id, prediction_type='epsilon')\n # noise_sched =DDPMScheduler(num_train_timesteps=1000, beta_start=0.0001, beta_end=0.02)\n else:\n raise NotImplementedError()\n \n if clip_sample_used != None:\n noise_sched.config.clip_sample = clip_sample_used\n print(f\"noise_sched.config.clip_sample = {noise_sched.config.clip_sample}\")\n \n return model, None, noise_sched, get_pipeline\n \n @staticmethod\n def __get_model_sched_ve(ckpt_id: str, clip_sample: bool, noise_sched_type: str=None, num_inference_steps: int=1000):\n # Clip option\n clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=DiffuserModelSched.CLIP_SAMPLE_DEFAULT)\n # Pipeline\n pipline: ScoreSdeVePipeline = ScoreSdeVePipeline.from_pretrained(ckpt_id)\n \n model: UNet2DModel = pipline.unet\n num_train_timesteps: int = 2000\n sigma_min: float = 0.01\n sigma_max: float = 380.0\n sampling_eps: float = 1e-05\n correct_steps: int = 1\n snr: float = 0.075\n\n if noise_sched_type == DiffuserModelSched.SCORE_SDE_VE_SCHED:\n noise_sched = ScoreSdeVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max, sampling_eps=sampling_eps, correct_steps=correct_steps, snr=snr)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=ScoreSdeVePipeline)\n elif noise_sched_type == DiffuserModelSched.EDM_VE_SCHED:\n noise_sched = KarrasVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=KarrasVePipeline)\n elif noise_sched_type == DiffuserModelSched.EDM_VE_SDE_SCHED:\n noise_sched = KarrasVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max, s_churn=100)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=KarrasVePipeline)\n elif noise_sched_type == DiffuserModelSched.EDM_VE_ODE_SCHED:\n noise_sched = KarrasVeScheduler(num_train_timesteps=num_train_timesteps, sigma_min=sigma_min, sigma_max=sigma_max, s_churn=0)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=KarrasVePipeline)\n elif noise_sched_type == None:\n noise_sched = pipline.scheduler\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=ScoreSdeVePipeline)\n else:\n raise NotImplementedError()\n \n if clip_sample_used != None:\n noise_sched.config.clip_sample = clip_sample_used\n \n return model, None, noise_sched, get_pipeline \n \n @staticmethod\n def __get_model_sched_ldm(ckpt_id: str, clip_sample: bool, noise_sched_type: str=None):\n # Clip option\n clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=DiffuserModelSched.CLIP_SAMPLE_DEFAULT)\n # Pipeline\n pipline: DiffusionPipeline = DiffusionPipeline.from_pretrained(ckpt_id)\n \n model: UNet2DModel = pipline.unet\n vae: VQModel = pipline.vqvae\n num_train_timesteps: int = 1000\n beta_start: float = 0.0015\n beta_end: float = 0.0195\n beta_schedule: str = \"scaled_linear\"\n clip_sample_default: bool = False\n # timestep_values = None\n 
trained_betas: Optional[Union[np.ndarray, List[float]]] = None\n \n LDMPipeline_used = partial(LDMPipeline, clip_sample=clip_sample_used)\n\n # if noise_sched_type == DiffuserModelSched.DDIM_SCHED:\n # noise_sched = DDIMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, clip_sample=clip_sample_default)\n # get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(unet=model, vqvae=vqvae, scheduler=noise_sched, pipeline=LDMPipeline_used)\n \n if noise_sched_type == DiffuserModelSched.DDPM_SCHED:\n noise_sched = DDPMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline)\n elif noise_sched_type == DiffuserModelSched.DDIM_SCHED:\n noise_sched = DDIMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, clip_sample=clip_sample_used)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=1, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O1_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=1, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=2, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O2_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=2, algorithm_type='dpmsolver')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_PP_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=3, algorithm_type='dpmsolver++')\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DPM_SOLVER_O3_SCHED:\n noise_sched = DPMSolverMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas, solver_order=3, algorithm_type='dpmsolver')\n get_pipeline = 
DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.UNIPC_SCHED:\n noise_sched = UniPCMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.PNDM_SCHED:\n noise_sched = PNDMScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.DEIS_SCHED:\n noise_sched = DEISMultistepScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.HEUN_SCHED:\n noise_sched = HeunDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline_used)\n elif noise_sched_type == DiffuserModelSched.LMSD_SCHED:\n noise_sched = LMSDiscreteScheduler(num_train_timesteps=num_train_timesteps, beta_start=beta_start, beta_end=beta_end, beta_schedule=beta_schedule, trained_betas=trained_betas)\n get_pipeline = DiffuserModelSched.__get_pipeline_generator(unet=model, scheduler=noise_sched, pipeline=LDMPipeline_used)\n elif noise_sched_type == None:\n noise_sched = pipline.scheduler\n get_pipeline = DiffuserModelSched.__get_ldm_pipeline_generator(pipeline=LDMPipeline)\n else:\n raise NotImplementedError()\n \n if clip_sample_used != None:\n noise_sched.config.clip_sample = clip_sample_used\n \n return model, vae, noise_sched, get_pipeline\n \n @staticmethod\n def __get_model_sched(ckpt_id: str, clip_sample: bool, clip_sample_range: float=None, noise_sched_type: str=None, num_inference_steps: int=1000, sde_type: str=SDE_VP):\n if sde_type == DiffuserModelSched.SDE_VP:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.__get_model_sched_vp(ckpt_id=ckpt_id, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type)\n elif sde_type == DiffuserModelSched.SDE_VE:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.__get_model_sched_ve(ckpt_id=ckpt_id, clip_sample=clip_sample, noise_sched_type=noise_sched_type)\n elif sde_type == DiffuserModelSched.SDE_LDM:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.__get_model_sched_ldm(ckpt_id=ckpt_id, clip_sample=clip_sample, noise_sched_type=noise_sched_type)\n else:\n raise NotImplementedError(f\"sde_type {sde_type} not implemented\")\n if model != None:\n model.requires_grad_(True)\n if vae != None:\n vae.requires_grad_(False)\n return model, vae, noise_sched, get_pipeline\n \n @staticmethod\n def check_image_size_channel(image_size: int, channels: int):\n if image_size == None or channels == None:\n raise ValueError(f\"Arguement image_size and channels shouldn't be {image_size} and {channels}\")\n \n @staticmethod\n def get_model_sched(image_size: int=None, channels: int=None, ckpt: str=MODEL_DEFAULT, sde_type: str=SDE_VP, clip_sample: bool=None, clip_sample_range: float=None, 
noise_sched_type: str=None, **kwargs):\n @torch.no_grad()\n def weight_reset(m: nn.Module):\n # - check if the current module has reset_parameters & if it's callabed called it on m\n reset_parameters = getattr(m, \"reset_parameters\", None)\n if callable(reset_parameters):\n m.reset_parameters()\n \n # clip_sample_used = DiffuserModelSched.get_sample_clip(clip_sample=clip_sample, clip_sample_default=False)\n # noise_sched = DDPMScheduler(num_train_timesteps=1000, clip_sample=clip_sample_used)\n \n vae = None\n \n if ckpt == DiffuserModelSched.MODEL_DEFAULT or ckpt == DiffuserModelSched.DDPM_32_DEFAULT:\n DiffuserModelSched.check_image_size_channel(image_size=image_size, channels=channels)\n _, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CIFAR10_32, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = UNet2DModel(\n in_channels=channels,\n out_channels=channels,\n sample_size=image_size,\n act_fn=\"silu\",\n attention_head_dim=None,\n block_out_channels=[128, 256, 256, 256],\n center_input_sample=False,\n down_block_types=[\"DownBlock2D\", \"AttnDownBlock2D\", \"DownBlock2D\", \"DownBlock2D\"], \n downsample_padding=0,\n flip_sin_to_cos=False,\n freq_shift=1,\n layers_per_block=2,\n mid_block_scale_factor=1,\n norm_eps=1e-06,\n norm_num_groups=32,\n time_embedding_type=\"positional\",\n up_block_types=[\"UpBlock2D\", \"UpBlock2D\", \"AttnUpBlock2D\", \"UpBlock2D\"]\n )\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_32_DEFAULT:\n DiffuserModelSched.check_image_size_channel(image_size=image_size, channels=channels)\n _, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = UNet2DModel(\n in_channels=channels,\n out_channels=channels,\n sample_size=image_size,\n act_fn=\"silu\",\n attention_head_dim=None,\n block_out_channels=[128, 256, 256, 256],\n center_input_sample=False,\n down_block_types=[\"SkipDownBlock2D\", \"AttnSkipDownBlock2D\", \"SkipDownBlock2D\", \"SkipDownBlock2D\"], \n downsample_padding=1,\n flip_sin_to_cos=True,\n freq_shift=0,\n layers_per_block=4,\n mid_block_scale_factor=1.41421356237,\n norm_eps=1e-06,\n norm_num_groups=None,\n time_embedding_type=\"fourier\",\n up_block_types=[\"SkipUpBlock2D\", \"SkipUpBlock2D\", \"AttnSkipUpBlock2D\", \"SkipUpBlock2D\"]\n )\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_CIFAR10_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CIFAR10_32, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_CELEBA_HQ_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_CHURCH_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_CHURCH_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = 
model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.DDPM_BEDROOM_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.DDPM_BEDROOM_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.LDM_CELEBA_HQ_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.LDM_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_CIFAR10_DEFAULT:\n _, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = UNet2DModel(\n in_channels=3,\n out_channels=3,\n sample_size=32,\n act_fn=\"silu\",\n attention_head_dim=None,\n block_out_channels=[128, 256, 256, 256],\n center_input_sample=False,\n down_block_types=[\"SkipDownBlock2D\", \"AttnSkipDownBlock2D\", \"SkipDownBlock2D\", \"SkipDownBlock2D\"], \n downsample_padding=1,\n flip_sin_to_cos=True,\n freq_shift=0,\n layers_per_block=4,\n mid_block_scale_factor=1.41421356237,\n norm_eps=1e-06,\n norm_num_groups=None,\n time_embedding_type=\"fourier\",\n up_block_types=[\"SkipUpBlock2D\", \"SkipUpBlock2D\", \"AttnSkipUpBlock2D\", \"SkipUpBlock2D\"]\n )\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_CELEBA_HQ_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CELEBA_HQ_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n elif ckpt == DiffuserModelSched.NCSNPP_CHURCH_DEFAULT:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=DiffuserModelSched.NCSNPP_CHURCH_256, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n model = model.apply(weight_reset)\n else:\n model, vae, noise_sched, get_pipeline = DiffuserModelSched.get_pretrained(ckpt=ckpt, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n return model, vae, noise_sched, get_pipeline\n \n @staticmethod\n def get_pretrained(ckpt: str, clip_sample: bool=None, clip_sample_range: float=None, noise_sched_type: str=None, num_inference_steps: int=1000, sde_type: str=SDE_VP):\n if ckpt == DiffuserModelSched.DDPM_CIFAR10_32:\n ckpt: str = \"google/ddpm-cifar10-32\"\n elif ckpt == DiffuserModelSched.DDPM_CELEBA_HQ_256:\n ckpt: str = \"google/ddpm-ema-celebahq-256\"\n elif ckpt == DiffuserModelSched.DDPM_CHURCH_256:\n ckpt: str = \"google/ddpm-ema-church-256\"\n elif ckpt == DiffuserModelSched.DDPM_BEDROOM_256:\n ckpt: str = \"google/ddpm-ema-bedroom-256\"\n elif ckpt == DiffuserModelSched.LDM_CELEBA_HQ_256:\n ckpt: str = \"CompVis/ldm-celebahq-256\"\n elif ckpt == DiffuserModelSched.NCSNPP_CIFAR10_32: \n ckpt: str = \"fusing/cifar10-ncsnpp-ve\"\n elif ckpt == DiffuserModelSched.NCSNPP_CELEBA_HQ_256:\n ckpt: str = \"google/ncsnpp-celebahq-256\"\n elif ckpt == DiffuserModelSched.NCSNPP_CHURCH_256:\n ckpt: str = \"google/ncsnpp-church-256\"\n \n # return model, noise_sched\n return 
DiffuserModelSched.__get_model_sched(ckpt_id=ckpt, clip_sample=clip_sample, clip_sample_range=clip_sample_range, noise_sched_type=noise_sched_type, sde_type=sde_type)\n \n @staticmethod\n def get_optim(ckpt: str, optim: torch.optim, lr_sched: torch.optim.lr_scheduler):\n lr_sched.load_state_dict(torch.load(DiffuserModelSched.LR_SCHED_CKPT, map_location=\"cpu\"))\n optim.load_state_dict(torch.load(DiffuserModelSched.OPTIM_CKPT, map_location=\"cpu\"))\n return optim, lr_sched" }, { "identifier": "DatasetLoader", "path": "dataset.py", "snippet": "class DatasetLoader(object):\n # Dataset generation mode\n MODE_FIXED = \"FIXED\"\n MODE_FLEX = \"FLEX\"\n MODE_NONE = \"NONE\"\n MODE_EXTEND = \"EXTEND\"\n \n # Dataset names\n MNIST = \"MNIST\"\n CIFAR10 = \"CIFAR10\"\n CELEBA = \"CELEBA\"\n LSUN_CHURCH = \"LSUN-CHURCH\"\n LSUN_BEDROOM = \"LSUN-BEDROOM\"\n CELEBA_HQ = \"CELEBA-HQ\"\n CELEBA_HQ_LATENT_PR05 = \"CELEBA-HQ-LATENT_PR05\"\n CELEBA_HQ_LATENT = \"CELEBA-HQ-LATENT\"\n \n # Inpaint Type\n INPAINT_BOX: str = \"INPAINT_BOX\"\n INPAINT_LINE: str = \"INPAINT_LINE\"\n\n TRAIN = \"train\"\n TEST = \"test\"\n PIXEL_VALUES = \"pixel_values\"\n PIXEL_VALUES_TRIGGER = \"pixel_values_trigger\"\n TRIGGER = \"trigger\"\n TARGET = \"target\"\n IS_CLEAN = \"is_clean\"\n R_trigger_only = \"R_trigger_only\"\n IMAGE = \"image\"\n LABEL = \"label\"\n def __init__(self, name: str, label: int=None, root: str=None, channel: int=None, image_size: int=None, vmin: Union[int, float]=DEFAULT_VMIN, vmax: Union[int, float]=DEFAULT_VMAX, batch_size: int=512, shuffle: bool=True, seed: int=0):\n self.__root = root\n self.__name = name\n if label != None and not isinstance(label, list)and not isinstance(label, tuple):\n self.__label = [label]\n else:\n self.__label = label\n self.__channel = channel\n self.__vmin = vmin\n self.__vmax = vmax\n self.__batch_size = batch_size\n self.__shuffle = shuffle\n self.__dataset = self.__load_dataset(name=name)\n self.__set_img_shape(image_size=image_size)\n self.__trigger_type = self.__target_type = None\n self.__trigger = self.__target = self.__poison_rate = self.__ext_poison_rate = None\n self.__clean_rate = 1\n self.__seed = seed\n self.__rand_generator = torch.Generator()\n self.__rand_generator.manual_seed(self.__seed)\n if root != None:\n self.__backdoor = Backdoor(root=root)\n self.__R_trigger_only: bool = False\n \n # self.__prep_dataset()\n\n def set_poison(self, trigger_type: str, target_type: str, target_dx: int=-5, target_dy: int=-3, clean_rate: float=1.0, poison_rate: float=0.2, ext_poison_rate: float=0.0) -> 'DatasetLoader':\n if self.__root == None:\n raise ValueError(\"Attribute 'root' is None\")\n self.__clean_rate = clean_rate\n self.__ext_poison_rate = ext_poison_rate\n self.__poison_rate = poison_rate\n self.__trigger_type = trigger_type\n self.__target_type = target_type\n self.__trigger = self.__backdoor.get_trigger(type=trigger_type, channel=self.__channel, image_size=self.__image_size, vmin=self.__vmin, vmax=self.__vmax)\n self.__target = self.__backdoor.get_target(type=target_type, trigger=self.__trigger, dx=target_dx, dy=target_dy, vmin=self.__vmin, vmax=self.__vmax)\n return self\n \n def __load_dataset(self, name: str):\n datasets.config.IN_MEMORY_MAX_SIZE = 50 * 2 ** 30\n split_method = 'train+test'\n if name == DatasetLoader.MNIST:\n return load_dataset(\"mnist\", split=split_method)\n elif name == DatasetLoader.CIFAR10:\n return load_dataset(\"cifar10\", split=split_method)\n elif name == DatasetLoader.CELEBA:\n return load_dataset(\"student/celebA\", 
split='train')\n elif name == DatasetLoader.CELEBA_HQ:\n # return load_dataset(\"huggan/CelebA-HQ\", split=split_method)\n return load_dataset(\"datasets/celeba_hq_256\", split='train')\n elif name == DatasetLoader.CELEBA_HQ_LATENT_PR05:\n return load_from_disk(\"datasets/celeba_hq_256_pr05\")\n elif name == DatasetLoader.CELEBA_HQ_LATENT:\n return LatentDataset(ds_root='datasets/celeba_hq_256_latents')\n else:\n raise NotImplementedError(f\"Undefined dataset: {name}\")\n \n def __set_img_shape(self, image_size: int) -> None:\n # Set channel\n if self.__name == self.MNIST:\n self.__channel = 1 if self.__channel == None else self.__channel\n # self.__vmin = -1\n # self.__vmax = 1\n self.__cmap = \"gray\"\n elif self.__name == self.CIFAR10 or self.__name == self.CELEBA or self.__name == self.CELEBA_HQ or self.__name == self.LSUN_CHURCH or self.__name == self.CELEBA_HQ_LATENT_PR05 or self.__name == self.CELEBA_HQ_LATENT:\n self.__channel = 3 if self.__channel == None else self.__channel\n # self.__vmin = -1\n # self.__vmax = 1\n self.__cmap = None\n else:\n raise NotImplementedError(f\"No dataset named as {self.__name}\")\n\n # Set image size\n if image_size == None:\n if self.__name == self.MNIST:\n self.__image_size = 32\n elif self.__name == self.CIFAR10:\n self.__image_size = 32\n elif self.__name == self.CELEBA:\n self.__image_size = 64\n elif self.__name == self.CELEBA_HQ or self.__name == self.LSUN_CHURCH or self.__name == self.CELEBA_HQ_LATENT_PR05 or self.__name == self.CELEBA_HQ_LATENT:\n self.__image_size = 256\n else:\n raise NotImplementedError(f\"No dataset named as {self.__name}\")\n else:\n self.__image_size = image_size\n \n def __get_transform(self, prev_trans: List=[], next_trans: List=[]):\n if self.__channel == 1:\n channel_trans = transforms.Grayscale(num_output_channels=1)\n elif self.__channel == 3:\n channel_trans = transforms.Lambda(lambda x: x.convert(\"RGB\"))\n \n aug_trans = []\n if self.__dataset != DatasetLoader.LSUN_CHURCH:\n aug_trans = [transforms.RandomHorizontalFlip()] \n \n trans = [channel_trans,\n transforms.Resize([self.__image_size, self.__image_size]), \n transforms.ToTensor(),\n transforms.Lambda(lambda x: normalize(vmin_in=0, vmax_in=1, vmin_out=self.__vmin, vmax_out=self.__vmax, x=x)),\n # transforms.Normalize([0.5], [0.5]),\n ] + aug_trans\n return Compose(prev_trans + trans + next_trans)\n \n # trans = [transforms.Resize(self.__image_size), \n # transforms.ToTensor(),\n # transforms.Lambda(lambda x: normalize(vmin=self.__vmin, vmax=self.__vmax, x=x))]\n # return Compose(prev_trans + self.TRANSFORM_OPS + + next_trans)\n \n def __fixed_sz_dataset_old(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n # Apply transformations\n self.__full_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, True))\n\n # Generate poisoned dataset\n if self.__poison_rate > 0:\n full_ds_len = len(self.__full_dataset[DatasetLoader.TRAIN])\n perm_idx = torch.randperm(full_ds_len, generator=gen).long()\n self.__poison_n = int(full_ds_len * float(self.__poison_rate))\n self.__clean_n = full_ds_len - self.__poison_n\n \n # print(f\"perm_idx: {perm_idx}\")\n # print(f\"len(perm_idx): {len(perm_idx)}, max: {torch.max(perm_idx)}, min: {torch.min(perm_idx)}\")\n # print(f\"Clean n: {self.__clean_n}, Poison n: {self.__poison_n}\")\n \n self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], perm_idx[:self.__clean_n].tolist())\n \n # print(f\"Clean dataset len: 
{len(self.__full_dataset[DatasetLoader.TRAIN])}\")\n \n self.__backdoor_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, False))\n self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], perm_idx[self.__clean_n:].tolist())\n # print(f\"Backdoor dataset len: {len(self.__backdoor_dataset)}\")\n self.__full_dataset[DatasetLoader.TRAIN] = ConcatDataset([self.__full_dataset[DatasetLoader.TRAIN], self.__backdoor_dataset])\n # print(f\"self.__full_dataset[DatasetLoader.TRAIN] len: {len(self.__full_dataset[DatasetLoader.TRAIN])}\")\n self.__full_dataset = self.__full_dataset[DatasetLoader.TRAIN]\n \n def manual_split():\n pass\n \n def __fixed_sz_dataset(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n if float(self.__poison_rate) < 0 or float(self.__poison_rate) > 1:\n raise ValueError(f\"In {DatasetLoader.MODE_FIXED}, poison rate should <= 1.0 and >= 0.0\")\n \n ds_n = len(self.__dataset)\n backdoor_n = int(ds_n * float(self.__poison_rate))\n ds_ls = []\n \n # Apply transformations\n if float(self.__poison_rate) == 0.0:\n self.__clean_dataset = self.__dataset\n self.__backdoor_dataset = None\n elif float(self.__poison_rate) == 1.0:\n self.__clean_dataset = None\n self.__backdoor_dataset = self.__dataset\n else:\n full_dataset: datasets.DatasetDict = self.__dataset.train_test_split(test_size=backdoor_n)\n self.__clean_dataset = full_dataset[DatasetLoader.TRAIN]\n self.__backdoor_dataset = full_dataset[DatasetLoader.TEST]\n \n if self.__clean_dataset != None:\n clean_n = len(self.__clean_dataset)\n self.__clean_dataset = self.__clean_dataset.add_column(DatasetLoader.IS_CLEAN, [True] * clean_n)\n ds_ls.append(self.__clean_dataset)\n # print(f\"TRAIN IS_CLEAN N: {len(self.__full_dataset[DatasetLoader.TRAIN].filter(lambda x: x[DatasetLoader.IS_CLEAN]))}\")\n \n if self.__backdoor_dataset != None:\n backdoor_n = len(self.__backdoor_dataset)\n self.__backdoor_dataset = self.__backdoor_dataset.add_column(DatasetLoader.IS_CLEAN, [False] * backdoor_n)\n ds_ls.append(self.__backdoor_dataset)\n # print(f\"TEST !IS_CLEAN N: {len(self.__full_dataset[DatasetLoader.TEST].filter(lambda x: not x[DatasetLoader.IS_CLEAN]))}\")\n \n def trans(x):\n if x[DatasetLoader.IS_CLEAN][0]:\n # print(f\"IS_CLEAN: {x[DatasetLoader.IS_CLEAN]}\")\n return self.__transform_generator(self.__name, True, self.__R_trigger_only)(x)\n return self.__transform_generator(self.__name, False, self.__R_trigger_only)(x)\n \n \n self.__full_dataset = concatenate_datasets(ds_ls)\n # print(f\"IS_CLEAN N: {len(self.__full_dataset.filter(lambda x: x[DatasetLoader.IS_CLEAN]))}\")\n self.__full_dataset = self.__full_dataset.with_transform(trans)\n # print(f\"__full_dataset len: {len(self.__full_dataset)}, features: {self.__full_dataset.features}, keys: {self.__full_dataset[0].keys()}\")\n \n\n def __flex_sz_dataset_old(self):\n # Apply transformations\n self.__full_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, True))\n \n full_ds_len = len(self.__full_dataset[DatasetLoader.TRAIN])\n \n # Shrink the clean dataset\n if self.__clean_rate != 1:\n self.__clean_n = int(full_ds_len * float(self.__clean_rate))\n self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], list(range(0, self.__clean_n, 1)))\n # MODIFIED: Only 1 poisoned training sample\n # self.__full_dataset[DatasetLoader.TRAIN] = Subset(self.__full_dataset[DatasetLoader.TRAIN], list(range(0, 1, 1)))\n \n # Generate poisoned dataset\n if 
self.__poison_rate > 0:\n self.__backdoor_dataset = self.__dataset.with_transform(self.__transform_generator(self.__name, False))\n self.__poison_n = int(full_ds_len * float(self.__poison_rate))\n self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], list(range(0, self.__poison_n, 1))) \n self.__full_dataset[DatasetLoader.TRAIN] = ConcatDataset([self.__full_dataset[DatasetLoader.TRAIN], self.__backdoor_dataset])\n # MODIFIED: Only 1 clean training sample\n # self.__backdoor_dataset = Subset(self.__backdoor_dataset[DatasetLoader.TRAIN], list(range(0, 1, 1)))\n # self.__full_dataset[DatasetLoader.TRAIN] = self.__backdoor_dataset\n \n self.__full_dataset = self.__full_dataset[DatasetLoader.TRAIN]\n \n def __flex_sz_dataset(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n def portion_sz(rate: float, n: int):\n return int(n * float(rate))\n \n def slice_ds(dataset, rate: float, ds_size: int):\n if float(rate) == 0.0:\n return None\n elif float(rate) == 1.0:\n return dataset\n else:\n return dataset.train_test_split(test_size=portion_sz(rate=rate, n=ds_size))[DatasetLoader.TEST]\n \n ds_ls: List = []\n ds_n = len(self.__dataset)\n print(f\"Total Dataset Size: {ds_n}\")\n \n # Apply transformations\n self.__full_dataset: datasets.DatasetDict = self.__dataset.train_test_split()\n \n clean_ds = slice_ds(dataset=self.__dataset, rate=float(self.__clean_rate), ds_size=ds_n)\n if clean_ds is not None:\n print(f\"[Mode Flex] Clean Dataset Size: {len(clean_ds)}\")\n ds_ls.append(clean_ds.add_column(DatasetLoader.IS_CLEAN, [True] * portion_sz(rate=self.__clean_rate, n=ds_n)))\n else:\n print(f\"[Mode Flex] Clean Dataset Size: 0\")\n \n backdoor_ds = slice_ds(dataset=self.__dataset, rate=float(self.__poison_rate), ds_size=ds_n)\n if backdoor_ds is not None:\n print(f\"[Mode Flex] Backdoor Dataset Size: {len(backdoor_ds)}\")\n ds_ls.append(backdoor_ds.add_column(DatasetLoader.IS_CLEAN, [False] * portion_sz(rate=self.__poison_rate, n=ds_n)))\n else:\n print(f\"[Mode Flex] Backdoor Dataset Size: 0\")\n \n # self.__full_dataset[DatasetLoader.TRAIN] = self.__full_dataset[DatasetLoader.TRAIN].add_column(DatasetLoader.IS_CLEAN, [True] * train_n)\n # self.__full_dataset[DatasetLoader.TEST] = self.__full_dataset[DatasetLoader.TEST].add_column(DatasetLoader.IS_CLEAN, [False] * test_n)\n \n def trans(x):\n if x[DatasetLoader.IS_CLEAN][0]:\n return self.__transform_generator(self.__name, True, self.__R_trigger_only)(x)\n return self.__transform_generator(self.__name, False, self.__R_trigger_only)(x)\n \n self.__full_dataset = concatenate_datasets(ds_ls)\n self.__full_dataset = self.__full_dataset.with_transform(trans)\n print(f\"[Mode Flex] Full Dataset Size: {len(self.__full_dataset)}\")\n\n def __extend_sz_dataset(self):\n gen = torch.Generator()\n gen.manual_seed(self.__seed)\n \n def portion_sz(rate: float, n: int):\n return int(n * float(rate))\n \n def slice_ds(dataset, rate: float, ds_size: int):\n if float(rate) == 0.0:\n return None\n elif float(rate) == 1.0:\n return dataset\n elif float(rate) > 1.0:\n mul: int = int(rate // 1)\n mod: float = float(rate - mul)\n cat_ds = [slice_ds(dataset, rate=1.0, ds_size=ds_size) for i in range(mul)]\n if mod > 0:\n cat_ds.append(slice_ds(dataset, rate=mod, ds_size=ds_size))\n return concatenate_datasets(cat_ds)\n else:\n return dataset.train_test_split(test_size=portion_sz(rate=rate, n=ds_size))[DatasetLoader.TEST]\n\n def trans(x):\n # print(f\"x[DatasetLoader.IS_CLEAN] len: {len(x[DatasetLoader.IS_CLEAN])}\")\n if 
x[DatasetLoader.IS_CLEAN][0]:\n return self.__transform_generator(self.__name, True, x[DatasetLoader.R_trigger_only][0])(x)\n return self.__transform_generator(self.__name, False, x[DatasetLoader.R_trigger_only][0])(x)\n \n ds_ls: List = []\n ds_n = len(self.__dataset)\n ext_backdoor_n = int(ds_n * float(self.__ext_poison_rate))\n print(f\"Total Dataset Size: {ds_n}\")\n clean_dataset = ext_backdoor_dataset = backdoor_dataset = None\n \n # Apply transformations\n if float(self.__ext_poison_rate) == 0.0:\n clean_dataset = self.__dataset\n ext_backdoor_dataset = None\n elif float(self.__ext_poison_rate) == 1.0:\n clean_dataset = None\n ext_backdoor_dataset = self.__dataset\n else:\n full_dataset: datasets.DatasetDict = self.__dataset.train_test_split(test_size=ext_backdoor_n)\n clean_dataset = full_dataset[DatasetLoader.TRAIN]\n ext_backdoor_dataset = full_dataset[DatasetLoader.TEST]\n \n if clean_dataset != None:\n clean_n = len(clean_dataset)\n clean_dataset = clean_dataset.add_column(DatasetLoader.IS_CLEAN, [True] * clean_n).add_column(DatasetLoader.R_trigger_only, [False] * clean_n)\n print(f\"[Mode Extend] Clean Dataset Size: {len(clean_dataset)}, {clean_dataset[1].keys()}\")\n clean_dataset = clean_dataset.with_transform(trans)\n ds_ls.append(clean_dataset)\n else:\n print(f\"[Mode Extend] Clean Dataset Size: 0\")\n # print(f\"TRAIN IS_CLEAN N: {len(self.__full_dataset[DatasetLoader.TRAIN].filter(lambda x: x[DatasetLoader.IS_CLEAN]))}\")\n \n if ext_backdoor_dataset != None:\n ext_backdoor_n = len(ext_backdoor_dataset)\n ext_backdoor_dataset = ext_backdoor_dataset.add_column(DatasetLoader.IS_CLEAN, [False] * ext_backdoor_n).add_column(DatasetLoader.R_trigger_only, [self.__ext_R_trigger_only] * ext_backdoor_n)\n print(f\"[Mode Extend] Extend Backdoor Dataset Size: {len(ext_backdoor_dataset)}, {ext_backdoor_dataset[1].keys()}\")\n ext_backdoor_dataset = ext_backdoor_dataset.with_transform(trans)\n ds_ls.append(ext_backdoor_dataset)\n else:\n print(f\"[Mode Extend] Extend Backdoor Dataset Size: 0\")\n # print(f\"TEST !IS_CLEAN N: {len(self.__full_dataset[DatasetLoader.TEST].filter(lambda x: not x[DatasetLoader.IS_CLEAN]))}\")\n \n backdoor_dataset = slice_ds(dataset=self.__dataset, rate=float(self.__poison_rate), ds_size=ds_n)\n if backdoor_dataset is not None:\n backdoor_n = portion_sz(rate=self.__poison_rate, n=ds_n)\n backdoor_dataset = backdoor_dataset.add_column(DatasetLoader.IS_CLEAN, [False] * backdoor_n).add_column(DatasetLoader.R_trigger_only, [self.__R_trigger_only] * backdoor_n)\n print(f\"[Mode Extend] Backdoor Dataset Size: {len(backdoor_dataset)}, {backdoor_dataset[1].keys()}\")\n backdoor_dataset = backdoor_dataset.with_transform(trans)\n ds_ls.append(backdoor_dataset)\n else:\n print(f\"[Mode Extend] Backdoor Dataset Size: 0\")\n \n # self.__full_dataset[DatasetLoader.TRAIN] = self.__full_dataset[DatasetLoader.TRAIN].add_column(DatasetLoader.IS_CLEAN, [True] * train_n)\n # self.__full_dataset[DatasetLoader.TEST] = self.__full_dataset[DatasetLoader.TEST].add_column(DatasetLoader.IS_CLEAN, [False] * test_n)\n\n self.__full_dataset = concatenate_datasets(ds_ls)\n # self.__full_dataset = self.__full_dataset.with_transform(trans)\n print(f\"[Mode Extend] Full Dataset Size: {len(self.__full_dataset)}\")\n \n def prepare_dataset(self, mode: str=\"FIXED\", R_trigger_only: bool=False, ext_R_trigger_only: bool=False, R_gaussian_aug: float=0.0) -> 'DatasetLoader':\n self.__R_trigger_only = R_trigger_only\n self.__ext_R_trigger_only = ext_R_trigger_only\n self.__R_gaussian_aug = 
R_gaussian_aug\n # Filter specified classes\n if self.__label != None:\n self.__dataset = self.__dataset.filter(lambda x: x[DatasetLoader.LABEL] in self.__label)\n \n if mode == DatasetLoader.MODE_FIXED:\n if self.__clean_rate != 1.0 or self.__clean_rate != None:\n Log.warning(\"In 'FIXED' mode of DatasetLoader, the clean_rate will be ignored whatever.\")\n self.__fixed_sz_dataset()\n elif mode == DatasetLoader.MODE_FLEX:\n self.__flex_sz_dataset()\n elif mode == DatasetLoader.MODE_EXTEND:\n self.__extend_sz_dataset()\n elif mode == DatasetLoader.MODE_NONE:\n self.__full_dataset = self.__dataset\n else:\n raise NotImplementedError(f\"Argument mode: {mode} isn't defined\")\n \n # Special Handling for LatentDataset\n if self.__name == self.CELEBA_HQ_LATENT:\n self.__full_dataset.set_poison(target_key=self.__target_type, poison_key=self.__trigger_type, raw='raw', poison_rate=self.__poison_rate, use_latent=True).set_use_names(target=DatasetLoader.TARGET, poison=DatasetLoader.PIXEL_VALUES, raw=DatasetLoader.IMAGE)\n \n # Note the minimum and the maximum values\n print(f\"{self.__full_dataset[1].keys()}\")\n ex = self.__full_dataset[1][DatasetLoader.TARGET]\n print(f\"Dataset Len: {len(self.__full_dataset)}\")\n if len(ex) == 1:\n print(f\"Note that CHANNEL 0 - vmin: {torch.min(ex[0])} and vmax: {torch.max(ex[0])}\") \n elif len(ex) == 3:\n print(f\"Note that CHANNEL 0 - vmin: {torch.min(ex[0])} and vmax: {torch.max(ex[0])} | CHANNEL 1 - vmin: {torch.min(ex[1])} and vmax: {torch.max(ex[1])} | CHANNEL 2 - vmin: {torch.min(ex[2])} and vmax: {torch.max(ex[2])}\")\n return self\n\n def get_dataset(self) -> datasets.Dataset:\n return self.__full_dataset\n \n def save_dataset(self, file: str):\n self.__full_dataset.save_to_disk(file)\n\n def get_dataloader(self, batch_size: int=None, shuffle: bool=None, num_workers: int=None, collate_fn: callable=None) -> torch.utils.data.DataLoader:\n datasets = self.get_dataset()\n if batch_size == None:\n batch_size = self.__batch_size\n if shuffle == None:\n shuffle = self.__shuffle\n if num_workers == None:\n num_workers = 8\n if collate_fn != None:\n return DataLoader(datasets, batch_size=batch_size, shuffle=shuffle, pin_memory=True, num_workers=num_workers, collate_fn=collate_fn)\n return DataLoader(datasets, batch_size=batch_size, shuffle=shuffle, pin_memory=True, num_workers=num_workers)\n \n def get_mask(self, trigger: torch.Tensor) -> torch.Tensor:\n return torch.where(trigger > self.__vmin, 0, 1)\n\n def __transform_generator(self, dataset_name: str, clean: bool, R_trigger_only: bool) -> Callable[[torch.Tensor], torch.Tensor]:\n if dataset_name == self.MNIST:\n img_key = \"image\"\n elif dataset_name == self.CIFAR10:\n img_key = \"img\"\n if dataset_name == self.CELEBA:\n img_key = \"image\"\n if dataset_name == self.CELEBA_HQ:\n img_key = \"image\"\n # define function\n def clean_transforms(examples) -> DatasetDict:\n if dataset_name == self.MNIST:\n trans = self.__get_transform()\n examples[DatasetLoader.IMAGE] = torch.stack([trans(image.convert(\"L\")) for image in examples[img_key]])\n else:\n # trans = self.__get_transform(prev_trans=[transforms.PILToTensor()])\n trans = self.__get_transform()\n # trans = Compose([transforms.PILToTensor(), transforms.Lambda(lambda t: t / 255)])\n examples[DatasetLoader.IMAGE] = torch.stack([trans(image) for image in examples[img_key]])\n # examples[DatasetLoader.PIXEL_VALUES] = torch.tensor(np.array([np.asarray(image) / 255 for image in examples[img_key]])).permute(0, 3, 1, 2)\n if img_key != DatasetLoader.IMAGE:\n 
del examples[img_key]\n \n examples[DatasetLoader.PIXEL_VALUES_TRIGGER] = torch.full_like(examples[DatasetLoader.IMAGE], 0)\n examples[DatasetLoader.PIXEL_VALUES] = torch.full_like(examples[DatasetLoader.IMAGE], 0)\n examples[DatasetLoader.TARGET] = torch.clone(examples[DatasetLoader.IMAGE])\n \n data_shape = examples[DatasetLoader.PIXEL_VALUES].shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n examples[DatasetLoader.TRIGGER] = self.__trigger.repeat(*repeat_times)\n \n # examples[DatasetLoader.IS_CLEAN] = torch.tensor([True] * len(examples[DatasetLoader.PIXEL_VALUES]))\n if DatasetLoader.LABEL in examples:\n examples[DatasetLoader.LABEL] = torch.tensor([torch.tensor(x, dtype=torch.float) for x in examples[DatasetLoader.LABEL]])\n else:\n examples[DatasetLoader.LABEL] = torch.tensor([torch.tensor(-1, dtype=torch.float) for i in range(len(examples[DatasetLoader.PIXEL_VALUES]))])\n # print(f\"examples[img_key] Type: {type(examples[img_key])}\")\n # examples[img_key] = torch.tensor(np.array([np.asarray(image) / 255 for image in examples[img_key]])).permute(2, 0, 1)\n # examples[img_key] = torch.stack([self.__get_transform()(np.asarray(image)) for image in examples[img_key]])\n return examples\n def backdoor_transforms(examples) -> DatasetDict:\n examples = clean_transforms(examples)\n \n data_shape = examples[DatasetLoader.PIXEL_VALUES].shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n \n masks = self.get_mask(self.__trigger).repeat(*repeat_times)\n # print(f\"masks shape: {masks.shape} | examples[DatasetLoader.PIXEL_VALUES] shape: {examples[DatasetLoader.PIXEL_VALUES].shape} | self.__trigger.repeat(*repeat_times) shape: {self.__trigger.repeat(*repeat_times).shape}\")\n # examples[DatasetLoader.PIXEL_VALUES] = masks * examples[DatasetLoader.IMAGE] + (1 - masks) * self.__trigger.repeat(*repeat_times)\n\n examples[DatasetLoader.PIXEL_VALUES_TRIGGER] = self.__trigger.repeat(*repeat_times)\n if R_trigger_only:\n examples[DatasetLoader.PIXEL_VALUES] = self.__trigger.repeat(*repeat_times)\n else:\n examples[DatasetLoader.PIXEL_VALUES] = masks * examples[DatasetLoader.IMAGE] + (1 - masks) * self.__trigger.repeat(*repeat_times)\n \n # print(f\"self.__target.repeat(*repeat_times) shape: {self.__target.repeat(*repeat_times).shape}\")\n examples[DatasetLoader.TARGET] = self.__target.repeat(*repeat_times)\n # examples[DatasetLoader.IS_CLEAN] = torch.tensor([False] * data_shape[0])\n return examples\n \n if clean:\n return clean_transforms\n return backdoor_transforms\n \n def get_poisoned(self, imgs) -> torch.Tensor:\n data_shape = imgs.shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n \n masks = self.get_mask(self.__trigger).repeat(*repeat_times)\n return masks * imgs + (1 - masks) * self.__trigger.repeat(*repeat_times)\n \n def get_inpainted(self, imgs, mask: torch.Tensor) -> torch.Tensor:\n data_shape = imgs.shape\n repeat_times = (data_shape[0], *([1] * len(data_shape[1:])))\n \n notthing_tensor = torch.full_like(imgs, fill_value=torch.min(imgs))\n masks = mask.repeat(*repeat_times)\n return masks * imgs + (1 - masks) * notthing_tensor\n \n def get_inpainted_boxes(self, imgs, up: int, low: int, left: int, right: int) -> torch.Tensor: \n masked_val = 0\n unmasked_val = 1\n mask = torch.full_like(imgs[0], fill_value=unmasked_val)\n if len(mask.shape) == 3:\n mask[:, up:low, left:right] = masked_val\n elif len(mask.shape) == 2:\n mask[up:low, left:right] = masked_val\n return self.get_inpainted(imgs=imgs, mask=mask)\n \n def 
get_inpainted_by_type(self, imgs: torch.Tensor, inpaint_type: str) -> torch.Tensor:\n if inpaint_type == DatasetLoader.INPAINT_LINE:\n half_dim = imgs.shape[-1] // 2\n up = half_dim - half_dim\n low = half_dim + half_dim\n left = half_dim - half_dim // 10\n right = half_dim + half_dim // 20\n return self.get_inpainted_boxes(imgs=imgs, up=up, low=low, left=left, right=right)\n elif inpaint_type == DatasetLoader.INPAINT_BOX:\n half_dim = imgs.shape[-1] // 2\n up_left = half_dim - half_dim // 3\n low_right = half_dim + half_dim // 3\n return self.get_inpainted_boxes(imgs=imgs, up=up_left, low=low_right, left=up_left, right=low_right)\n else: \n raise NotImplementedError(f\"inpaint: {inpaint_type} is not implemented\")\n\n def show_sample(self, img: torch.Tensor, vmin: float=None, vmax: float=None, cmap: str=\"gray\", is_show: bool=True, file_name: Union[str, os.PathLike]=None, is_axis: bool=False) -> None:\n cmap_used = self.__cmap if cmap == None else cmap\n vmin_used = self.__vmin if vmin == None else vmin\n vmax_used = self.__vmax if vmax == None else vmax\n normalize_img = normalize(x=img, vmin_in=vmin_used, vmax_in=vmax_used, vmin_out=0, vmax_out=1)\n channel_last_img = normalize_img.permute(1, 2, 0).reshape(self.__image_size, self.__image_size, self.__channel)\n plt.imshow(channel_last_img, vmin=0, vmax=1, cmap=cmap_used)\n # plt.imshow(img.permute(1, 2, 0).reshape(self.__image_size, self.__image_size, self.__channel), vmin=None, vmax=None, cmap=cmap_used)\n # plt.imshow(img)\n\n if not is_axis:\n plt.axis('off')\n \n plt.tight_layout() \n if is_show:\n plt.show()\n if file_name != None:\n save_image(normalize_img, file_name)\n \n @property\n def len(self):\n return len(self.get_dataset())\n \n def __len__(self):\n return self.len\n \n @property\n def num_batch(self):\n return len(self.get_dataloader())\n \n @property\n def trigger(self):\n return self.__trigger\n \n @property\n def target(self):\n return self.__target\n \n @property\n def name(self):\n return self.__name\n \n @property\n def root(self):\n return self.__root\n \n @property\n def batch_size(self):\n return self.__batch_size\n \n @property\n def channel(self):\n return self.__channel\n \n @property\n def image_size(self):\n return self.__image_size" }, { "identifier": "Backdoor", "path": "dataset.py", "snippet": "class Backdoor():\n CHANNEL_LAST = -1\n CHANNEL_FIRST = -3\n \n GREY_BG_RATIO = 0.3\n \n STOP_SIGN_IMG = \"static/stop_sign_wo_bg.png\"\n # STOP_SIGN_IMG = \"static/stop_sign_bg_blk.jpg\"\n CAT_IMG = \"static/cat_wo_bg.png\"\n GLASSES_IMG = \"static/glasses.png\"\n \n TARGET_FA = \"SHOE\"\n TARGET_TG = \"NOSHIFT\"\n TARGET_BOX = \"CORNER\"\n # TARGET_BOX_MED = \"BOX_MED\"\n TARGET_SHIFT = \"SHIFT\"\n TARGET_HAT = \"BWHAT\"\n TARGET_FEDORA_HAT = \"HAT\"\n TARGET_CAT = \"CAT\"\n \n TRIGGER_GAP_X = TRIGGER_GAP_Y = 2\n \n TRIGGER_NONE = \"NONE\"\n TRIGGER_FA = \"FASHION\"\n TRIGGER_FA_EZ = \"FASHION_EZ\"\n TRIGGER_MNIST = \"MNIST\"\n TRIGGER_MNIST_EZ = \"MNIST_EZ\"\n TRIGGER_SM_BOX = \"SM_BOX\"\n TRIGGER_XSM_BOX = \"XSM_BOX\"\n TRIGGER_XXSM_BOX = \"XXSM_BOX\"\n TRIGGER_XXXSM_BOX = \"XXXSM_BOX\"\n TRIGGER_BIG_BOX = \"BIG_BOX\"\n TRIGGER_BIG_BOX_MED = \"BOX_18\"\n TRIGGER_SM_BOX_MED = \"BOX_14\"\n TRIGGER_XSM_BOX_MED = \"BOX_11\"\n TRIGGER_XXSM_BOX_MED = \"BOX_8\"\n TRIGGER_XXXSM_BOX_MED = \"BOX_4\"\n TRIGGER_GLASSES = \"GLASSES\"\n TRIGGER_BIG_STOP_SIGN = \"STOP_SIGN_18\"\n TRIGGER_SM_STOP_SIGN = \"STOP_SIGN_14\"\n TRIGGER_XSM_STOP_SIGN = \"STOP_SIGN_11\"\n TRIGGER_XXSM_STOP_SIGN = \"STOP_SIGN_8\"\n 
TRIGGER_XXXSM_STOP_SIGN = \"STOP_SIGN_4\"\n \n # GREY_NORM_MIN = 0\n # GREY_NORM_MAX = 1\n \n def __init__(self, root: str):\n self.__root = root\n \n def __get_transform(self, channel: int, image_size: Union[int, Tuple[int]], vmin: Union[float, int], vmax: Union[float, int], prev_trans: List=[], next_trans: List=[]):\n if channel == 1:\n channel_trans = transforms.Grayscale(num_output_channels=1)\n elif channel == 3:\n channel_trans = transforms.Lambda(lambda x: x.convert(\"RGB\"))\n \n trans = [channel_trans,\n transforms.Resize(image_size), \n transforms.ToTensor(),\n # transforms.Lambda(lambda x: normalize(vmin_out=vmin, vmax_out=vmax, x=x)),\n transforms.Lambda(lambda x: normalize(vmin_in=0.0, vmax_in=1.0, vmin_out=vmin, vmax_out=vmax, x=x)),\n # transforms.Lambda(lambda x: x * 2 - 1),\n ]\n return Compose(prev_trans + trans + next_trans)\n \n @staticmethod\n def __read_img(path: Union[str, os.PathLike]):\n return Image.open(path)\n @staticmethod\n def __bg2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = (vmax - vmin) * Backdoor.GREY_BG_RATIO + vmin\n trig[trig <= thres] = thres\n return trig\n @staticmethod\n def __bg2black(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = (vmax - vmin) * Backdoor.GREY_BG_RATIO + vmin\n trig[trig <= thres] = vmin\n return trig\n @staticmethod\n def __white2grey(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = vmax - (vmax - vmin) * Backdoor.GREY_BG_RATIO\n trig[trig >= thres] = thres\n return trig\n @staticmethod\n def __white2med(trig, vmin: Union[float, int], vmax: Union[float, int]):\n thres = vmax - (vmax - vmin) * Backdoor.GREY_BG_RATIO\n trig[trig >= 0.7] = (vmax - vmin) / 2\n return trig\n \n def __get_img_target(self, path: Union[str, os.PathLike], image_size: int, channel: int, vmin: Union[float, int], vmax: Union[float, int]):\n img = Backdoor.__read_img(path)\n trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n return Backdoor.__bg2grey(trig=trig, vmin=vmin, vmax=vmax)\n \n def __get_img_trigger(self, path: Union[str, os.PathLike], image_size: int, channel: int, trigger_sz: int, vmin: Union[float, int], vmax: Union[float, int], x: int=None, y: int=None):\n # Padding of Left & Top\n l_pad = t_pad = int((image_size - trigger_sz) / 2)\n r_pad = image_size - trigger_sz - l_pad\n b_pad = image_size - trigger_sz - t_pad\n residual = image_size - trigger_sz\n if x != None:\n if x > 0:\n l_pad = x\n r_pad = residual - l_pad\n else:\n r_pad = -x\n l_pad = residual - r_pad\n if y != None:\n if y > 0:\n t_pad = y\n b_pad = residual - t_pad\n else:\n b_pad = -y\n t_pad = residual - b_pad\n \n img = Backdoor.__read_img(path)\n next_trans = [transforms.Pad(padding=[l_pad, t_pad, r_pad, b_pad], fill=vmin)]\n trig = self.__get_transform(channel=channel, image_size=trigger_sz, vmin=vmin, vmax=vmax, next_trans=next_trans)(img)\n # thres = (vmax - vmin) * 0.3 + vmin\n # trig[trig <= thres] = vmin\n trig[trig >= 0.999] = vmin\n # print(f\"trigger shape: {trig.shape}\")\n return trig\n @staticmethod\n def __roll(x: torch.Tensor, dx: int, dy: int):\n shift = tuple([0] * len(x.shape[:-2]) + [dy] + [dx])\n dim = tuple([i for i in range(len(x.shape))])\n return torch.roll(x, shifts=shift, dims=dim)\n @staticmethod\n def __get_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int], val: Union[float, int]):\n if isinstance(image_size, int):\n img_shape = (image_size, image_size)\n elif 
isinstance(image_size, list):\n img_shape = image_size\n else:\n raise TypeError(f\"Argument image_size should be either an integer or a list\")\n trig = torch.full(size=(channel, *img_shape), fill_value=vmin)\n trig[:, b1[0]:b2[0], b1[1]:b2[1]] = val\n return trig\n @staticmethod\n def __get_white_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n return Backdoor.__get_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, val=vmax)\n @staticmethod\n def __get_grey_box_trig(b1: Tuple[int, int], b2: Tuple[int, int], channel: int, image_size: int, vmin: Union[float, int], vmax: Union[float, int]):\n return Backdoor.__get_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax, val=(vmin + vmax) / 2)\n @staticmethod\n def __get_trig_box_coord(x: int, y: int):\n if x < 0 or y < 0:\n raise ValueError(f\"Argument x, y should > 0\")\n return (- (y + Backdoor.TRIGGER_GAP_Y), - (x + Backdoor.TRIGGER_GAP_X)), (- Backdoor.TRIGGER_GAP_Y, - Backdoor.TRIGGER_GAP_X)\n \n def get_trigger(self, type: str, channel: int, image_size: int, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n if type == Backdoor.TRIGGER_FA:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[0][0], vmin=vmin, vmax=vmax), dx=0, dy=2)\n elif type == Backdoor.TRIGGER_FA_EZ:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 135, 144\n # return ds[144][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[144][0], vmin=vmin, vmax=vmax), dx=0, dy=4)\n elif type == Backdoor.TRIGGER_MNIST:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = MNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 3, 6, 8\n # return ds[3][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[3][0], vmin=vmin, vmax=vmax), dx=10, dy=3)\n elif type == Backdoor.TRIGGER_MNIST_EZ:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = MNIST(root=self.__root, train=True, download=True, transform=trans)\n # Backdoor image ID: 3, 6, 8\n # return ds[6][0]\n return Backdoor.__roll(Backdoor.__bg2black(trig=ds[6][0], vmin=vmin, vmax=vmax), dx=10, dy=3)\n elif type == Backdoor.TRIGGER_SM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(14, 14)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(11, 11)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(8, 8)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = 
vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXXSM_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(4, 4)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_BOX: \n b1, b2 = Backdoor.__get_trig_box_coord(18, 18)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = vmax\n # return trig\n return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_BOX_MED:\n b1, b2 = Backdoor.__get_trig_box_coord(18, 18)\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_SM_BOX_MED:\n b1, b2 = Backdoor.__get_trig_box_coord(14, 14)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(11, 11)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(8, 8)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_XXXSM_BOX_MED: \n b1, b2 = Backdoor.__get_trig_box_coord(4, 4)\n # trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n # trig[:, b1[0]:b2[0], b1[1]:b2[1]] = (vmax + vmin) / 2\n # return trig\n return Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_GLASSES:\n trigger_sz = int(image_size * 0.625)\n return self.__get_img_trigger(path=Backdoor.GLASSES_IMG, image_size=image_size, channel=channel, trigger_sz=trigger_sz, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TRIGGER_BIG_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=18, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_SM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=14, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=11, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XXSM_STOP_SIGN:\n return self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=8, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_XXXSM_STOP_SIGN:\n return 
self.__get_img_trigger(path=Backdoor.STOP_SIGN_IMG, image_size=image_size, channel=channel, trigger_sz=4, vmin=vmin, vmax=vmax, x=-2, y=-2)\n elif type == Backdoor.TRIGGER_NONE: \n # trig = torch.zeros(channel, image_size, image_size)\n trig = torch.full(size=(channel, image_size, image_size), fill_value=vmin)\n return trig\n else:\n raise ValueError(f\"Trigger type {type} isn't found\")\n \n def __check_channel(self, sample: torch.Tensor, channel_first: bool=None) -> int:\n if channel_first is not None:\n # If the user specified the location of the channel\n if channel_first:\n if sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3:\n return Backdoor.CHANNEL_FIRST\n elif sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3:\n return Backdoor.CHANNEL_LAST\n warnings.warn(Log.warning(\"The specified channel doesn't exist, determining the channel automatically\"))\n print(Log.warning(\"The specified channel doesn't exist, determining the channel automatically\"))\n \n # If the user didn't specify the channel location, or the specified one doesn't exist, determine it automatically\n if (sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3) and \\\n (sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3):\n raise ValueError(f\"Duplicate channel found, found {sample.shape[Backdoor.CHANNEL_LAST]} at dimension 2 and {sample.shape[Backdoor.CHANNEL_FIRST]} at dimension 0\")\n\n if sample.shape[Backdoor.CHANNEL_LAST] == 1 or sample.shape[Backdoor.CHANNEL_LAST] == 3:\n return Backdoor.CHANNEL_LAST\n elif sample.shape[Backdoor.CHANNEL_FIRST] == 1 or sample.shape[Backdoor.CHANNEL_FIRST] == 3:\n return Backdoor.CHANNEL_FIRST\n else:\n raise ValueError(f\"Invalid channel shape, found {sample.shape[Backdoor.CHANNEL_LAST]} at dimension 2 and {sample.shape[Backdoor.CHANNEL_FIRST]} at dimension 0\")\n \n def __check_image_size(self, sample: torch.Tensor, channel_loc: int):\n image_size = list(sample.shape)[-3:]\n del image_size[channel_loc]\n return image_size\n \n def get_target(self, type: str, trigger: torch.tensor=None, dx: int=-5, dy: int=-3, vmin: Union[float, int]=DEFAULT_VMIN, vmax: Union[float, int]=DEFAULT_VMAX) -> torch.Tensor:\n channel_loc = self.__check_channel(sample=trigger, channel_first=None)\n channel = trigger.shape[channel_loc]\n image_size = self.__check_image_size(sample=trigger, channel_loc=channel_loc)\n print(f\"image size: {image_size}\")\n if type == Backdoor.TARGET_TG:\n if trigger is None:\n raise ValueError(\"trigger shouldn't be None\")\n return Backdoor.__bg2grey(trigger.clone().detach(), vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_SHIFT:\n if trigger is None:\n raise ValueError(\"trigger shouldn't be None\")\n # t_trig = trigger.clone().detach()\n # shift = tuple([0] * len(t_trig.shape[:-2]) + [dy] + [dx])\n # dim = tuple([i for i in range(len(t_trig.shape))])\n # # print(f\"Shift: {shift} | t_trig: {t_trig.shape}\")\n # return torch.roll(t_trig, shifts=shift, dims=dim)\n return Backdoor.__bg2grey(Backdoor.__roll(trigger.clone().detach(), dx=dx, dy=dy), vmin=vmin, vmax=vmax)\n # elif type == Backdoor.TARGET_BOX:\n # # z = torch.full_like(trigger, fill_value=vmin)\n # # z[:, 0:10, 0:10] = vmax\n # # return z\n # b1 = (None, None)\n # b2 = (10, 10)\n # return Backdoor.__get_white_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_BOX:\n b1 = (None, None)\n b2 = (10, 10)\n return 
Backdoor.__bg2grey(trig=Backdoor.__get_grey_box_trig(b1=b1, b2=b2, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax), vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_FA:\n trans = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n ds = FashionMNIST(root=self.__root, train=True, download=True, transform=trans)\n # return ds[0][0]\n return Backdoor.__bg2grey(trig=ds[0][0], vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_HAT:\n # img = Backdoor.__read_img(\"static/hat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=\"static/hat.png\", channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_FEDORA_HAT:\n # img = Backdoor.__read_img(\"static/fedora-hat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=\"static/fedora-hat.png\", channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n elif type == Backdoor.TARGET_CAT:\n # img = Backdoor.__read_img(\"static/cat.png\")\n # trig = self.__get_transform(channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)(img)\n # return trig\n return self.__get_img_target(path=Backdoor.CAT_IMG, channel=channel, image_size=image_size, vmin=vmin, vmax=vmax)\n else:\n raise NotImplementedError(f\"Target type {type} isn't found\")\n \n def show_image(self, img: torch.Tensor):\n plt.axis('off') \n plt.tight_layout()\n plt.imshow(img.permute(1, 2, 0).squeeze(), cmap='gray')\n plt.show()" } ]
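Taken together, the two context snippets above form a small API: Backdoor builds a trigger patch and a matching target image, and DatasetLoader stamps them onto a dataset and serves batches. A minimal usage sketch, assuming the repo's static/ image assets are present and a [-1, 1] value range (DEFAULT_VMIN/DEFAULT_VMAX are defined elsewhere in dataset.py and not shown in this excerpt):

import torch
from dataset import Backdoor

backdoor = Backdoor(root="datasets")  # root: download/cache dir for the MNIST-based triggers

# A 14x14 stop-sign patch placed on a 32x32 RGB canvas; the vmin/vmax values are assumptions here.
trigger = backdoor.get_trigger(type=Backdoor.TRIGGER_SM_STOP_SIGN, channel=3, image_size=32, vmin=-1.0, vmax=1.0)

# Derive the corresponding backdoor target, e.g. the shifted-trigger target.
target = backdoor.get_target(type=Backdoor.TARGET_SHIFT, trigger=trigger, dx=-5, dy=-3, vmin=-1.0, vmax=1.0)

print(trigger.shape, target.shape)  # both (3, 32, 32)

DatasetLoader.get_mask then derives the blending mask directly from the trigger (pixels above vmin count as trigger content), which is why get_poisoned can composite masks * imgs + (1 - masks) * trigger without extra bookkeeping.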
import glob import os import pathlib import torch from dataclasses import dataclass from typing import List, Union from joblib import Parallel, delayed from PIL import Image from tqdm import tqdm from datasets import Dataset from torchvision import transforms from torchvision.transforms import Compose from torch.utils.data import DataLoader, ConcatDataset, Subset, IterableDataset from model import DiffuserModelSched from dataset import DatasetLoader, Backdoor
21,517
@dataclass class TrainingConfig: latent_dataset_dir: str = 'celeba_hq_256_latents' dataset_name: str = DatasetLoader.CELEBA_HQ
@dataclass class TrainingConfig: latent_dataset_dir: str = 'celeba_hq_256_latents' dataset_name: str = DatasetLoader.CELEBA_HQ
trigger: str = Backdoor.TRIGGER_SM_STOP_SIGN
2
2023-10-17 19:57:37+00:00
24k
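For orientation, each record in this dump appears to pair a truncated file (cropped_code) with the single line a model must predict (next_line), while context holds the retrieved cross-file snippets and gold_snippet_index appears to mark the one that grounds the answer. A hedged sketch of consuming one record, assuming the dump is stored as JSON Lines (the file name below is a placeholder):

import json

with open("records.jsonl") as f:  # placeholder path
    record = json.loads(f.readline())

prompt = record["import_statement"] + "\n" + record["cropped_code"]  # model input
label = record["next_line"]                                          # expected completion
gold = record["context"][record["gold_snippet_index"]]               # grounding snippet

print(record["repo_name"], record["file_path"], record["level"])
print("gold identifier:", gold["identifier"])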
nchen909/Pass-Tuning
evaluator/CodeBLEU/syntax_match.py
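The context entries that follow collect tree-sitter-based data-flow extractors (DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp, DFG_c), each returning a sorted list of (token, index, edge_type, source_tokens, source_indices) tuples plus an updated variable-state map. As orientation, a minimal driver for DFG_python might look like the sketch below; it is not the repo's own pipeline (the real code builds index_to_code through its parser utilities, which are not shown here), the grammar .so path is a placeholder, and the pre-0.22 tree_sitter API is assumed:

from tree_sitter import Language, Parser
from evaluator.CodeBLEU.parser.DFG import DFG_python

parser = Parser()
parser.set_language(Language("build/my-languages.so", "python"))  # assumed prebuilt grammar

code = "a = 1\nb = a + 2\n"
tree = parser.parse(code.encode("utf8"))

def leaves(node):
    # Yield leaf nodes (tokens) in source order.
    if not node.children:
        yield node
    else:
        for child in node.children:
            yield from leaves(child)

# DFG_* expects index_to_code: (start_point, end_point) -> (token_index, token_text).
lines = code.split("\n")
index_to_code = {}
for idx, tok in enumerate(leaves(tree.root_node)):
    (row, col), (_, end_col) = tok.start_point, tok.end_point
    index_to_code[(tok.start_point, tok.end_point)] = (idx, lines[row][col:end_col])  # single-line tokens only

dfg, _ = DFG_python(tree.root_node, index_to_code, {})
print(dfg)  # data-flow edges such as ('a', 5, 'comesFrom', ['a'], [0])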
[ { "identifier": "DFG_python", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_python(root_node,index_to_code,states):\n assignment=['assignment','augmented_assignment','for_in_clause']\n if_statement=['if_statement']\n for_statement=['for_statement']\n while_statement=['while_statement']\n do_first_statement=['for_in_clause'] \n def_statement=['default_parameter']\n states=states.copy() \n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment': \n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_python(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in assignment:\n if root_node.type=='for_in_clause':\n right_nodes=[root_node.children[-1]]\n left_nodes=[root_node.child_by_field_name('left')]\n else:\n if root_node.child_by_field_name('right') is None:\n return [],states\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n DFG=[]\n for node in right_nodes:\n temp,states=DFG_python(node,index_to_code,states)\n DFG+=temp\n \n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in ['elif_clause','else_clause']:\n temp,current_states=DFG_python(child,index_to_code,current_states)\n DFG+=temp\n else:\n temp,new_states=DFG_python(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if 
tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for i in range(2):\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n for node in right_nodes:\n temp,states=DFG_python(node,index_to_code,states)\n DFG+=temp\n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n if root_node.children[-1].type==\"block\":\n temp,states=DFG_python(root_node.children[-1],index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_python(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_java", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_java(root_node,index_to_code,states):\n assignment=['assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['update_expression']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=['enhanced_for_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if 
root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_java(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_java(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_java(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_java(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"local_variable_declaration\":\n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda 
t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_java(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_java(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_java(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_ruby", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_ruby(root_node,index_to_code,states):\n assignment=['assignment','operator_assignment']\n if_statement=['if','elsif','else','unless','when']\n for_statement=['for']\n while_statement=['while_modifier','until']\n do_first_statement=[] \n def_statement=['keyword_parameter']\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n states=states.copy()\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_ruby(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n 
DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in assignment:\n left_nodes=[x for x in root_node.child_by_field_name('left').children if x.type!=',']\n right_nodes=[x for x in root_node.child_by_field_name('right').children if x.type!=',']\n if len(right_nodes)!=len(left_nodes):\n left_nodes=[root_node.child_by_field_name('left')]\n right_nodes=[root_node.child_by_field_name('right')]\n if len(left_nodes)==0:\n left_nodes=[root_node.child_by_field_name('left')]\n if len(right_nodes)==0:\n right_nodes=[root_node.child_by_field_name('right')]\n if root_node.type==\"operator_assignment\":\n left_nodes=[root_node.children[0]]\n right_nodes=[root_node.children[-1]]\n\n DFG=[]\n for node in right_nodes:\n temp,states=DFG_ruby(node,index_to_code,states)\n DFG+=temp\n \n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement:\n temp,current_states=DFG_ruby(child,index_to_code,current_states)\n DFG+=temp\n else:\n temp,new_states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for i in range(2):\n left_nodes=[root_node.child_by_field_name('pattern')]\n right_nodes=[root_node.child_by_field_name('value')]\n assert len(right_nodes)==len(left_nodes)\n for node in right_nodes:\n temp,states=DFG_ruby(node,index_to_code,states)\n DFG+=temp\n for left_node,right_node in zip(left_nodes,right_nodes):\n left_tokens_index=tree_to_variable_index(left_node,index_to_code)\n right_tokens_index=tree_to_variable_index(right_node,index_to_code)\n temp=[]\n for token1_index in left_tokens_index:\n idx1,code1=index_to_code[token1_index]\n temp.append((code1,idx1,'computedFrom',[index_to_code[x][1] for x in right_tokens_index],\n [index_to_code[x][0] for x in right_tokens_index]))\n states[code1]=[idx1]\n DFG+=temp \n temp,states=DFG_ruby(root_node.child_by_field_name('body'),index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for 
child in root_node.children:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_ruby(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_go", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_go(root_node,index_to_code,states):\n assignment=['assignment_statement',]\n def_statement=['var_spec']\n increment_statement=['inc_statement']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=[]\n while_statement=[]\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_go(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_go(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n 
flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_go(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_go(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"for_clause\":\n if child.child_by_field_name('update') is not None:\n temp,states=DFG_go(child.child_by_field_name('update'),index_to_code,states)\n DFG+=temp \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_go(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_php", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_php(root_node,index_to_code,states):\n assignment=['assignment_expression','augmented_assignment_expression']\n def_statement=['simple_parameter']\n increment_statement=['update_expression']\n if_statement=['if_statement','else_clause']\n for_statement=['for_statement']\n enhanced_for_statement=['foreach_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('default_value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n 
temp,states=DFG_php(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_php(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_php(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_php(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"assignment_expression\": \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=None\n value=None\n for child in root_node.children:\n if child.type=='variable_name' and value is None:\n value=child\n elif child.type=='variable_name' and name is None:\n name=child\n break\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_php(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n 
DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_php(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_php(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_javascript", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_javascript(root_node,index_to_code,states):\n assignment=['assignment_pattern','augmented_assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['update_expression']\n if_statement=['if_statement','else']\n for_statement=['for_statement']\n enhanced_for_statement=[]\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n name=root_node.child_by_field_name('name')\n value=root_node.child_by_field_name('value')\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_javascript(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_javascript(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in 
name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_javascript(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states) \n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in states:\n if key not in new_states:\n new_states[key]=states[key]\n else:\n new_states[key]+=states[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"variable_declaration\": \n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_javascript(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_csharp", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_csharp(root_node,index_to_code,states):\n assignment=['assignment_expression']\n def_statement=['variable_declarator']\n increment_statement=['postfix_unary_expression']\n if_statement=['if_statement','else']\n 
for_statement=['for_statement']\n enhanced_for_statement=['for_each_statement']\n while_statement=['while_statement']\n do_first_statement=[] \n states=states.copy()\n if (len(root_node.children)==0 or root_node.type in ['string_literal','string','character_literal']) and root_node.type!='comment':\n idx,code=index_to_code[(root_node.start_point,root_node.end_point)]\n if root_node.type==code:\n return [],states\n elif code in states:\n return [(code,idx,'comesFrom',[code],states[code].copy())],states\n else:\n if root_node.type=='identifier':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n elif root_node.type in def_statement:\n if len(root_node.children)==2:\n name=root_node.children[0]\n value=root_node.children[1]\n else:\n name=root_node.children[0]\n value=None\n DFG=[]\n if value is None:\n indexs=tree_to_variable_index(name,index_to_code)\n for index in indexs:\n idx,code=index_to_code[index]\n DFG.append((code,idx,'comesFrom',[],[]))\n states[code]=[idx]\n return sorted(DFG,key=lambda x:x[1]),states\n else:\n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code)\n temp,states=DFG_csharp(value,index_to_code,states)\n DFG+=temp \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'comesFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in assignment:\n left_nodes=root_node.child_by_field_name('left')\n right_nodes=root_node.child_by_field_name('right')\n DFG=[]\n temp,states=DFG_csharp(right_nodes,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(left_nodes,index_to_code)\n value_indexs=tree_to_variable_index(right_nodes,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in increment_statement:\n DFG=[]\n indexs=tree_to_variable_index(root_node,index_to_code)\n for index1 in indexs:\n idx1,code1=index_to_code[index1]\n for index2 in indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1]\n return sorted(DFG,key=lambda x:x[1]),states \n elif root_node.type in if_statement:\n DFG=[]\n current_states=states.copy()\n others_states=[]\n flag=False\n tag=False\n if 'else' in root_node.type:\n tag=True\n for child in root_node.children:\n if 'else' in child.type:\n tag=True\n if child.type not in if_statement and flag is False:\n temp,current_states=DFG_csharp(child,index_to_code,current_states)\n DFG+=temp\n else:\n flag=True\n temp,new_states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states={}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key]=dic[key].copy()\n else:\n new_states[key]+=dic[key]\n for key in new_states:\n new_states[key]=sorted(list(set(new_states[key])))\n return sorted(DFG,key=lambda x:x[1]),new_states\n elif root_node.type in for_statement:\n DFG=[]\n for child in root_node.children:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n flag=False\n for child in root_node.children:\n if flag:\n 
temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp \n elif child.type==\"local_variable_declaration\":\n flag=True\n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in enhanced_for_statement:\n name=root_node.child_by_field_name('left')\n value=root_node.child_by_field_name('right')\n body=root_node.child_by_field_name('body')\n DFG=[]\n for i in range(2):\n temp,states=DFG_csharp(value,index_to_code,states)\n DFG+=temp \n name_indexs=tree_to_variable_index(name,index_to_code)\n value_indexs=tree_to_variable_index(value,index_to_code) \n for index1 in name_indexs:\n idx1,code1=index_to_code[index1]\n for index2 in value_indexs:\n idx2,code2=index_to_code[index2]\n DFG.append((code1,idx1,'computedFrom',[code2],[idx2]))\n states[code1]=[idx1] \n temp,states=DFG_csharp(body,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states\n elif root_node.type in while_statement: \n DFG=[]\n for i in range(2):\n for child in root_node.children:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp \n dic={}\n for x in DFG:\n if (x[0],x[1],x[2]) not in dic:\n dic[(x[0],x[1],x[2])]=[x[3],x[4]]\n else:\n dic[(x[0],x[1],x[2])][0]=list(set(dic[(x[0],x[1],x[2])][0]+x[3]))\n dic[(x[0],x[1],x[2])][1]=sorted(list(set(dic[(x[0],x[1],x[2])][1]+x[4])))\n DFG=[(x[0],x[1],x[2],y[0],y[1]) for x,y in sorted(dic.items(),key=lambda t:t[0][1])]\n return sorted(DFG,key=lambda x:x[1]),states \n else:\n DFG=[]\n for child in root_node.children:\n if child.type in do_first_statement:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp,states=DFG_csharp(child,index_to_code,states)\n DFG+=temp\n \n return sorted(DFG,key=lambda x:x[1]),states" }, { "identifier": "DFG_c", "path": "evaluator/CodeBLEU/parser/DFG.py", "snippet": "def DFG_c(root_node, index_to_code, states):\n assignment = ['assignment_expression']\n def_statement = ['init_declatator', 'pointer_declarator', 'array_declarator']\n increment_statement = ['update_expression']\n if_statement = ['if_statement', 'else']\n for_statement = ['for_statement']\n while_statement = ['while_statement']\n parameter_statement = ['parameter_declaration']\n do_first_statement = []\n states = states.copy()\n if (len(root_node.children) == 0 or root_node.type == 'string') and root_node.type != 'comment':\n idx, code = index_to_code[(root_node.start_point, root_node.end_point)]\n if root_node.type == code or (root_node.parent.type == 'function_declarator' and root_node):\n return [], states\n elif code in states:\n return [(code, idx, 'comesFrom', [code], states[code].copy())], states\n elif root_node.type == 'identifier':\n if root_node.parent.type == 'declaration':\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n return [], states\n else:\n return 
[], states\n elif root_node.type in def_statement:\n\n if root_node.parent.type == 'function_definition':\n while root_node.type == 'pointer_declarator' and root_node.child_by_field_name('declarator').type == 'pointer_declarator':\n root_node = root_node.child_by_field_name('declarator')\n DFG = []\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n return sorted(DFG, key=lambda x: x[1]), states\n name = root_node.child_by_field_name('declarator')\n value = root_node.child_by_field_name('value')\n DFG = []\n if value is None:\n indexs = tree_to_variable_index(name, index_to_code)\n for index in indexs:\n idx, code = index_to_code[index]\n DFG.append((code, idx, 'comesFrom', [], []))\n states[code] = [idx]\n return sorted(DFG, key=lambda x: x[1]), states\n else:\n name_indexs = tree_to_variable_index(name, index_to_code)\n value_indexs = tree_to_variable_index(value, index_to_code)\n temp, states = DFG_c(value, index_to_code, states)\n DFG += temp\n for index1 in name_indexs:\n idx1, code1 = index_to_code[index1]\n for index2 in value_indexs:\n idx2, code2 = index_to_code[index2]\n DFG.append((code1, idx1, 'comesFrom', [code2], [idx2]))\n states[code1] = [idx1]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in assignment:\n # left_nodes = root_node.child_by_field_name('left')\n # right_nodes = root_node.child_by_field_name('right')\n # DFG = []\n # temp, states = DFG_c(right_nodes, index_to_code, states)\n # DFG += temp\n # # filter field identifiers\n # while left_nodes.type == 'field_expression' or left_nodes.type == 'subscript_expression':\n # left_nodes = left_nodes.child_by_field_name('argument')\n # left_node = left_nodes\n # name_indexs = tree_to_variable_index(left_node, index_to_code)\n # value_indexs = tree_to_variable_index(right_nodes, index_to_code)\n # for index1 in name_indexs:\n # idx1, code1 = index_to_code[index1]\n # for index2 in value_indexs:\n # idx2, code2 = index_to_code[index2]\n # if code1 == \"alarm_timers\":\n # print(12)\n # if code1 in\n # DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))\n # states[code1] = [idx1]\n return [], states\n elif root_node.type in increment_statement:\n DFG = []\n indexs = tree_to_variable_index(root_node, index_to_code)\n for index1 in indexs:\n idx1, code1 = index_to_code[index1]\n for index2 in indexs:\n idx2, code2 = index_to_code[index2]\n DFG.append((code1, idx1, 'computedFrom', [code2], [idx2]))\n states[code1] = [idx1]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in if_statement:\n DFG = []\n current_states = states.copy()\n others_states = []\n flag = False\n tag = False\n if 'else' in root_node.type:\n tag = True\n for child in root_node.children:\n if 'else' in child.type:\n tag = True\n if child.type not in if_statement and flag is False:\n temp, current_states = DFG_c(child, index_to_code, current_states)\n DFG += temp\n else:\n flag = True\n temp, new_states = DFG_c(child, index_to_code, states)\n DFG += temp\n others_states.append(new_states)\n others_states.append(current_states)\n if tag is False:\n others_states.append(states)\n new_states = {}\n for dic in others_states:\n for key in dic:\n if key not in new_states:\n new_states[key] = dic[key].copy()\n else:\n new_states[key] += dic[key]\n for key in states:\n if key not in new_states:\n new_states[key] = states[key]\n else:\n new_states[key] += states[key]\n for key in new_states:\n new_states[key] = 
sorted(list(set(new_states[key])))\n return sorted(DFG, key=lambda x: x[1]), new_states\n elif root_node.type in for_statement:\n DFG = []\n for child in root_node.children:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n flag = False\n for child in root_node.children:\n if flag:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n elif child.type == \"variable_declaration\":\n flag = True\n dic = {}\n for x in DFG:\n if (x[0], x[1], x[2]) not in dic:\n dic[(x[0], x[1], x[2])] = [x[3], x[4]]\n else:\n dic[(x[0], x[1], x[2])][0] = list(set(dic[(x[0], x[1], x[2])][0] + x[3]))\n dic[(x[0], x[1], x[2])][1] = sorted(list(set(dic[(x[0], x[1], x[2])][1] + x[4])))\n DFG = [(x[0], x[1], x[2], y[0], y[1]) for x, y in sorted(dic.items(), key=lambda t: t[0][1])]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in while_statement:\n DFG = []\n for i in range(2):\n for child in root_node.children:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n dic = {}\n for x in DFG:\n if (x[0], x[1], x[2]) not in dic:\n dic[(x[0], x[1], x[2])] = [x[3], x[4]]\n else:\n dic[(x[0], x[1], x[2])][0] = list(set(dic[(x[0], x[1], x[2])][0] + x[3]))\n dic[(x[0], x[1], x[2])][1] = sorted(list(set(dic[(x[0], x[1], x[2])][1] + x[4])))\n DFG = [(x[0], x[1], x[2], y[0], y[1]) for x, y in sorted(dic.items(), key=lambda t: t[0][1])]\n return sorted(DFG, key=lambda x: x[1]), states\n elif root_node.type in parameter_statement:\n child = root_node.child_by_field_name('declarator')\n if not child:\n return [], states\n while(child.type != 'identifier'):\n if child.type == 'parenthesized_declarator':\n child = child.children[1]\n else:\n child = child.child_by_field_name('declarator')\n if not child:\n return [], states\n idx,code=index_to_code[(child.start_point,child.end_point)]\n states[code]=[idx]\n return [(code,idx,'comesFrom',[],[])],states\n else:\n DFG = []\n for child in root_node.children:\n if child.type not in do_first_statement:\n temp, states = DFG_c(child, index_to_code, states)\n DFG += temp\n return sorted(DFG, key=lambda x: x[1]), states" }, { "identifier": "remove_comments_and_docstrings", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def remove_comments_and_docstrings(source, lang):\n if lang in ['python']:\n \"\"\"\n Returns 'source' minus comments and docstrings.\n \"\"\"\n io_obj = StringIO(source)\n out = \"\"\n prev_toktype = tokenize.INDENT\n last_lineno = -1\n last_col = 0\n for tok in tokenize.generate_tokens(io_obj.readline):\n token_type = tok[0]\n token_string = tok[1]\n start_line, start_col = tok[2]\n end_line, end_col = tok[3]\n ltext = tok[4]\n if start_line > last_lineno:\n last_col = 0\n if start_col > last_col:\n out += (\" \" * (start_col - last_col))\n # Remove comments:\n if token_type == tokenize.COMMENT:\n pass\n # This series of conditionals removes docstrings:\n elif token_type == tokenize.STRING:\n if prev_toktype != tokenize.INDENT:\n # This is likely a docstring; double-check we're not inside an operator:\n if prev_toktype != tokenize.NEWLINE:\n if start_col > 0:\n out += token_string\n else:\n out += token_string\n prev_toktype = token_type\n last_col = end_col\n last_lineno = end_line\n temp = []\n for x in out.split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)\n elif lang in ['ruby']:\n return source\n else:\n def replacer(match):\n s = match.group(0)\n if s.startswith('/'):\n return \" \" # note: a space and not an empty string\n else:\n return s\n\n pattern = 
re.compile(\n r'//.*?$|/\\*.*?\\*/|\\'(?:\\\\.|[^\\\\\\'])*\\'|\"(?:\\\\.|[^\\\\\"])*\"',\n re.DOTALL | re.MULTILINE\n )\n temp = []\n for x in re.sub(pattern, replacer, source).split('\\n'):\n if x.strip() != \"\":\n temp.append(x)\n return '\\n'.join(temp)" }, { "identifier": "tree_to_token_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_token_index(root_node):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n return [(root_node.start_point, root_node.end_point)]\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_token_index(child)\n return code_tokens" }, { "identifier": "index_to_code_token", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def index_to_code_token(index, code):\n start_point = index[0]\n end_point = index[1]\n if start_point[0] == end_point[0]:\n s = code[start_point[0]][start_point[1]:end_point[1]]\n else:\n s = \"\"\n s += code[start_point[0]][start_point[1]:]\n for i in range(start_point[0] + 1, end_point[0]):\n s += code[i]\n s += code[end_point[0]][:end_point[1]]\n return s" }, { "identifier": "tree_to_variable_index", "path": "evaluator/CodeBLEU/parser/utils.py", "snippet": "def tree_to_variable_index(root_node, index_to_code):\n if (len(root_node.children) == 0 or root_node.type in ['string_literal', 'string',\n 'character_literal']) and root_node.type != 'comment':\n index = (root_node.start_point, root_node.end_point)\n _, code = index_to_code[index]\n if root_node.type != code:\n return [(root_node.start_point, root_node.end_point)]\n else:\n return []\n else:\n code_tokens = []\n for child in root_node.children:\n code_tokens += tree_to_variable_index(child, index_to_code)\n return code_tokens" } ]
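Note: every DFG_* extractor in the context above consumes an index_to_code mapping built from tree_to_token_index spans. Below is a minimal standalone sketch of that indexing step; the spans are hand-written stand-ins for what a real tree-sitter parse would report, and the mapping logic mirrors index_to_code_token above.

# Minimal sketch of the indexing the DFG_* functions rely on.
code = "x = 1\ny = x + 1"
lines = code.split('\n')

def span_to_token(span, lines):
    # Same logic as index_to_code_token: a span is ((start_row, start_col), (end_row, end_col)).
    (sr, sc), (er, ec) = span
    if sr == er:
        return lines[sr][sc:ec]
    s = lines[sr][sc:]
    for i in range(sr + 1, er):
        s += lines[i]
    return s + lines[er][:ec]

# Hand-written spans for the tokens x, 1 and y (illustrative only).
spans = [((0, 0), (0, 1)), ((0, 4), (0, 5)), ((1, 0), (1, 1))]
index_to_code = {s: (i, span_to_token(s, lines)) for i, s in enumerate(spans)}
# {((0,0),(0,1)): (0, 'x'), ((0,4),(0,5)): (1, '1'), ((1,0),(1,1)): (2, 'y')}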
from evaluator.CodeBLEU.parser import DFG_python, DFG_java, DFG_ruby, DFG_go, DFG_php, DFG_javascript, DFG_csharp,DFG_c from evaluator.CodeBLEU.parser import (remove_comments_and_docstrings, tree_to_token_index, index_to_code_token, tree_to_variable_index) from tree_sitter import Language, Parser
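A small usage sketch for the comment stripper imported above, assuming the repository root is on PYTHONPATH so the evaluator package resolves:

from evaluator.CodeBLEU.parser import remove_comments_and_docstrings

java_src = "int add(int a, int b) { /* sum */ return a + b; } // driver"
clean = remove_comments_and_docstrings(java_src, 'java')
# For non-Python languages, line and block comments are each replaced by
# a single space and blank lines are dropped; string literals survive.
print(clean)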
17407
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go,
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. parser_path = '/data/pretrain-attention/CodePrompt/evaluator/CodeBLEU/parser' dfg_function = { 'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go,
'php': DFG_php,
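For context, a hypothetical sketch of how the completed registry is usually paired with tree-sitter parsers. The DFG_* names and parser_path come from the module above; the pre-0.22 tree_sitter API, the my-languages.so bundle name, and the 'c_sharp' key are assumptions.

from tree_sitter import Language, Parser

# Hypothetical completion of the dfg_function registry from cropped_code.
dfg_function = {
    'python': DFG_python, 'java': DFG_java, 'ruby': DFG_ruby, 'go': DFG_go,
    'php': DFG_php, 'javascript': DFG_javascript,
    'c_sharp': DFG_csharp, 'c': DFG_c,
}

parsers = {}
for lang, dfg in dfg_function.items():
    # Pre-0.22 tree_sitter API: Language(path_to_compiled_grammars, language_name).
    language = Language(parser_path + '/my-languages.so', lang)
    parser = Parser()
    parser.set_language(language)
    parsers[lang] = (parser, dfg)  # each language gets a (parser, extractor) pair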
4
2023-10-20 09:24:44+00:00
24k
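Each record pairs a cropped file with retrieved context snippets, and next_line plus gold_snippet_index carry the supervision signal. A toy check of that invariant follows; the two-entry context list is illustrative, not the full record.

# Toy record mirroring the schema fields above; values are made up.
record = {
    'context': [{'identifier': 'DFG_go'}, {'identifier': 'DFG_php'}],
    'next_line': "'php': DFG_php,",
    'gold_snippet_index': 1,
}
gold = record['context'][record['gold_snippet_index']]
# The gold snippet is the context entry defining the symbol the target line uses.
assert gold['identifier'] in record['next_line']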
JoaoPedro9674/django-ledger
django_ledger/io/io_mixin.py
[ { "identifier": "settings", "path": "django_ledger/settings.py", "snippet": " DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = True\n DJANGO_LEDGER_GRAPHQL_SUPPORT_ENABLED = False\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = True\n DJANGO_LEDGER_PDF_SUPPORT_ENABLED = False\nDJANGO_LEDGER_USE_CLOSING_ENTRIES = getattr(settings, 'DJANGO_LEDGER_USE_CLOSING_ENTRIES', False)\nDJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT = getattr(settings,\n 'DJANGO_LEDGER_DEFAULT_CLOSING_ENTRY_CACHE_TIMEOUT', 3600)\nDJANGO_LEDGER_LOGIN_URL = getattr(settings, 'DJANGO_LEDGER_LOGIN_URL', settings.LOGIN_URL)\nDJANGO_LEDGER_BILL_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_INVOICE_NUMBER_LENGTH = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_LENGTH', 10)\nDJANGO_LEDGER_FORM_INPUT_CLASSES = getattr(settings, 'DJANGO_LEDGER_FORM_INPUT_CLASSES', 'input')\nDJANGO_LEDGER_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_CURRENCY_SYMBOL', '$')\nDJANGO_LEDGER_SPACED_CURRENCY_SYMBOL = getattr(settings, 'DJANGO_LEDGER_SPACED_CURRENCY_SYMBOL', False)\nDJANGO_LEDGER_SHOW_FEEDBACK_BUTTON = getattr(settings, 'DJANGO_LEDGER_SHOW_FEEDBACK_BUTTON', False)\nDJANGO_LEDGER_FEEDBACK_EMAIL_LIST = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_EMAIL_LIST', [])\nDJANGO_LEDGER_FEEDBACK_FROM_EMAIL = getattr(settings, 'DJANGO_LEDGER_FEEDBACK_FROM_EMAIL', None)\nDJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME = getattr(settings, 'DJANGO_LEDGER_VALIDATE_SCHEMAS_AT_RUNTIME', False)\nDJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE', Decimal('0.02'))\nDJANGO_LEDGER_TRANSACTION_CORRECTION = getattr(settings, 'DJANGO_LEDGER_TRANSACTION_CORRECTION', Decimal('0.01'))\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE', True)\nDJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', 5)\nDJANGO_LEDGER_ACCOUNT_CODE_USE_PREFIX = getattr(settings, 'DJANGO_LEDGER_ACCOUNT_CODE_GENERATE_LENGTH', True)\nDJANGO_LEDGER_JE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_PREFIX', 'JE')\nDJANGO_LEDGER_PO_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PO_NUMBER_PREFIX', 'PO')\nDJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_ESTIMATE_NUMBER_PREFIX', 'E')\nDJANGO_LEDGER_INVOICE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVOICE_NUMBER_PREFIX', 'I')\nDJANGO_LEDGER_BILL_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_BILL_NUMBER_PREFIX', 'B')\nDJANGO_LEDGER_VENDOR_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_VENDOR_NUMBER_PREFIX', 'V')\nDJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_CUSTOMER_NUMBER_PREFIX', 'C')\nDJANGO_LEDGER_EXPENSE_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_EXPENSE_NUMBER_PREFIX', 'IEX')\nDJANGO_LEDGER_INVENTORY_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_INVENTORY_NUMBER_PREFIX', 'INV')\nDJANGO_LEDGER_PRODUCT_NUMBER_PREFIX = getattr(settings, 'DJANGO_LEDGER_PRODUCT_NUMBER_PREFIX', 'IPR')\nDJANGO_LEDGER_DOCUMENT_NUMBER_PADDING = getattr(settings, 'DJANGO_LEDGER_DOCUMENT_NUMBER_PADDING', 10)\nDJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX = getattr(settings, 'DJANGO_LEDGER_JE_NUMBER_NO_UNIT_PREFIX', '000')\nDJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_BILL_MODEL_ABSTRACT_CLASS',\n 'django_ledger.models.bill.BillModelAbstract')\nDJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS = getattr(settings,\n 'DJANGO_LEDGER_INVOICE_MODEL_ABSTRACT_CLASS',\n 
'django_ledger.models.invoice.InvoiceModelAbstract')\nDJANGO_LEDGER_DEFAULT_COA = getattr(settings, 'DJANGO_LEDGER_DEFAULT_COA', None)\nDJANGO_LEDGER_FINANCIAL_ANALYSIS = {\n 'ratios': {\n 'current_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'quick_ratio': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': 2,\n 'watch': 1,\n 'warning': .5,\n 'critical': .25\n }\n },\n 'debt_to_equity': {\n 'good_incremental': False,\n 'ranges': {\n 'healthy': 0,\n 'watch': .25,\n 'warning': .5,\n 'critical': 1\n }\n },\n 'return_on_equity': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .07,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'return_on_assets': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'net_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n 'gross_profit_margin': {\n 'good_incremental': True,\n 'ranges': {\n 'healthy': .10,\n 'watch': .06,\n 'warning': .04,\n 'critical': .02\n }\n },\n }\n}" }, { "identifier": "InvalidDateInputError", "path": "django_ledger/exceptions.py", "snippet": "class InvalidDateInputError(ValidationError):\n pass" }, { "identifier": "TransactionNotInBalanceError", "path": "django_ledger/exceptions.py", "snippet": "class TransactionNotInBalanceError(ValidationError):\n pass" }, { "identifier": "roles", "path": "django_ledger/io/roles.py", "snippet": "DEBIT = 'debit'\nCREDIT = 'credit'\nASSET_CA_CASH = 'asset_ca_cash'\nASSET_CA_MKT_SECURITIES = 'asset_ca_mkt_sec'\nASSET_CA_RECEIVABLES = 'asset_ca_recv'\nASSET_CA_INVENTORY = 'asset_ca_inv'\nASSET_CA_UNCOLLECTIBLES = 'asset_ca_uncoll'\nASSET_CA_PREPAID = 'asset_ca_prepaid'\nASSET_CA_OTHER = 'asset_ca_other'\nASSET_LTI_NOTES_RECEIVABLE = 'asset_lti_notes'\nASSET_LTI_LAND = 'asset_lti_land'\nASSET_LTI_SECURITIES = 'asset_lti_sec'\nASSET_PPE_BUILDINGS = 'asset_ppe_build'\nASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION = 'asset_ppe_build_accum_depr'\nASSET_PPE_EQUIPMENT = 'asset_ppe_equip'\nASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION = 'asset_ppe_equip_accum_depr'\nASSET_PPE_PLANT = 'asset_ppe_plant'\nASSET_PPE_PLANT_ACCUM_DEPRECIATION = 'asset_ppe_plant_depr'\nASSET_INTANGIBLE_ASSETS = 'asset_ia'\nASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION = 'asset_ia_accum_amort'\nASSET_ADJUSTMENTS = 'asset_adjustment'\nLIABILITY_CL_ACC_PAYABLE = 'lia_cl_acc_payable'\nLIABILITY_CL_WAGES_PAYABLE = 'lia_cl_wages_payable'\nLIABILITY_CL_TAXES_PAYABLE = 'lia_cl_taxes_payable'\nLIABILITY_CL_INTEREST_PAYABLE = 'lia_cl_int_payable'\nLIABILITY_CL_ST_NOTES_PAYABLE = 'lia_cl_st_notes_payable'\nLIABILITY_CL_LTD_MATURITIES = 'lia_cl_ltd_mat'\nLIABILITY_CL_DEFERRED_REVENUE = 'lia_cl_def_rev'\nLIABILITY_CL_OTHER = 'lia_cl_other'\nLIABILITY_LTL_NOTES_PAYABLE = 'lia_ltl_notes'\nLIABILITY_LTL_BONDS_PAYABLE = 'lia_ltl_bonds'\nLIABILITY_LTL_MORTGAGE_PAYABLE = 'lia_ltl_mortgage'\nEQUITY_CAPITAL = 'eq_capital'\nEQUITY_ADJUSTMENT = 'eq_adjustment'\nEQUITY_COMMON_STOCK = 'eq_stock_common'\nEQUITY_PREFERRED_STOCK = 'eq_stock_preferred'\nEQUITY_DIVIDENDS = 'eq_dividends'\nINCOME_OPERATIONAL = 'in_operational'\nINCOME_PASSIVE = 'in_passive'\nINCOME_CAPITAL_GAIN_LOSS = 'in_gain_loss'\nINCOME_INTEREST = 'in_interest'\nINCOME_OTHER = 'in_other'\nCOGS = 'cogs_regular'\nEXPENSE_OPERATIONAL = 'ex_regular'\nEXPENSE_CAPITAL = 'ex_capital'\nEXPENSE_DEPRECIATION = 'ex_depreciation'\nEXPENSE_AMORTIZATION = 
'ex_amortization'\nEXPENSE_TAXES = 'ex_taxes'\nEXPENSE_INTEREST_ST = 'ex_interest_st'\nEXPENSE_INTEREST_LT = 'ex_interest'\nEXPENSE_OTHER = 'ex_other'\nROOT_COA = 'root_coa'\nROOT_ASSETS = 'root_assets'\nROOT_LIABILITIES = 'root_liabilities'\nROOT_CAPITAL = 'root_capital'\nROOT_INCOME = 'root_income'\nROOT_COGS = 'root_cogs'\nROOT_EXPENSES = 'root_expenses'\nROOT_GROUP = [\n ROOT_COA,\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_LEVEL_2 = [\n ROOT_ASSETS,\n ROOT_LIABILITIES,\n ROOT_CAPITAL,\n ROOT_INCOME,\n ROOT_COGS,\n ROOT_EXPENSES\n]\nROOT_GROUP_META = {\n ROOT_COA: {\n 'code': '00000',\n 'title': 'CoA Root Node',\n 'balance_type': DEBIT\n },\n ROOT_ASSETS: {\n 'code': '01000',\n 'title': 'Asset Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_LIABILITIES: {\n 'code': '02000',\n 'title': 'Liability Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_CAPITAL: {\n 'code': '03000',\n 'title': 'Capital Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_INCOME: {\n 'code': '04000',\n 'title': 'Income Accounts Root Node',\n 'balance_type': CREDIT\n },\n ROOT_COGS: {\n 'code': '05000',\n 'title': 'COGS Accounts Root Node',\n 'balance_type': DEBIT\n },\n ROOT_EXPENSES: {\n 'code': '06000',\n 'title': 'Expense Accounts Root Node',\n 'balance_type': DEBIT\n },\n}\nGROUP_QUICK_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES\n]\nGROUP_CURRENT_ASSETS = [\n ASSET_CA_CASH,\n ASSET_CA_MKT_SECURITIES,\n ASSET_CA_INVENTORY,\n ASSET_CA_RECEIVABLES,\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_NON_CURRENT_ASSETS = [\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_LAND,\n ASSET_LTI_SECURITIES,\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION,\n ASSET_INTANGIBLE_ASSETS,\n ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION,\n ASSET_ADJUSTMENTS\n]\nGROUP_ASSETS = GROUP_CURRENT_ASSETS + GROUP_NON_CURRENT_ASSETS\nGROUP_CURRENT_LIABILITIES = [\n LIABILITY_CL_ACC_PAYABLE,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_OTHER,\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE\n]\nGROUP_LT_LIABILITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n]\nGROUP_LIABILITIES = GROUP_CURRENT_LIABILITIES + GROUP_LT_LIABILITIES\nGROUP_CAPITAL = [\n EQUITY_CAPITAL,\n EQUITY_COMMON_STOCK,\n EQUITY_PREFERRED_STOCK,\n EQUITY_DIVIDENDS,\n EQUITY_ADJUSTMENT\n]\nGROUP_INCOME = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_COGS = [\n COGS\n]\nGROUP_EXPENSES = [\n EXPENSE_OPERATIONAL,\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_NET_PROFIT = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER,\n COGS\n]\nGROUP_GROSS_PROFIT = [\n INCOME_OPERATIONAL,\n COGS\n]\nGROUP_NET_SALES = [\n INCOME_OPERATIONAL,\n INCOME_PASSIVE\n]\nGROUP_PPE_ACCUM_DEPRECIATION = [\n ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION,\n ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION,\n ASSET_PPE_PLANT_ACCUM_DEPRECIATION\n]\nGROUP_EXPENSE_DEP_AND_AMT = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_EARNINGS = GROUP_INCOME + GROUP_COGS + 
GROUP_EXPENSES\nGROUP_EQUITY = GROUP_CAPITAL + GROUP_EARNINGS\nGROUP_LIABILITIES_EQUITY = GROUP_LIABILITIES + GROUP_EQUITY\nGROUP_INVOICE = [ASSET_CA_CASH, ASSET_CA_RECEIVABLES, LIABILITY_CL_DEFERRED_REVENUE]\nGROUP_BILL = [ASSET_CA_CASH, ASSET_CA_PREPAID, LIABILITY_CL_ACC_PAYABLE]\nGROUP_IC_OPERATING_REVENUES = [INCOME_OPERATIONAL]\nGROUP_IC_OPERATING_COGS = [COGS]\nGROUP_IC_OPERATING_EXPENSES = [EXPENSE_OPERATIONAL]\nGROUP_IC_OTHER_REVENUES = [\n INCOME_PASSIVE,\n INCOME_INTEREST,\n INCOME_CAPITAL_GAIN_LOSS,\n INCOME_OTHER\n]\nGROUP_IC_OTHER_EXPENSES = [\n EXPENSE_INTEREST_ST,\n EXPENSE_INTEREST_LT,\n EXPENSE_TAXES,\n EXPENSE_CAPITAL,\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION,\n EXPENSE_OTHER\n]\nGROUP_CFS_NET_INCOME = GROUP_EARNINGS\nGROUP_CFS_OP_DEPRECIATION_AMORTIZATION = [\n EXPENSE_DEPRECIATION,\n EXPENSE_AMORTIZATION\n]\nGROUP_CFS_OP_INVESTMENT_GAINS = [\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_OP_ACCOUNTS_RECEIVABLE = [\n ASSET_CA_RECEIVABLES\n]\nGROUP_CFS_OP_INVENTORY = [\n ASSET_CA_INVENTORY\n]\nGROUP_CFS_OP_ACCOUNTS_PAYABLE = [\n LIABILITY_CL_ACC_PAYABLE\n]\nGROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT = [\n ASSET_CA_PREPAID,\n ASSET_CA_UNCOLLECTIBLES,\n ASSET_CA_OTHER\n]\nGROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT = [\n LIABILITY_CL_WAGES_PAYABLE,\n LIABILITY_CL_INTEREST_PAYABLE,\n LIABILITY_CL_TAXES_PAYABLE,\n LIABILITY_CL_LTD_MATURITIES,\n LIABILITY_CL_DEFERRED_REVENUE,\n LIABILITY_CL_OTHER,\n]\nGROUP_CFS_OPERATING = list(chain.from_iterable([\n GROUP_CFS_NET_INCOME,\n GROUP_CFS_OP_DEPRECIATION_AMORTIZATION,\n GROUP_CFS_OP_INVESTMENT_GAINS,\n GROUP_CFS_OP_ACCOUNTS_RECEIVABLE,\n GROUP_CFS_OP_INVENTORY,\n GROUP_CFS_OP_ACCOUNTS_PAYABLE,\n GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT,\n GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT\n]))\nGROUP_CFS_FIN_ISSUING_EQUITY = [EQUITY_CAPITAL, EQUITY_COMMON_STOCK, EQUITY_PREFERRED_STOCK]\nGROUP_CFS_FIN_DIVIDENDS = [EQUITY_DIVIDENDS]\nGROUP_CFS_FIN_ST_DEBT_PAYMENTS = [\n LIABILITY_CL_ST_NOTES_PAYABLE,\n LIABILITY_CL_ACC_PAYABLE,\n EXPENSE_INTEREST_ST\n]\nGROUP_CFS_FIN_LT_DEBT_PAYMENTS = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n EXPENSE_INTEREST_LT\n]\nGROUP_CFS_FINANCING = GROUP_CFS_FIN_ISSUING_EQUITY + GROUP_CFS_FIN_DIVIDENDS\nGROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE = [\n ASSET_PPE_BUILDINGS,\n ASSET_PPE_PLANT,\n ASSET_PPE_EQUIPMENT,\n INCOME_CAPITAL_GAIN_LOSS\n]\nGROUP_CFS_INV_LTD_OF_PPE = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_MORTGAGE_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE,\n]\nGROUP_CFS_INVESTING_PPE = GROUP_CFS_INV_PURCHASE_OR_SALE_OF_PPE + GROUP_CFS_INV_LTD_OF_PPE\nGROUP_CFS_INV_PURCHASE_OF_SECURITIES = [\n ASSET_CA_MKT_SECURITIES,\n ASSET_LTI_NOTES_RECEIVABLE,\n ASSET_LTI_SECURITIES,\n INCOME_INTEREST,\n INCOME_PASSIVE,\n]\nGROUP_CFS_INV_LTD_OF_SECURITIES = [\n LIABILITY_LTL_NOTES_PAYABLE,\n LIABILITY_LTL_BONDS_PAYABLE\n]\nGROUP_CFS_INVESTING_SECURITIES = GROUP_CFS_INV_PURCHASE_OF_SECURITIES + GROUP_CFS_INV_LTD_OF_SECURITIES\nGROUP_CFS_INVESTING = GROUP_CFS_INVESTING_PPE + GROUP_CFS_INVESTING_SECURITIES\nGROUP_CFS_INVESTING_AND_FINANCING = GROUP_CFS_INVESTING + GROUP_CFS_FINANCING\nBS_ASSET_ROLE = 'assets'\nBS_LIABILITIES_ROLE = 'liabilities'\nBS_EQUITY_ROLE = 'equity'\nACCOUNT_ROLE_CHOICES = [\n (BS_ASSET_ROLE.capitalize(), (\n # CURRENT ASSETS ----\n (ASSET_CA_CASH, _('Current Asset')),\n (ASSET_CA_MKT_SECURITIES, _('Marketable Securities')),\n (ASSET_CA_RECEIVABLES, _('Receivables')),\n (ASSET_CA_INVENTORY, _('Inventory')),\n 
(ASSET_CA_UNCOLLECTIBLES, _('Uncollectibles')),\n (ASSET_CA_PREPAID, _('Prepaid')),\n (ASSET_CA_OTHER, _('Other Liquid Assets')),\n\n # LONG TERM INVESTMENTS ---\n (ASSET_LTI_NOTES_RECEIVABLE, _('Notes Receivable')),\n (ASSET_LTI_LAND, _('Land')),\n (ASSET_LTI_SECURITIES, _('Securities')),\n\n # PPE ...\n (ASSET_PPE_BUILDINGS, _('Buildings')),\n (ASSET_PPE_BUILDINGS_ACCUM_DEPRECIATION, _('Buildings - Accum. Depreciation')),\n (ASSET_PPE_PLANT, _('Plant')),\n (ASSET_PPE_PLANT_ACCUM_DEPRECIATION, _('Plant - Accum. Depreciation')),\n (ASSET_PPE_EQUIPMENT, _('Equipment')),\n (ASSET_PPE_EQUIPMENT_ACCUM_DEPRECIATION, _('Equipment - Accum. Depreciation')),\n\n # Other Assets ...\n (ASSET_INTANGIBLE_ASSETS, _('Intangible Assets')),\n (ASSET_INTANGIBLE_ASSETS_ACCUM_AMORTIZATION, _('Intangible Assets - Accum. Amortization')),\n (ASSET_ADJUSTMENTS, _('Other Assets')),\n )),\n (BS_LIABILITIES_ROLE.capitalize(), (\n\n # CURRENT LIABILITIES ---\n (LIABILITY_CL_ACC_PAYABLE, _('Accounts Payable')),\n (LIABILITY_CL_WAGES_PAYABLE, _('Wages Payable')),\n (LIABILITY_CL_INTEREST_PAYABLE, _('Interest Payable')),\n (LIABILITY_CL_TAXES_PAYABLE, _('Taxes Payable')),\n (LIABILITY_CL_ST_NOTES_PAYABLE, _('Short Term Notes Payable')),\n (LIABILITY_CL_LTD_MATURITIES, _('Current Maturities of Long Tern Debt')),\n (LIABILITY_CL_DEFERRED_REVENUE, _('Deferred Revenue')),\n (LIABILITY_CL_OTHER, _('Other Liabilities')),\n\n # LONG TERM LIABILITIES ----\n (LIABILITY_LTL_NOTES_PAYABLE, _('Long Term Notes Payable')),\n (LIABILITY_LTL_BONDS_PAYABLE, _('Bonds Payable')),\n (LIABILITY_LTL_MORTGAGE_PAYABLE, _('Mortgage Payable')),\n )),\n (BS_EQUITY_ROLE.capitalize(), (\n\n # EQUITY ---\n (EQUITY_CAPITAL, _('Capital')),\n (EQUITY_COMMON_STOCK, _('Common Stock')),\n (EQUITY_PREFERRED_STOCK, _('Preferred Stock')),\n (EQUITY_ADJUSTMENT, _('Other Equity Adjustments')),\n (EQUITY_DIVIDENDS, _('Dividends & Distributions to Shareholders')),\n\n # INCOME ---\n (INCOME_OPERATIONAL, _('Operational Income')),\n (INCOME_PASSIVE, _('Investing/Passive Income')),\n (INCOME_INTEREST, _('Interest Income')),\n (INCOME_CAPITAL_GAIN_LOSS, _('Capital Gain/Loss Income')),\n (INCOME_OTHER, _('Other Income')),\n\n # COGS ----\n (COGS, _('Cost of Goods Sold')),\n\n # EXPENSES ----\n (EXPENSE_OPERATIONAL, _('Regular Expense')),\n (EXPENSE_INTEREST_ST, _('Interest Expense - Short Term Debt')),\n (EXPENSE_INTEREST_LT, _('Interest Expense - Long Term Debt')),\n (EXPENSE_TAXES, _('Tax Expense')),\n (EXPENSE_CAPITAL, _('Capital Expense')),\n (EXPENSE_DEPRECIATION, _('Depreciation Expense')),\n (EXPENSE_AMORTIZATION, _('Amortization Expense')),\n (EXPENSE_OTHER, _('Other Expense')),\n )),\n ('Root', (\n (ROOT_COA, 'CoA Root Account'),\n (ROOT_ASSETS, 'Assets Root Account'),\n (ROOT_LIABILITIES, 'Liabilities Root Account'),\n (ROOT_CAPITAL, 'Capital Root Account'),\n (ROOT_INCOME, 'Income Root Account'),\n (ROOT_COGS, 'COGS Root Account'),\n (ROOT_EXPENSES, 'Expenses Root Account'),\n ))\n]\nACCOUNT_CHOICES_NO_ROOT = [c for c in ACCOUNT_ROLE_CHOICES if c[0] != 'Root']\nROLES_ORDER_ASSETS = [a[0] for a in ACCOUNT_ROLE_CHOICES[0][1]]\nROLES_ORDER_LIABILITIES = [a[0] for a in ACCOUNT_ROLE_CHOICES[1][1]]\nROLES_ORDER_CAPITAL = [a[0] for a in ACCOUNT_ROLE_CHOICES[2][1]]\nROLES_ORDER_ALL = list(chain.from_iterable([ROLES_ORDER_ASSETS, ROLES_ORDER_LIABILITIES, ROLES_ORDER_CAPITAL]))\nACCOUNT_LIST_ROLE_ORDER = list(r[0] for r in chain.from_iterable([i[1] for i in ACCOUNT_CHOICES_NO_ROOT]))\nACCOUNT_LIST_ROLE_VERBOSE = {r[0]: r[1] for r in chain.from_iterable([i[1] for i 
in ACCOUNT_CHOICES_NO_ROOT])}\nROLE_TUPLES = sum([[(r[0].lower(), s[0]) for s in r[1]] for r in ACCOUNT_ROLE_CHOICES], list())\nROLE_DICT = dict([(t[0].lower(), [r[0] for r in t[1]]) for t in ACCOUNT_ROLE_CHOICES])\nVALID_ROLES = [r[1] for r in ROLE_TUPLES]\nBS_ROLES = dict((r[1], r[0]) for r in ROLE_TUPLES)\nBS_BUCKETS = {\n '0': 'Root',\n '1': 'Asset',\n '2': 'Liability',\n '3': 'Capital',\n '4': 'Income',\n '5': 'COGS',\n '6': 'Expenses'\n}\nBS_BUCKETS_ORDER = [v for _, v in BS_BUCKETS.items() if v != 'Root']\nROLES_VARS = locals().keys()\nROLES_DIRECTORY = dict()\nROLES_CATEGORIES = ['ASSET', 'LIABILITY', 'EQUITY', 'INCOME', 'COGS', 'EXPENSE']\nROLES_GROUPS = [g for g in ROLES_VARS if g.split('_')[0] == 'GROUP']\nGROUPS_DIRECTORY = dict()\ndef validate_roles(roles: Union[str, List[str]], raise_exception: bool = True) -> Set[str]:" }, { "identifier": "RoleContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class RoleContextManager:\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.DIGEST = io_data\n self.DIGEST['role_account'] = None\n self.DIGEST['role_balance'] = None\n\n self.ACCOUNTS = io_data['accounts']\n\n self.ROLES_ACCOUNTS = dict()\n self.ROLES_BALANCES = dict()\n self.ROLES_BALANCE_SHEET = dict()\n\n if self.BY_PERIOD:\n self.ROLES_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_period'] = None\n if self.BY_UNIT:\n self.ROLES_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['role_balance_by_unit'] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_roles()\n self.DIGEST['role_account'] = self.ROLES_ACCOUNTS\n self.DIGEST['role_balance'] = self.ROLES_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['role_balance_by_period'] = self.ROLES_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['role_balance_by_unit'] = self.ROLES_BALANCES_BY_UNIT\n\n return self.DIGEST\n\n def process_roles(self):\n\n for c, l in roles_module.ROLES_DIRECTORY.items():\n for r in l:\n acc_list = list(acc for acc in self.ACCOUNTS if acc['role'] == getattr(roles_module, r))\n\n self.ROLES_ACCOUNTS[r] = acc_list\n self.ROLES_BALANCES[r] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ROLES_BALANCES_BY_PERIOD[key][r] = sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ROLES_BALANCES_BY_UNIT[key][r] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "GroupContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class GroupContextManager:\n GROUP_ACCOUNTS_KEY = 'group_account'\n GROUP_BALANCE_KEY = 'group_balance'\n GROUP_BALANCE_BY_UNIT_KEY = 'group_balance_by_unit'\n GROUP_BALANCE_BY_PERIOD_KEY = 'group_balance_by_period'\n\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.IO_DIGEST = io_data\n\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = None\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = None\n\n self.DIGEST_ACCOUNTS = io_data['accounts']\n\n self.GROUPS_ACCOUNTS = dict()\n self.GROUPS_BALANCES = dict()\n\n if 
self.BY_PERIOD:\n self.GROUPS_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n if self.BY_UNIT:\n self.GROUPS_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = None\n\n if self.BY_PERIOD and self.BY_UNIT:\n self.GROUPS_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = None\n\n def digest(self):\n\n self.process_groups()\n self.IO_DIGEST[self.GROUP_ACCOUNTS_KEY] = self.GROUPS_ACCOUNTS\n self.IO_DIGEST[self.GROUP_BALANCE_KEY] = self.GROUPS_BALANCES\n\n if self.BY_PERIOD:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_PERIOD_KEY] = self.GROUPS_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.IO_DIGEST[self.GROUP_BALANCE_BY_UNIT_KEY] = self.GROUPS_BALANCES_BY_UNIT\n return self.IO_DIGEST\n\n def get_accounts_generator(self, mod, g):\n return (acc for acc in self.DIGEST_ACCOUNTS if acc['role'] in getattr(mod, g))\n\n def process_groups(self):\n for g in roles_module.ROLES_GROUPS:\n acc_list = list(self.get_accounts_generator(roles_module, g))\n self.GROUPS_ACCOUNTS[g] = acc_list\n self.GROUPS_BALANCES[g] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.GROUPS_BALANCES_BY_PERIOD[key][g] = sum(\n acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.GROUPS_BALANCES_BY_UNIT[key][g] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0]\n )" }, { "identifier": "ActivityContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class ActivityContextManager:\n\n def __init__(self,\n io_data: dict,\n by_unit: bool = False,\n by_period: bool = False):\n\n self.DIGEST = io_data\n self.DIGEST['activity_account'] = None\n self.DIGEST['activity_balance'] = None\n\n self.BY_PERIOD = by_period\n self.BY_UNIT = by_unit\n\n self.ACCOUNTS = io_data['accounts']\n self.ACTIVITY_ACCOUNTS = dict()\n self.ACTIVITY_BALANCES = dict()\n\n if self.BY_PERIOD:\n self.ACTIVITY_BALANCES_BY_PERIOD = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_period'] = None\n if self.BY_UNIT:\n self.ACTIVITY_BALANCES_BY_UNIT = defaultdict(lambda: dict())\n self.DIGEST['activity_balance_by_unit'] = None\n if self.BY_PERIOD and self.BY_UNIT:\n self.ROLES_BALANCES_BY_PERIOD_AND_UNIT = defaultdict(lambda: dict())\n\n def digest(self):\n\n self.process_activity()\n self.DIGEST['activity_account'] = self.ACTIVITY_ACCOUNTS\n self.DIGEST['activity_balance'] = self.ACTIVITY_BALANCES\n\n if self.BY_PERIOD:\n self.DIGEST['activity_balance_by_period'] = self.ACTIVITY_BALANCES_BY_PERIOD\n if self.BY_UNIT:\n self.DIGEST['activity_balance_by_unit'] = self.ACTIVITY_BALANCES_BY_PERIOD\n\n def get_accounts_generator(self, activity: str):\n return (acc for acc in self.ACCOUNTS if acc['activity'] == activity)\n\n def process_activity(self):\n JournalEntryModel = lazy_importer.get_journal_entry_model()\n for act in JournalEntryModel.VALID_ACTIVITIES:\n acc_list = list(self.get_accounts_generator(act))\n self.ACTIVITY_ACCOUNTS[act] = acc_list\n self.ACTIVITY_BALANCES[act] = sum(acc['balance'] for acc in acc_list)\n\n if self.BY_PERIOD or self.BY_UNIT:\n for acc in acc_list:\n if self.BY_PERIOD:\n key = (acc['period_year'], acc['period_month'])\n self.ACTIVITY_BALANCES_BY_PERIOD[key][act] = 
sum(acc['balance'] for acc in acc_list if all([\n acc['period_year'] == key[0],\n acc['period_month'] == key[1]]\n ))\n if self.BY_UNIT:\n key = (acc['unit_uuid'], acc['unit_name'])\n self.ACTIVITY_BALANCES_BY_UNIT[key][act] = sum(\n acc['balance'] for acc in acc_list if acc['unit_uuid'] == key[0])" }, { "identifier": "BalanceSheetStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class BalanceSheetStatementContextManager:\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n gb_bs = {\n bsr: list(l) for bsr, l in groupby(\n chain.from_iterable(\n [\n self.DIGEST['group_account']['GROUP_ASSETS'],\n self.DIGEST['group_account']['GROUP_LIABILITIES'],\n self.DIGEST['group_account']['GROUP_CAPITAL'],\n ]\n ),\n key=lambda acc: acc['role_bs'])\n }\n\n bs_context = {\n bs_role: {\n 'total_balance': sum(a['balance'] for a in gb),\n 'is_block': True,\n 'roles': {\n r: {\n 'accounts': list(a)\n } for r, a in groupby(list(gb), key=lambda acc: acc['role'])\n }\n } for bs_role, gb in gb_bs.items()\n }\n\n for bs_role, bs_role_data in bs_context.items():\n for acc_role, role_data in bs_role_data['roles'].items():\n role_data['total_balance'] = sum(a['balance'] for a in role_data['accounts'])\n role_data['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc_role]\n\n bs_context['equity_balance'] = self.DIGEST['group_balance']['GROUP_EQUITY']\n bs_context['retained_earnings_balance'] = self.DIGEST['group_balance']['GROUP_EARNINGS']\n bs_context['liabilities_equity_balance'] = self.DIGEST['group_balance']['GROUP_LIABILITIES_EQUITY']\n\n self.DIGEST['balance_sheet'] = bs_context\n\n return self.DIGEST" }, { "identifier": "IncomeStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class IncomeStatementContextManager:\n\n def __init__(self, io_data: dict):\n self.DIGEST = io_data\n\n def digest(self):\n if 'group_account' in self.DIGEST:\n self.DIGEST['income_statement'] = {\n 'operating': {\n 'revenues': [\n acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_REVENUES\n ],\n 'cogs': [\n acc for acc in self.DIGEST['group_account']['GROUP_COGS'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_COGS\n ],\n 'expenses': [\n acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OPERATING_EXPENSES\n ]\n },\n 'other': {\n 'revenues': [acc for acc in self.DIGEST['group_account']['GROUP_INCOME'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_REVENUES],\n 'expenses': [acc for acc in self.DIGEST['group_account']['GROUP_EXPENSES'] if\n acc['role'] in roles_module.GROUP_IC_OTHER_EXPENSES],\n }\n }\n\n for activity, ic_section in self.DIGEST['income_statement'].items():\n for section, acc_list in ic_section.items():\n for acc in acc_list:\n acc['role_name'] = roles_module.ACCOUNT_LIST_ROLE_VERBOSE[acc['role']]\n\n # OPERATING INCOME...\n self.DIGEST['income_statement']['operating']['gross_profit'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs']\n ]\n ))\n self.DIGEST['income_statement']['operating']['net_operating_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['operating']['revenues'],\n self.DIGEST['income_statement']['operating']['cogs'],\n self.DIGEST['income_statement']['operating']['expenses'],\n ]\n 
))\n self.DIGEST['income_statement']['operating']['net_operating_revenue'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['revenues']\n )\n self.DIGEST['income_statement']['operating']['net_cogs'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['cogs']\n )\n self.DIGEST['income_statement']['operating']['net_operating_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['operating']['expenses']\n )\n\n # OTHER INCOME....\n self.DIGEST['income_statement']['other']['net_other_revenues'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['revenues']\n )\n self.DIGEST['income_statement']['other']['net_other_expenses'] = sum(\n acc['balance'] for acc in self.DIGEST['income_statement']['other']['expenses']\n )\n self.DIGEST['income_statement']['other']['net_other_income'] = sum(\n acc['balance'] for acc in chain.from_iterable(\n [\n self.DIGEST['income_statement']['other']['revenues'],\n self.DIGEST['income_statement']['other']['expenses']\n ]\n ))\n\n # NET INCOME...\n self.DIGEST['income_statement']['net_income'] = self.DIGEST['income_statement']['operating'][\n 'net_operating_income']\n self.DIGEST['income_statement']['net_income'] += self.DIGEST['income_statement']['other'][\n 'net_other_income']\n return self.DIGEST" }, { "identifier": "CashFlowStatementContextManager", "path": "django_ledger/io/io_context.py", "snippet": "class CashFlowStatementContextManager:\n CFS_DIGEST_KEY = 'cash_flow_statement'\n\n # todo: implement by period and by unit...\n def __init__(self,\n io_data: dict,\n by_period: bool = False,\n by_unit: bool = False):\n self.IO_DIGEST = io_data\n self.CASH_ACCOUNTS = [a for a in self.IO_DIGEST['accounts'] if a['role'] == roles_module.ASSET_CA_CASH]\n self.JE_MODEL = lazy_loader.get_journal_entry_model()\n\n def check_io_digest(self):\n if GroupContextManager.GROUP_BALANCE_KEY not in self.IO_DIGEST:\n raise ValidationError(\n 'IO Digest must have groups for Cash Flow Statement'\n )\n\n def operating(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n operating_activities = dict()\n operating_activities['GROUP_CFS_NET_INCOME'] = {\n 'description': 'Net Income',\n 'balance': group_balances['GROUP_CFS_NET_INCOME']\n }\n operating_activities['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION'] = {\n 'description': 'Depreciation & Amortization of Assets',\n 'balance': -group_balances['GROUP_CFS_OP_DEPRECIATION_AMORTIZATION']\n }\n operating_activities['GROUP_CFS_OP_INVESTMENT_GAINS'] = {\n 'description': 'Gain/Loss Sale of Assets',\n 'balance': group_balances['GROUP_CFS_OP_INVESTMENT_GAINS']\n }\n operating_activities['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE'] = {\n 'description': 'Accounts Receivable',\n 'balance': -group_balances['GROUP_CFS_OP_ACCOUNTS_RECEIVABLE']\n }\n operating_activities['GROUP_CFS_OP_INVENTORY'] = {\n 'description': 'Inventories',\n 'balance': -group_balances['GROUP_CFS_OP_INVENTORY']\n }\n\n operating_activities['GROUP_CFS_OP_ACCOUNTS_PAYABLE'] = {\n 'description': 'Accounts Payable',\n 'balance': group_balances['GROUP_CFS_OP_ACCOUNTS_PAYABLE']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT'] = {\n 'description': 'Other Current Assets',\n 'balance': -group_balances['GROUP_CFS_OP_OTHER_CURRENT_ASSETS_ADJUSTMENT']\n }\n operating_activities['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT'] = {\n 'description': 'Other Current Liabilities',\n 'balance': 
group_balances['GROUP_CFS_OP_OTHER_CURRENT_LIABILITIES_ADJUSTMENT']\n }\n\n net_cash_by_op_activities = sum(i['balance'] for g, i in operating_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['operating'] = operating_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'] = dict(\n OPERATING=net_cash_by_op_activities\n )\n\n def financing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n financing_activities = dict()\n financing_activities['GROUP_CFS_FIN_ISSUING_EQUITY'] = {\n 'description': 'Common Stock, Preferred Stock and Capital Raised',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_EQUITY)\n }\n financing_activities['GROUP_CFS_FIN_DIVIDENDS'] = {\n 'description': 'Dividends Payed Out to Shareholders',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_DIVIDENDS)\n }\n financing_activities['GROUP_CFS_FIN_ST_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Short-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_STD)\n }\n financing_activities['GROUP_CFS_FIN_LT_DEBT_PAYMENTS'] = {\n 'description': 'Increase/Reduction of Long-Term Debt Principal',\n 'balance': sum(a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.FINANCING_LTD)\n }\n\n net_cash = sum(i['balance'] for g, i in financing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['financing'] = financing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['FINANCING'] = net_cash\n\n def investing(self):\n group_balances = self.IO_DIGEST[GroupContextManager.GROUP_BALANCE_KEY]\n investing_activities = dict()\n investing_activities['GROUP_CFS_INVESTING_SECURITIES'] = {\n 'description': 'Purchase, Maturity and Sales of Investments & Securities',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_SECURITIES)\n }\n investing_activities['GROUP_CFS_INVESTING_PPE'] = {\n 'description': 'Addition and Disposition of Property, Plant & Equipment',\n 'balance': sum(\n a['balance'] for a in self.CASH_ACCOUNTS if a['activity'] == self.JE_MODEL.INVESTING_PPE)\n }\n\n net_cash = sum(i['balance'] for g, i in investing_activities.items())\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['investing'] = investing_activities\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity']['INVESTING'] = net_cash\n\n def net_cash(self):\n self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash'] = sum([\n bal for act, bal in self.IO_DIGEST[self.CFS_DIGEST_KEY]['net_cash_by_activity'].items()\n ])\n\n def digest(self):\n self.check_io_digest()\n self.operating()\n self.financing()\n self.investing()\n self.net_cash()\n return self.IO_DIGEST" }, { "identifier": "IODigestContextManager", "path": "django_ledger/io/io_digest.py", "snippet": "class IODigestContextManager:\n\n def __init__(self, io_data: defaultdict):\n self.IO_DATA: defaultdict = io_data\n self.IO_MODEL = self.IO_DATA['io_model']\n self.TXS_QS = self.IO_DATA['txs_qs']\n self.STRFTIME_FORMAT = '%B %d, %Y'\n\n def get_io_data(self) -> defaultdict:\n return self.IO_DATA\n\n def get_strftime_format(self):\n return self.STRFTIME_FORMAT\n\n def get_from_date(self, as_str: bool = False, fmt=None) -> Optional[date]:\n from_date = self.IO_DATA['from_date']\n if from_date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return from_date.strftime(fmt)\n return 
from_date\n\n def get_to_date(self, as_str: bool = False, fmt=None) -> date:\n if as_str:\n if not fmt:\n fmt = self.get_strftime_format()\n return self.IO_DATA['to_date'].strftime(fmt)\n return self.IO_DATA['to_date']\n\n def is_entity_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_entity_model()\n )\n\n def is_ledger_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_ledger_model()\n )\n\n def is_unit_model(self) -> bool:\n return isinstance(\n self.IO_MODEL,\n lazy_loader.get_unit_model()\n )\n\n def is_by_unit(self) -> bool:\n return self.IO_DATA['by_unit']\n\n def is_by_period(self) -> bool:\n return self.IO_DATA['by_period']\n\n def is_by_activity(self) -> bool:\n return self.IO_DATA['by_activity']\n\n # Balance Sheet Data...\n def has_balance_sheet(self) -> bool:\n return 'balance_sheet' in self.IO_DATA\n\n def get_balance_sheet_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['balance_sheet']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have balance sheet information available.'\n )\n\n # Income Statement Data...\n def has_income_statement(self) -> bool:\n return 'income_statement' in self.IO_DATA\n\n def get_income_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['income_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have income statement information available.'\n )\n\n # Cash Flow Statement Data...\n def has_cash_flow_statement(self):\n return 'cash_flow_statement' in self.IO_DATA\n\n def get_cash_flow_statement_data(self, raise_exception: bool = True) -> Dict:\n try:\n return self.IO_DATA['cash_flow_statement']\n except KeyError:\n if raise_exception:\n raise IODigestValidationError(\n 'IO Digest does not have cash flow statement information available.'\n )\n\n # CLOSING ENTRIES...\n\n def get_closing_entry_data(self):\n io_data = self.get_io_data()\n return io_data['accounts']" }, { "identifier": "FinancialRatioManager", "path": "django_ledger/io/ratios.py", "snippet": "class FinancialRatioManager:\n\n def __init__(self, io_data):\n self.DIGEST = io_data\n self.ACCOUNTS = io_data['accounts']\n self.RATIO_NA = RATIO_NA\n\n self.quick_assets = io_data['group_balance']['GROUP_QUICK_ASSETS']\n self.assets = io_data['group_balance']['GROUP_ASSETS']\n self.current_liabilities = io_data['group_balance']['GROUP_CURRENT_LIABILITIES']\n self.current_assets = io_data['group_balance']['GROUP_CURRENT_ASSETS']\n self.equity = io_data['group_balance']['GROUP_CAPITAL']\n self.liabilities = io_data['group_balance']['GROUP_LIABILITIES']\n self.net_income = io_data['group_balance']['GROUP_EARNINGS']\n self.net_sales = io_data['group_balance']['GROUP_NET_SALES']\n self.net_profit = io_data['group_balance']['GROUP_NET_PROFIT']\n self.gross_profit = io_data['group_balance']['GROUP_GROSS_PROFIT']\n self.RATIOS = dict()\n\n def digest(self):\n self.quick_ratio()\n self.current_ratio()\n self.debt_to_equity()\n self.return_on_equity()\n self.return_on_assets()\n self.net_profit_margin()\n self.gross_profit_margin()\n self.DIGEST['ratios'] = self.RATIOS\n return self.DIGEST\n\n # ------> SOLVENCY RATIOS <------\n def quick_ratio(self, as_percent=False):\n if self.current_liabilities == 0:\n cr = self.RATIO_NA\n else:\n cr = self.quick_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['quick_ratio'] = cr\n\n def current_ratio(self, 
as_percent=False):\n if self.current_liabilities == 0:\n cr = RATIO_NA\n else:\n cr = self.current_assets / self.current_liabilities\n if as_percent:\n cr = cr * 100\n self.RATIOS['current_ratio'] = cr\n\n # ------> LEVERAGE RATIOS <------\n def debt_to_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.liabilities / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['debt_to_equity'] = cr\n\n # ------> PROFITABILITY RATIOS <------\n def return_on_equity(self, as_percent=False):\n if self.equity == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.equity\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_equity'] = cr\n\n def return_on_assets(self, as_percent=False):\n if self.assets == 0:\n cr = RATIO_NA\n else:\n cr = self.net_income / self.assets\n if as_percent:\n cr = cr * 100\n self.RATIOS['return_on_assets'] = cr\n\n def net_profit_margin(self, as_percent=False):\n if self.net_sales == 0:\n npm = RATIO_NA\n else:\n npm = self.net_profit / self.net_sales\n if as_percent:\n npm = npm * 100\n self.RATIOS['net_profit_margin'] = npm\n\n def gross_profit_margin(self, as_percent=False):\n if self.gross_profit == 0:\n gpm = RATIO_NA\n else:\n gpm = self.gross_profit / self.net_sales\n if as_percent:\n gpm = gpm * 100\n self.RATIOS['gross_profit_margin'] = gpm" }, { "identifier": "lazy_loader", "path": "django_ledger/models/utils.py", "snippet": "class LazyLoader:\n ENTITY_MODEL = None\n ENTITY_STATE_MODEL = None\n UNIT_MODEL = None\n ACCOUNT_MODEL = None\n BANK_ACCOUNT_MODEL = None\n LEDGER_MODEL = None\n TXS_MODEL = None\n JE_MODEL = None\n ITEM_MODEL = None\n ITEM_TRANSACTION_MODEL = None\n CUSTOMER_MODEL = None\n INVOICE_MODEL = None\n BILL_MODEL = None\n UOM_MODEL = None\n VENDOR_MODEL = None\n TRANSACTION_MODEL = None\n ENTITY_UNIT_MODEL = None\n PURCHASE_ORDER_MODEL = None\n ESTIMATE_MODEL = None\n CLOSING_ENTRY_MODEL = None\n CLOSING_ENTRY_TRANSACTION_MODEL = None\n ENTITY_DATA_GENERATOR = None\n BALANCE_SHEET_REPORT_CLASS = None\n INCOME_STATEMENT_REPORT_CLASS = None\n CASH_FLOW_STATEMENT_REPORT_CLASS = None\n def get_entity_model(self):\n def get_entity_state_model(self):\n def get_bank_account_model(self):\n def get_account_model(self):\n def get_txs_model(self):\n def get_purchase_order_model(self):\n def get_ledger_model(self):\n def get_unit_model(self):\n def get_journal_entry_model(self):\n def get_item_model(self):\n def get_item_transaction_model(self):\n def get_customer_model(self):\n def get_bill_model(self):\n def get_invoice_model(self):\n def get_uom_model(self):\n def get_vendor_model(self):\n def get_transaction_model(self):\n def get_entity_unit_model(self):\n def get_estimate_model(self):\n def get_entity_data_generator(self):\n def get_closing_entry_model(self):\n def get_closing_entry_transaction_model(self):\n def get_balance_sheet_report_class(self):\n def get_income_statement_report_class(self):\n def get_cash_flow_statement_report_class(self):" } ]
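The context managers above all reduce the digest's accounts list into grouped balances. Below is a standalone sketch of that aggregation; the role strings are taken from the roles snippet, and the account dicts are trimmed to just the fields used here.

# Standalone sketch of what GroupContextManager.process_groups computes.
GROUPS = {
    'GROUP_QUICK_ASSETS': ['asset_ca_cash', 'asset_ca_mkt_sec'],
    'GROUP_CURRENT_LIABILITIES': ['lia_cl_acc_payable'],
}
accounts = [
    {'role': 'asset_ca_cash', 'balance': 1500.0},
    {'role': 'asset_ca_mkt_sec', 'balance': 500.0},
    {'role': 'lia_cl_acc_payable', 'balance': 800.0},
]
group_balance = {
    g: sum(a['balance'] for a in accounts if a['role'] in members)
    for g, members in GROUPS.items()
}
# FinancialRatioManager then consumes these sums, e.g. the quick ratio:
quick_ratio = group_balance['GROUP_QUICK_ASSETS'] / group_balance['GROUP_CURRENT_LIABILITIES']
print(group_balance, quick_ratio)  # {... 2000.0, ... 800.0} 2.5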
from collections import defaultdict, namedtuple from datetime import datetime, date from itertools import groupby from pathlib import Path from random import choice from typing import List, Set, Union, Tuple, Optional, Dict from django.contrib.auth import get_user_model from django.core.exceptions import ValidationError, ObjectDoesNotExist from django.db.models import Sum, QuerySet from django.db.models.functions import TruncMonth from django.http import Http404 from django.utils.dateparse import parse_date, parse_datetime from django.utils.timezone import make_aware, is_naive, localtime from django.utils.translation import gettext_lazy as _ from django_ledger import settings from django_ledger.exceptions import InvalidDateInputError, TransactionNotInBalanceError from django_ledger.io import roles as roles_module from django_ledger.io.io_context import (RoleContextManager, GroupContextManager, ActivityContextManager, BalanceSheetStatementContextManager, IncomeStatementContextManager, CashFlowStatementContextManager) from django_ledger.io.io_digest import IODigestContextManager from django_ledger.io.ratios import FinancialRatioManager from django_ledger.models.utils import lazy_loader
15760
a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles:
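A standalone sketch of the sign convention applied in the `if signs:` block above: balances that run against an account's natural side are reported as negative. The role and balance-type strings follow the roles module and TransactionModel conventions shown in the context.

def apply_signs(acc):
    # Credit balances on assets, and debit balances on liabilities or
    # equity, flip sign -- mirroring the `if signs:` block above.
    flip = (
        (acc['role_bs'] == 'assets' and acc['balance_type'] == 'credit')
        or (acc['role_bs'] in ('liabilities', 'equity') and acc['balance_type'] == 'debit')
    )
    return {**acc, 'balance': -acc['balance'] if flip else acc['balance']}

acc = {'role_bs': 'assets', 'balance_type': 'credit', 'balance': 250.0}
print(apply_signs(acc)['balance'])  # -250.0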
""" Django Ledger created by Miguel Sanda <[email protected]>. Copyright© EDMA Group Inc licensed under the GPLv3 Agreement. Contributions to this module: * Miguel Sanda <[email protected]> """ UserModel = get_user_model() def diff_tx_data(tx_data: list, raise_exception: bool = True): IS_TX_MODEL = False TransactionModel = lazy_loader.get_txs_model() if isinstance(tx_data[0], TransactionModel): CREDITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'credit') DEBITS = sum(tx.amount for tx in tx_data if tx.tx_type == 'debit') IS_TX_MODEL = True elif isinstance(tx_data[0], dict): CREDITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'credit') DEBITS = sum(tx['amount'] for tx in tx_data if tx['tx_type'] == 'debit') else: raise ValidationError('Only Dictionary or TransactionModel allowed.') is_valid = (CREDITS == DEBITS) diff = CREDITS - DEBITS if not is_valid and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: if raise_exception: raise TransactionNotInBalanceError( f'Invalid tx data. Credits and debits must match. Currently cr: {CREDITS}, db {DEBITS}.' f'Max Tolerance {settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE}' ) return IS_TX_MODEL, is_valid, diff def check_tx_balance(tx_data: list, perform_correction: bool = False) -> bool: if tx_data: IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data, raise_exception=perform_correction) if not perform_correction and abs(diff): return False if not perform_correction and abs(diff) > settings.DJANGO_LEDGER_TRANSACTION_MAX_TOLERANCE: return False while not is_valid: tx_type_choice = choice(['debit', 'credit']) txs_candidates = list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice) if len(txs_candidates) > 0: tx = choice(list(tx for tx in tx_data if tx['tx_type'] == tx_type_choice)) if any([diff > 0 and tx_type_choice == 'debit', diff < 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION elif any([diff < 0 and tx_type_choice == 'debit', diff > 0 and tx_type_choice == 'credit']): if IS_TX_MODEL: tx.amount -= settings.DJANGO_LEDGER_TRANSACTION_CORRECTION else: tx['amount'] += settings.DJANGO_LEDGER_TRANSACTION_CORRECTION IS_TX_MODEL, is_valid, diff = diff_tx_data(tx_data) return True def validate_io_date(dt: Union[str, date, datetime], no_parse_localdate: bool = True) -> Optional[datetime]: if not dt: return if isinstance(dt, date): dt = make_aware( value=datetime.combine( dt, datetime.min.time() )) return dt elif isinstance(dt, datetime): if is_naive(dt): return make_aware(dt) return dt elif isinstance(dt, str): # try to parse a date object from string... fdt = parse_date(dt) if not fdt: # try to parse a datetime object from string... fdt = parse_datetime(dt) if not fdt: raise InvalidDateInputError( message=f'Could not parse date from {dt}' ) elif is_naive(fdt): fdt = make_aware(fdt) return fdt if no_parse_localdate: return localtime() def validate_dates( from_date: Union[str, datetime, date] = None, to_date: Union[str, datetime, date] = None) -> Tuple[date, date]: from_date = validate_io_date(from_date, no_parse_localdate=False) to_date = validate_io_date(to_date) return from_date, to_date def validate_activity(activity: str, raise_404: bool = False): # idea: move to model???... JournalEntryModel = lazy_loader.get_journal_entry_model() valid = activity in JournalEntryModel.VALID_ACTIVITIES if activity and not valid: exception = ValidationError(f'{activity} is invalid. 
Choices are {JournalEntryModel.VALID_ACTIVITIES}.') if raise_404: raise Http404(exception) raise exception return activity class IOValidationError(ValidationError): pass class IODatabaseMixIn: """ Controls how transactions are recorded into the ledger. """ def is_entity_model(self): return isinstance(self, lazy_loader.get_entity_model()) def is_ledger_model(self): return isinstance(self, lazy_loader.get_ledger_model()) def is_entity_unit_model(self): return isinstance(self, lazy_loader.get_unit_model()) def get_entity_model_from_io(self): if self.is_entity_model(): return self elif self.is_ledger_model(): return self.entity elif self.is_entity_unit_model(): return self.entity # def is_time_bounded(self, from_date, to_date): def database_digest(self, txs_queryset: QuerySet, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, from_date: date = None, to_date: date = None, activity: str = None, role: str = None, accounts: str or List[str] or Set[str] = None, posted: bool = True, exclude_zero_bal: bool = True, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, by_unit: bool = False, **kwargs): if settings.DJANGO_LEDGER_USE_CLOSING_ENTRIES: if not from_date: entity_model = self.get_entity_model_from_io() closing_entry_date = entity_model.select_closing_entry_for_io_date(to_date=to_date) # print(closing_entry_date) # # if closing_entry_date: # closing_entry_list = entity_model.get_closing_entry_cache_for_date( # closing_date=closing_entry_date, # force_cache_update=True # ) # from_date_d = closing_entry_date + timedelta(days=1) # print('Orig From:', from_date) # print('New from:', from_date_d) # print('To Date:', to_date) # print(closing_entry_list) if not txs_queryset: TransactionModel = lazy_loader.get_txs_model() if self.is_entity_model(): if entity_slug: if entity_slug != self.slug: raise IOValidationError('Inconsistent entity_slug. 
' f'Provided {entity_slug} does not match actual {self.slug}') if unit_slug: txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug or self.slug, unit_slug=unit_slug ) else: txs_queryset = TransactionModel.objects.for_entity( user_model=user_model, entity_slug=self ) elif self.is_ledger_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Ledger Model requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_ledger( user_model=user_model, entity_slug=entity_slug, ledger_model=self ) elif self.is_entity_unit_model(): if not entity_slug: raise IOValidationError( 'Calling digest from Entity Unit requires entity_slug explicitly for safety') txs_queryset = TransactionModel.objects.for_unit( user_model=user_model, entity_slug=entity_slug, unit_slug=unit_slug or self ) else: txs_queryset = TransactionModel.objects.none() txs_queryset = txs_queryset.not_closing_entry() if exclude_zero_bal: txs_queryset = txs_queryset.filter(amount__gt=0) if posted: txs_queryset = txs_queryset.posted() if from_date: txs_queryset = txs_queryset.from_date(from_date=from_date) if to_date: txs_queryset = txs_queryset.to_date(to_date=to_date) if accounts: if not isinstance(accounts, str): accounts = [accounts] txs_queryset = txs_queryset.for_accounts(account_list=accounts) if activity: if isinstance(activity, str): activity = [activity] txs_queryset = txs_queryset.for_activity(activity_list=activity) if role: txs_queryset = txs_queryset.for_roles(role_list=role) VALUES = [ 'account__uuid', 'account__balance_type', 'tx_type', 'account__code', 'account__name', 'account__role', ] ANNOTATE = {'balance': Sum('amount')} ORDER_BY = ['account__uuid'] if by_unit: ORDER_BY.append('journal_entry__entity_unit__uuid') VALUES += ['journal_entry__entity_unit__uuid', 'journal_entry__entity_unit__name'] if by_period: ORDER_BY.append('journal_entry__timestamp') ANNOTATE['dt_idx'] = TruncMonth('journal_entry__timestamp') if by_activity: ORDER_BY.append('journal_entry__activity') VALUES.append('journal_entry__activity') if by_tx_type: ORDER_BY.append('tx_type') VALUES.append('tx_type') return txs_queryset.values(*VALUES).annotate(**ANNOTATE).order_by(*ORDER_BY) def python_digest(self, txs_queryset: Optional[QuerySet] = None, user_model: Optional[UserModel] = None, to_date: date = None, from_date: date = None, equity_only: bool = False, activity: str = None, entity_slug: str = None, unit_slug: str = None, role: Optional[Union[Set[str], List[str]]] = None, accounts: Optional[Union[Set[str], List[str]]] = None, signs: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, by_period: bool = False, **kwargs) -> list or tuple: if equity_only: role = roles_module.GROUP_EARNINGS txs_queryset = self.database_digest( user_model=user_model, txs_queryset=txs_queryset, to_date=to_date, from_date=from_date, entity_slug=entity_slug, unit_slug=unit_slug, activity=activity, role=role, accounts=accounts, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, by_period=by_period, **kwargs) for tx_model in txs_queryset: if tx_model['account__balance_type'] != tx_model['tx_type']: tx_model['balance'] = -tx_model['balance'] # txs_list = list(txs_queryset) # txs_list.sort(key=lambda a: ( # a['account__uuid'], # str(a.get('journal_entry__entity_unit__uuid', '')) if by_unit else '', # a['dt_idx'].year if by_period else 0, # a['dt_idx'].month if by_period else 0, # str(a['journal_entry__activity']) if by_activity else None, # 
a['tx_type'] if by_tx_type else '', # )) accounts_gb_code = groupby(txs_queryset, key=lambda a: ( a['account__uuid'], a.get('journal_entry__entity_unit__uuid') if by_unit else None, a.get('dt_idx').year if by_period else None, a.get('dt_idx').month if by_period else None, a.get('journal_entry__activity') if by_activity else None, a.get('tx_type') if by_tx_type else None, )) gb_digest = [self.aggregate_balances(k, g) for k, g in accounts_gb_code] for acc in gb_digest: acc['balance_abs'] = abs(acc['balance']) if signs: TransactionModel = lazy_loader.get_txs_model() for acc in gb_digest: if any([ all([acc['role_bs'] == roles_module.BS_ASSET_ROLE, acc['balance_type'] == TransactionModel.CREDIT]), all([acc['role_bs'] in ( roles_module.BS_LIABILITIES_ROLE, roles_module.BS_EQUITY_ROLE ), acc['balance_type'] == TransactionModel.DEBIT]) ]): acc['balance'] = -acc['balance'] return txs_queryset, gb_digest @staticmethod def aggregate_balances(k, g): gl = list(g) return { 'account_uuid': k[0], 'unit_uuid': k[1], 'unit_name': gl[0].get('journal_entry__entity_unit__name'), 'activity': gl[0].get('journal_entry__activity'), 'period_year': k[2], 'period_month': k[3], 'role_bs': roles_module.BS_ROLES.get(gl[0]['account__role']), 'role': gl[0]['account__role'], 'code': gl[0]['account__code'], 'name': gl[0]['account__name'], 'balance_type': gl[0]['account__balance_type'], 'tx_type': k[5], 'balance': sum(a['balance'] for a in gl), } def digest(self, entity_slug: str = None, unit_slug: str = None, user_model: UserModel = None, txs_queryset: QuerySet = None, as_io_digest: bool = False, accounts: Optional[Union[Set[str], List[str]]] = None, role: Optional[Union[Set[str], List[str]]] = None, activity: str = None, signs: bool = True, to_date: Union[str, datetime, date] = None, from_date: Union[str, datetime, date] = None, process_roles: bool = False, process_groups: bool = False, process_ratios: bool = False, process_activity: bool = False, equity_only: bool = False, by_period: bool = False, by_unit: bool = False, by_activity: bool = False, by_tx_type: bool = False, digest_name: str = None, balance_sheet_statement: bool = False, income_statement: bool = False, cash_flow_statement: bool = False, **kwargs) -> Union[Tuple, IODigestContextManager]: if balance_sheet_statement: from_date = None if cash_flow_statement: by_activity = True if activity: activity = validate_activity(activity) if role: role = roles_module.validate_roles(role) from_date, to_date = validate_dates(from_date, to_date) io_data = defaultdict(lambda: dict()) io_data['io_model'] = self io_data['from_date'] = from_date io_data['to_date'] = to_date io_data['by_unit'] = by_unit io_data['by_period'] = by_period io_data['by_activity'] = by_activity io_data['by_tx_type'] = by_tx_type txs_qs, accounts_digest = self.python_digest( txs_queryset=txs_queryset, user_model=user_model, accounts=accounts, role=role, activity=activity, entity_slug=entity_slug, unit_slug=unit_slug, to_date=to_date, from_date=from_date, signs=signs, equity_only=equity_only, by_period=by_period, by_unit=by_unit, by_activity=by_activity, by_tx_type=by_tx_type, **kwargs ) io_data['txs_qs'] = txs_qs io_data['accounts'] = accounts_digest if process_roles:
roles_mgr = RoleContextManager(
4
2023-10-20 01:07:20+00:00
24k
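A record with the schema above (import_statement, cropped_code, all_code, next_line, gold_snippet_index, created_at, level) is typically consumed for next-line prediction: the model sees the imports plus the cropped context and must produce the held-out next_line. Below is a minimal sketch of such an evaluation loop. It is not part of the dataset; the file name records.jsonl and the generate_next_line stub are assumptions, stand-ins for a real on-disk dump and a real completion model.

import json


def generate_next_line(prompt: str) -> str:
    # Hypothetical stand-in for any code-completion backend.
    return "roles_mgr = RoleContextManager("


def exact_match(prediction: str, reference: str) -> bool:
    # Whitespace-insensitive comparison, a common choice for next-line EM.
    return " ".join(prediction.split()) == " ".join(reference.split())


def score_record(record: dict) -> bool:
    # The prompt is the file's imports plus the cropped context that
    # precedes the target line, mirroring the fields shown in this dump.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    return exact_match(generate_next_line(prompt), record["next_line"])


if __name__ == "__main__":
    # records.jsonl is assumed: one JSON object per line with this schema.
    with open("records.jsonl", "r", encoding="utf-8") as f:
        records = [json.loads(line) for line in f]
    hits = sum(score_record(r) for r in records)
    print(f"next-line exact match: {hits / max(len(records), 1):.3f}")

Exact match against the gold next_line is the simplest metric; softer variants score edit similarity between the generated and reference lines instead.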
acolas1/KGSimple
simplify.py
[ { "identifier": "FluencyScorer", "path": "scoring/fluency_scorer.py", "snippet": "class FluencyScorer:\n def __init__(self, batch_size=1, reduce=\"mean\", log=True, laplace_smooth=False, prob_dict_path=None):\n self.device = \"cuda:1\" if torch.cuda.is_available() else \"cpu\"\n self.batch_size = batch_size\n self.reduce = reduce\n self.log = log\n self.laplace_smooth = laplace_smooth\n self.tokenizer = GPT2Tokenizer.from_pretrained(\"gpt2\")\n self.scorer = LMScorer.from_pretrained(\"gpt2\", device=self.device, batch_size=batch_size)\n self.idf_df = pd.read_csv(prob_dict_path, ',', encoding='utf-8')\n self.freq_dict = pd.Series((self.idf_df.frequency.values), index=self.idf_df.token).to_dict()\n self.num_tokens = self.idf_df.total.values[0] \n \n def unigram_score(self, sentences):\n if self.freq_dict is None:\n raise Exception(\"Probability dictionary is not defined.\") \n unigram_scores = []\n for sent in sentences:\n unigram_prob = 1\n for token in word_tokenize(sent.lower()):\n if token in self.freq_dict:\n if self.laplace_smooth:\n curr_unigram_prob = (self.freq_dict[token]+1)/(self.num_tokens+len(self.freq_dict))\n else:\n curr_unigram_prob = self.freq_dict[token]/self.num_tokens\n \n \n\n else:\n if self.laplace_smooth:\n curr_unigram_prob = (1/(self.num_tokens+len(self.freq_dict)))\n else:\n curr_unigram_prob = 1\n # unigram_prob += curr_unigram_prob\n \n \n if self.log:\n unigram_prob +=np.log(curr_unigram_prob)\n else:\n unigram_prob *= curr_unigram_prob\n uni_score = unigram_prob/len(word_tokenize(sent))\n unigram_scores.append(uni_score)\n return unigram_scores\n \n def SLOR_score(self, sentence_list, lm_score, unigram_score):\n SLOR_scores = []\n for i in range(len(sentence_list)):\n SLOR_score = lm_score[i]-unigram_score[i]\n if self.log:\n SLOR_score = math.exp(lm_score[i]-unigram_score[i])\n SLOR_scores.append(SLOR_score)\n return SLOR_scores\n \n def score_batched(self, generated_texts, source_texts=None, printing=False, **kwargs):\n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_texts:\n sources_lm_prob_scores = self.scorer.sentence_score(source_texts, reduce=self.reduce, log=self.log)\n sources_unigram_scores = self.unigram_score(source_texts)\n sources_SLOR_score = self.SLOR_score(source_texts, sources_lm_prob_scores, sources_unigram_scores)\n\n\n\n generateds_lm_prob_scores = self.scorer.sentence_score(generated_texts, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generated_texts)\n generateds_SLOR_score = self.SLOR_score(generated_texts, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_texts)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_texts)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}\n\n def score(self, generated_text, source_text=None, printing=False, **kwargs):\n # sources_lm_prob_score = scorer.sentence_score(source_list, reduce=\"mean\")\n \n sources_SLOR_score, generateds_SLOR_score = None, None\n if source_text:\n source_list = [source_text]\n sources_lm_prob_scores = self.scorer.sentence_score(source_list, reduce=self.reduce, log=self.log)\n sources_unigram_scores = 
self.unigram_score(source_list)\n sources_SLOR_score = self.SLOR_score(source_list, sources_lm_prob_scores, sources_unigram_scores)\n \n \n \n generateds_list = [generated_text]\n generateds_lm_prob_scores = self.scorer.sentence_score(generateds_list, reduce=self.reduce, log=self.log)\n generateds_unigram_scores = self.unigram_score(generateds_list)\n generateds_SLOR_score = self.SLOR_score(generateds_list, generateds_lm_prob_scores, generateds_unigram_scores)\n \n if printing:\n print(\"[source_sents]\", source_text)\n print(\"[source_lm]\", sources_lm_prob_scores)\n print(\"[source_unigram]\", sources_unigram_scores)\n print(\"[source_scores]\", sources_SLOR_score)\n print(\"[generated_sents]\", generated_text)\n print(\"[generated_lm]\", generateds_lm_prob_scores)\n print(\"[generated_unigram]\", generateds_unigram_scores)\n print(\"[generated_scores]\", generateds_SLOR_score)\n return {\"scores\": generateds_SLOR_score, \"source_scores\": sources_SLOR_score}" }, { "identifier": "SaliencyBERTScore", "path": "scoring/saliency_scorer.py", "snippet": "class SaliencyBERTScore:\n def __init__(self, lmscorer = \"bertscore\", lang=\"en\"):\n self.bertscore = evaluate.load(lmscorer)\n self.lang = lang\n\n\n def calc_BERT_score(self, predictions, references, sigmoid):\n results = self.bertscore.compute(predictions=predictions, references=references, lang=self.lang)\n if sigmoid:\n results = expit(results)\n return results\n\n def score_batched(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score(generated_text, source_text, sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}\n\n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n bert_score = self.calc_BERT_score([generated_text], [source_text], sigmoid)\n f1 = bert_score['f1']\n \n if printing:\n print(\"scores: \", str(f1))\n return {\"scores\": f1}" }, { "identifier": "SimplicityTextScore", "path": "scoring/simplicity_scorer.py", "snippet": "class SimplicityTextScore:\n def __init__(self):\n pass\n\n def calc_FRE(self, text, sigmoid):\n min_val = -30\n score = textstat.flesch_reading_ease(text)\n scaled_score = (score - min_val) / (121.22 - min_val)\n # Clamp scaled_score to the range [0, 1]\n scaled_score = max(0, min(scaled_score, 1))\n \n if sigmoid:\n scaled_score = expit(scaled_score)\n \n return scaled_score\n \n \n \n def calc_FKGL(self, text, sigmoid):\n score = max(0,textstat.flesch_kincaid_grade(text))\n if sigmoid:\n score = expit(score)\n return score\n\n def score_batched(self, generated_texts, source_texts=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = [],[]\n \n for text in generated_texts:\n gen_score.append(self.calc_FRE(text, sigmoid))\n \n \n if source_texts:\n for text in source_texts:\n source_score.append(self.calc_FRE(text, sigmoid))\n \n if printing:\n print(\"score: \", gen_score)\n print(\"source_score: \", source_score)\n return {\"scores\": gen_score, \"source_scores\": source_score}\n \n def score(self, generated_text, source_text=None, sigmoid=False, printing=False, **kwargs):\n gen_score, source_score = None, None\n \n gen_score = self.calc_FRE(generated_text, sigmoid)\n \n if source_text:\n source_score = self.calc_FRE(source_text, sigmoid)\n \n if printing:\n print(\"score: \", gen_score)\n print(\"source_score: \", source_score)\n return 
{\"scores\": gen_score, \"source_scores\": source_score}" }, { "identifier": "ScorerWrapper", "path": "scoring/aggregate_scorer.py", "snippet": "class ScorerWrapper:\n def __init__(self, scorers, scoring_method=\"logsum\", batch_size=1):\n assert scoring_method in [\"product\", \"logsum\"], \"Unrecognized `scoring_method`\"\n \n self.scorers = scorers\n self.scoring_method = scoring_method\n\n # if self.scoring_method == \"logsum\":\n # self.score_func = logsum_score\n # elif self.scoring_method == \"product\":\n # self.score_func = product_score\n \n if batch_size > 1:\n exec(\"self.score_func = {}\".format(self.scoring_method+\"_\"+\"score_batched\"))\n else:\n exec(\"self.score_func = {}\").format(self.scoring_method+\"_\"+\"score\")\n self.batch_size = batch_size\n def get_score_names(self):\n return [s[\"name\"] for s in self.scorers]\n \n def score_batched(self, input_texts=None, generated_texts=None, old_kgs=None, new_kgs=None, dels_ents=None, partial=False, printing=False, timings=False, extras={}, progress=False):\n assert len(input_texts) == len(generated_texts) == len(old_kgs) == len(new_kgs) == len(dels_ents), \"Data lengths don't match\"\n \n data_list = []\n for inp, gen, old_kg, new_kg, del_ents in zip(input_texts, generated_texts, old_kgs, new_kgs, dels_ents):\n data_list.append({\"inp\": inp, \"gen\": gen, \"old_kg\": old_kg, \"new_kg\": new_kg, \"del_ents\": del_ents})\n\n if len(data_list) == 0:\n progress = False\n \n for batch in batcher(data_list, batch_size=self.batch_size, progress=progress):\n batch_inputs = [instance_dict[\"inp\"] for instance_dict in batch]\n batch_gens = [instance_dict[\"gen\"] for instance_dict in batch]\n batch_old_kgs = [instance_dict[\"old_kg\"] for instance_dict in batch]\n batch_new_kgs = [instance_dict[\"new_kg\"] for instance_dict in batch]\n batch_dels_ents = [instance_dict[\"del_ents\"] for instance_dict in batch]\n batch_scores = self.score_func(self.scorers, batch_inputs, batch_gens, batch_old_kgs, batch_new_kgs, batch_dels_ents)\n for score_type, scores in batch_scores.items():\n if type(scores) in [torch.Tensor, np.array, np.ndarray]:\n batch_scores[score_type] = scores.tolist()\n\n if printing:\n print(\"[total]\", all_outputs[\"total_scores\"])\n return batch_scores\n \n def score(self, input_text=None, generated_text=None, old_kg=None, new_kg=None, del_ents=None):\n aggregate_score = self.score_func(self.scorers, input_text, generated_text, old_kg, new_kg, del_ents)\n return aggregate_score\n \n\n def __call__(self, graphs, input_text, generated_text, **kwargs):\n return self.score(graphs, input_text, generated_text, **kwargs)" }, { "identifier": "GAPDataloader", "path": "GAP/data_relations_as_nodes.py", "snippet": "class GAPDataloader(DataLoader):\n\n def __init__(self, args, dataset, mode):\n if mode == \"train\":\n sampler = RandomSampler(dataset)\n batch_size = args.train_batch_size\n else:\n sampler = SequentialSampler(dataset)\n batch_size = args.predict_batch_size\n super(GAPDataloader, self).__init__(dataset, sampler=sampler, batch_size=batch_size,\n num_workers=args.num_workers)" }, { "identifier": "EventDataset", "path": "GAP/data_relations_as_nodes.py", "snippet": "class EventDataset(Dataset):\n def __init__(self, logger, args, data, tokenizer, mode):\n self.data = data\n self.tokenizer = tokenizer\n self.topology = {\"entity-entity\": args.entity_entity, \n \"entity-relation\": args.entity_relation,\n \"relation-entity\": args.relation_entity,\n \"relation-relation\": args.relation_relation\n } \n \n \n \n 
print(\"Total samples = {}\".format(len(self.data)))\n\n \n assert type(self.data) == list\n self.args = args\n self.data_type = mode\n self.metric = \"BLEU\"\n self.head_ids, self.rel_ids, self.tail_ids = self.tokenizer.encode(' [head]', add_special_tokens=False), \\\n self.tokenizer.encode(' [relation]', add_special_tokens=False), \\\n self.tokenizer.encode(' [tail]', add_special_tokens=False)\n self.graph_ids, self.text_ids = self.tokenizer.encode(' [graph]', add_special_tokens=False), \\\n self.tokenizer.encode(' [text]', add_special_tokens=False)\n\n if self.args.model_name == \"bart\":\n self.mask_token = self.tokenizer.mask_token\n self.mask_token_id = self.tokenizer.mask_token_id\n else:\n self.mask_token = self.tokenizer.additional_special_tokens[0]\n self.mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.additional_special_tokens[0])\n\n if self.args.model_name == \"bart\":\n if self.args.append_another_bos:\n self.add_bos_id = [self.tokenizer.bos_token_id] * 2\n else:\n self.add_bos_id = [self.tokenizer.bos_token_id]\n else:\n self.add_bos_id = []\n\n def __len__(self):\n return len(self.data)\n \n def graph_size(self,idx):\n entry = self.data[idx]\n kg = entry[0]\n \n kg_list = []\n triple_list = kg.split('<S>')\n triple_list = [triple_list[0]] + ['<S>'+triple for triple in triple_list[1:]]\n triple_list = list(filter(None,triple_list))\n for triple in triple_list:\n head = re.search('<S>(.*)<P>', triple).group(1).strip()\n rel = re.search('<P>(.*)<O>', triple).group(1).strip()\n tail = re.search('<O>(.*)', triple).group(1).strip()\n kg_list.append([head,rel,tail])\n \n \n\n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n \n text_entity, text_relation = self.get_all_entities_per_sample(kg_list)\n entity_change, relation_change = self.get_change_per_sample(text_entity, text_relation)\n return len(entity_change)\n\n def graph_linearize(self, triple, entity_change, head_ids, rel_ids, tail_ids,\n relation_change, cnt_edge, adj_matrix):\n # string_label: encoder ids\n # string_label_tokens: encoder tokens\n if len(triple[0]) == 0:\n return [], '', [], [], cnt_edge, adj_matrix\n nodes, edges = [], []\n string_label = copy.deepcopy(head_ids)\n string_label_tokens = ' <S>'\n nodes.extend([-1] * len(string_label))\n edges.extend([-1] * len(string_label))\n\n\n string_label += entity_change[triple[0]][0]\n string_label_tokens += ' {}'.format(triple[0])\n nodes.extend([entity_change[triple[0]][1]] * len(entity_change[triple[0]][0]))\n edges.extend([-1] * len(entity_change[triple[0]][0]))\n\n\n if len(triple[1]) != 0 and len(triple[2]) != 0:\n rel_label = relation_change[triple[1]]\n rel_ent_label = entity_change[triple[1]][1]\n rel_label_token = copy.deepcopy(triple[1])\n words_label = rel_ids + rel_label + tail_ids + entity_change[triple[2]][0]\n words_label_tokens = ' <P> {} <O> {}'.format(rel_label_token, triple[2])\n nodes.extend(\n ([-1] * len(rel_ids)) + ([entity_change[triple[1]][1]] * len(rel_label)) + ([-1] * len(tail_ids)) + ([entity_change[triple[2]][1]] * len(\n entity_change[triple[2]][0])))\n edges.extend([-1] * len(rel_ids) + [cnt_edge] * len(rel_label) + [-1] * (\n len(tail_ids) + len(entity_change[triple[2]][0])))\n if entity_change[triple[0]][1] < len(adj_matrix) and entity_change[triple[2]][1] < len(adj_matrix):\n\n\n if self.topology['entity-entity']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[2]][1]] = 1\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[0]][1]] = 1\n\n if 
self.topology['entity-relation']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[1]][1]] = 2\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[1]][1]] = 2\n\n if self.topology['relation-entity']:\n adj_matrix[entity_change[triple[1]][1]][entity_change[triple[0]][1]] = 3\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[1]][1]] = 3\n \n if not self.topology['relation-entity'] and not self.topology['relation-relation']:\n adj_matrix[entity_change[triple[1]][1]][entity_change[triple[1]][1]] = 10\n\n if not self.topology['entity-relation'] and not self.topology['entity-entity']:\n adj_matrix[entity_change[triple[0]][1]][entity_change[triple[0]][1]] = 10\n adj_matrix[entity_change[triple[2]][1]][entity_change[triple[2]][1]] = 10\n\n cnt_edge += 1\n string_label += words_label\n string_label_tokens += words_label_tokens\n\n assert len(string_label) == len(nodes) == len(edges)\n\n return string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix\n\n def relation_to_relation_fill(self, node_dict, rel_dict, adj_matrix):\n adj_matrix_temp = np.array(adj_matrix)\n rel_idx_list = []\n for rel in rel_dict.keys():\n rel_idx = node_dict[rel][1]\n rel_idx_list.append(rel_idx)\n adj_matrix_np = np.array(adj_matrix)\n adj_matrix_np_bool = (adj_matrix_np==-1)\n #reassign -1s to 0s\n adj_matrix_np[adj_matrix_np_bool] = 0\n #get squared matrix for r-r\n adj_matrix_sq = adj_matrix_np@adj_matrix_np\n \n #old adj_matrix + squared matrix only r-r\n rel_idx_list = np.array(rel_idx_list, dtype=np.intp)\n adj_matrix_temp[rel_idx_list[:,np.newaxis], rel_idx_list] = (adj_matrix_sq[rel_idx_list][:,rel_idx_list] > 0)*4\n adj_matrix_new = adj_matrix_temp.tolist()\n \n return adj_matrix_new\n \n def get_all_entities_per_sample(self, triple_list):\n text_entity = set()\n text_relation = set()\n for triple in triple_list:\n if len(triple[0]) == 0:\n continue\n if len(triple[1]) != 0 and len(triple[2]) != 0:\n text_relation.add(triple[1])\n text_entity.add(triple[0])\n text_entity.add(triple[2])\n \n text_entity_list = list(text_entity)+list(text_relation)\n text_relation_list = list(text_relation)\n \n return text_entity_list, text_relation_list\n\n def get_change_per_sample(self, text_entity, text_relation):\n # during fine-tuning, we don't mask entities or relations\n ent_change = {}\n total_entity = text_entity\n\n for ent_id in range(len(total_entity)):\n entity_toks = self.tokenizer.encode(\" {}\".format(total_entity[ent_id]), add_special_tokens=False)\n ent_change[total_entity[ent_id]] = [entity_toks, ent_id]\n \n # relation change only includes the relation tokens and ids\n rel_change = {}\n for rel_id in range(len(text_relation)):\n rel_change[text_relation[rel_id]] = self.tokenizer.encode(' {}'.format(text_relation[rel_id]),\n add_special_tokens=False)\n\n return ent_change, rel_change\n\n def truncate_pair_ar(self, a, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n # add_bos_id + graph_ids + a + text_ids + b + eos_token_id\n length_a_b = self.args.max_input_length - len(add_bos_id) - len(graph_ids) - len(text_ids) - 1\n if len(a) > length_a_b:\n a = a[:length_a_b]\n node_ids = node_ids[:length_a_b]\n edge_ids = edge_ids[:length_a_b]\n input_ids = add_bos_id + graph_ids + a + text_ids + [self.tokenizer.eos_token_id]\n input_node_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + node_ids + [-1] * (len(text_ids) + 1)\n input_edge_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + edge_ids + [-1] * (len(text_ids) + 1)\n attn_mask = [1] * len(input_ids) + [0] * 
(self.args.max_input_length - len(input_ids))\n input_ids += [self.tokenizer.pad_token_id] * (self.args.max_input_length - len(input_ids))\n input_node_ids += [-1] * (self.args.max_input_length - len(input_node_ids))\n input_edge_ids += [-1] * (self.args.max_input_length - len(input_edge_ids))\n assert len(input_ids) == len(attn_mask) == self.args.max_input_length == len(input_node_ids) == len(\n input_edge_ids)\n return input_ids, attn_mask, input_node_ids, input_edge_ids\n\n \n def ar_prep_data(self, questions, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n input_ids, input_attn_mask, input_node_ids, input_edge_ids = self.truncate_pair_ar(questions, add_bos_id,\n graph_ids, text_ids,\n node_ids, edge_ids)\n\n return input_ids, input_attn_mask, input_node_ids, input_edge_ids\n\n\n\n def __getitem__(self, idx):\n kg = self.data[idx]\n # print(\"KG: \", kg)\n kg_list = []\n triple_list = kg.split('<S>')\n triple_list = [triple_list[0]] + ['<S>'+triple for triple in triple_list[1:]]\n triple_list = list(filter(None,triple_list))\n for triple in triple_list:\n head = re.search('<S>(.*)<P>', triple).group(1).strip()\n rel = re.search('<P>(.*)<O>', triple).group(1).strip()\n tail = re.search('<O>(.*)', triple).group(1).strip()\n kg_list.append([head,rel,tail])\n \n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n # print(\"kg_list: \", kg_list)\n text_entity, text_relation = self.get_all_entities_per_sample(kg_list)\n entity_change, relation_change = self.get_change_per_sample(text_entity, text_relation)\n adj_matrix = [[-1] * (self.args.max_node_length + 1) for _ in range(self.args.max_node_length + 1)]\n\n cnt_edge = 0\n\n for i, triple in enumerate(kg_list):\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.graph_linearize(\n triple,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n \n strings_label += string_label\n strings_label_tokens += string_label_tokens\n node_ids += nodes\n edge_ids += edges\n if self.topology['relation-relation']:\n adj_matrix = self.relation_to_relation_fill(entity_change, relation_change, adj_matrix)\n \n words_label_ids, words_label_tokens, words_input_ids, words_input_tokens = [], '', [], ''\n# current_text = entry[1]\n \n# for word in current_text.split():\n# word_label_ids = self.tokenizer.encode(\" {}\".format(word), add_special_tokens=False)\n# word_label_tokens = copy.deepcopy(word)\n\n# words_label_ids += word_label_ids\n# words_label_tokens += ' ' + word_label_tokens\n # print(\"strings_label: \", strings_label)\n # print(\"node_ids: \", node_ids)\n # print(\"edge_ids: \", edge_ids)\n # print(\"self.add_bos_id: \", self.add_bos_id)\n # print(\"self.graph_ids: \", self.graph_ids)\n input_ids_ar, attn_mask_ar, input_node_ids_ar, input_edge_ids_ar = \\\n self.ar_prep_data(strings_label, self.add_bos_id, self.graph_ids,\n self.text_ids, node_ids, edge_ids)\n node_length_ar = max(input_node_ids_ar) + 1\n edge_length_ar = max(input_edge_ids_ar) + 1\n \n\n def masked_fill(src, masked_value, fill_value):\n return [src[src_id] if src[src_id] != masked_value and src[src_id] < fill_value else fill_value for src_id\n in range(len(src))]\n\n input_node_ids_ar, input_edge_ids_ar = masked_fill(input_node_ids_ar, -1, self.args.max_node_length), \\\n masked_fill(input_edge_ids_ar, -1, self.args.max_edge_length)\n\n def masked_fill_matrix(adj_matrix_input, masked_value, fill_value):\n adj_matrix_tmp = copy.deepcopy(adj_matrix_input)\n for a_id 
in range(len(adj_matrix_tmp)):\n for b_id in range(len(adj_matrix_tmp)):\n if adj_matrix_tmp[a_id][b_id] == masked_value or adj_matrix_tmp[a_id][b_id] > fill_value:\n adj_matrix_tmp[a_id][b_id] = fill_value\n return adj_matrix_tmp\n\n adj_matrix_ar = masked_fill_matrix(adj_matrix, -1, self.args.max_edge_length)\n\n assert len(input_ids_ar) == len(attn_mask_ar) == self.args.max_input_length == len(input_node_ids_ar) == len(\n input_edge_ids_ar)\n\n input_ids_ar = torch.LongTensor(input_ids_ar)\n attn_mask_ar = torch.LongTensor(attn_mask_ar)\n \n input_node_ids_ar = torch.LongTensor(input_node_ids_ar)\n input_edge_ids_ar = torch.LongTensor(input_edge_ids_ar)\n node_length_ar = torch.LongTensor([node_length_ar])\n edge_length_ar = torch.LongTensor([edge_length_ar])\n adj_matrix_ar = torch.LongTensor(adj_matrix_ar)\n \n return input_ids_ar, attn_mask_ar, input_node_ids_ar, node_length_ar, adj_matrix_ar" }, { "identifier": "WebNLGDataset", "path": "GAP/data_relations_as_nodes.py", "snippet": "class WebNLGDataset(Dataset):\n def __init__(self, logger, args, data_path, tokenizer, mode):\n self.data_path = data_path\n self.tokenizer = tokenizer\n self.topology = {\"entity-entity\": args.entity_entity, \n \"entity-relation\": args.entity_relation,\n \"relation-entity\": args.relation_entity,\n \"relation-relation\": args.relation_relation\n } \n \n with open(self.data_path + '.json', 'r') as f:\n self.data = json.load(f)\n\n print(\"Total samples = {}\".format(len(self.data)))\n\n assert type(self.data) == list\n assert all([\"id\" in d for d in self.data]), self.data[0].keys()\n if type(self.data[0][\"id\"]) == int:\n for i in range(len(self.data)):\n self.data[i][\"id\"] = str(self.data[i][\"id\"])\n\n self.args = args\n self.data_type = mode\n self.metric = \"BLEU\"\n\n self.head_ids, self.rel_ids, self.tail_ids = self.tokenizer.encode(' [head]', add_special_tokens=False), \\\n self.tokenizer.encode(' [relation]', add_special_tokens=False), \\\n self.tokenizer.encode(' [tail]', add_special_tokens=False)\n\n self.graph_ids, self.text_ids = self.tokenizer.encode(' [graph]', add_special_tokens=False), \\\n self.tokenizer.encode(' [text]', add_special_tokens=False)\n\n if self.args.model_name == \"bart\":\n self.mask_token = self.tokenizer.mask_token\n self.mask_token_id = self.tokenizer.mask_token_id\n else:\n self.mask_token = self.tokenizer.additional_special_tokens[0]\n self.mask_token_id = self.tokenizer.convert_tokens_to_ids(self.tokenizer.additional_special_tokens[0])\n\n if self.args.model_name == \"bart\":\n if self.args.append_another_bos:\n self.add_bos_id = [self.tokenizer.bos_token_id] * 2\n else:\n self.add_bos_id = [self.tokenizer.bos_token_id]\n else:\n self.add_bos_id = []\n\n def __len__(self):\n return len(self.data)\n\n def linearize_v2(self, entity, entity_change, head_ids, rel_ids, tail_ids,\n relation_change, cnt_edge, adj_matrix):\n # string_label: encoder ids\n # string_label_tokens: encoder tokens\n\n if len(entity[0]) == 0:\n return [], '', [], [], cnt_edge, adj_matrix\n nodes, edges = [], []\n string_label = copy.deepcopy(head_ids)\n string_label_tokens = ' [head]'\n nodes.extend([-1] * len(string_label))\n edges.extend([-1] * len(string_label))\n\n\n string_label += entity_change[entity[0]][0]\n string_label_tokens += ' {}'.format(entity[0])\n nodes.extend([entity_change[entity[0]][1]] * len(entity_change[entity[0]][0]))\n edges.extend([-1] * len(entity_change[entity[0]][0]))\n\n\n for rel in entity[2]:\n if len(rel[0]) != 0 and len(rel[1]) != 0:\n rel_label = 
relation_change[rel[0]]\n rel_ent_label = entity_change[rel[0]][1]\n rel_label_token = copy.deepcopy(rel[0])\n words_label = rel_ids + rel_label + tail_ids + entity_change[rel[1]][0]\n words_label_tokens = ' [relation] {} [tail] {}'.format(rel_label_token, rel[1])\n nodes.extend(\n ([-1] * len(rel_ids)) + ([entity_change[rel[0]][1]] * len(rel_label)) + ([-1] * len(tail_ids)) + ([entity_change[rel[1]][1]] * len(\n entity_change[rel[1]][0])))\n\n \n edges.extend([-1] * len(rel_ids) + [cnt_edge] * len(rel_label) + [-1] * (\n len(tail_ids) + len(entity_change[rel[1]][0])))\n if entity_change[entity[0]][1] < len(adj_matrix) and entity_change[rel[1]][1] < len(adj_matrix):\n if self.topology['entity-entity']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[rel[1]][1]] = 1\n adj_matrix[entity_change[rel[1]][1]][entity_change[entity[0]][1]] = 1\n\n if self.topology['entity-relation']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[rel[0]][1]] = 2\n adj_matrix[entity_change[rel[1]][1]][entity_change[rel[0]][1]] = 2\n \n if self.topology['relation-entity']:\n adj_matrix[entity_change[rel[0]][1]][entity_change[entity[0]][1]] = 3\n adj_matrix[entity_change[rel[0]][1]][entity_change[rel[1]][1]] = 3\n \n if not self.topology['relation-entity'] and not self.topology['relation-relation']:\n adj_matrix[entity_change[rel[0]][1]][entity_change[rel[0]][1]] = 10\n \n if not self.topology['entity-relation'] and not self.topology['entity-entity']:\n adj_matrix[entity_change[entity[0]][1]][entity_change[entity[0]][1]] = 10\n adj_matrix[entity_change[rel[1]][1]][entity_change[rel[1]][1]] = 10\n\n cnt_edge += 1\n string_label += words_label\n string_label_tokens += words_label_tokens\n\n assert len(string_label) == len(nodes) == len(edges)\n\n return string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix\n\n \n def relation_to_relation_fill(self, node_dict, rel_dict, adj_matrix):\n adj_matrix_temp = np.array(adj_matrix)\n rel_idx_list = []\n for rel in rel_dict.keys():\n rel_idx = node_dict[rel][1]\n rel_idx_list.append(rel_idx)\n adj_matrix_np = np.array(adj_matrix)\n adj_matrix_np_bool = (adj_matrix_np==-1)\n #reassign -1s to 0s\n adj_matrix_np[adj_matrix_np_bool] = 0\n #get squared matrix for r-r\n adj_matrix_sq = adj_matrix_np@adj_matrix_np\n \n #old adj_matrix + squared matrix only r-r\n rel_idx_list = np.array(rel_idx_list, dtype=np.intp)\n adj_matrix_temp[rel_idx_list[:,np.newaxis], rel_idx_list] = (adj_matrix_sq[rel_idx_list][:,rel_idx_list] > 0)*4\n adj_matrix_new = adj_matrix_temp.tolist()\n \n return adj_matrix_new\n \n \n def get_all_entities_per_sample(self, mark_entity_number, mark_entity, entry):\n text_entity = set()\n text_relation = set()\n for entity_id in mark_entity_number:\n entity = entry['kbs'][entity_id]\n if len(entity[0]) == 0:\n continue\n for rel in entity[2]:\n if len(rel[0]) != 0 and len(rel[1]) != 0:\n text_relation.add(rel[0])\n text_entity.add(rel[1])\n\n text_entity_list = list(text_entity)+list(text_relation)\n text_relation_list = list(text_relation)\n for entity_ele in mark_entity:\n if entity_ele in text_entity_list:\n text_entity_list.remove(entity_ele)\n \n return text_entity_list, text_relation_list\n\n def get_change_per_sample(self, mark_entity, text_entity, text_relation):\n # during fine-tuning, we don't mask entities or relations\n ent_change = {}\n total_entity = mark_entity + text_entity\n\n for ent_id in range(len(total_entity)):\n entity_toks = self.tokenizer.encode(\" {}\".format(total_entity[ent_id]), add_special_tokens=False)\n 
ent_change[total_entity[ent_id]] = [entity_toks, ent_id]\n # relation change only includes the relation tokens and ids\n rel_change = {}\n for rel_id in range(len(text_relation)):\n rel_change[text_relation[rel_id]] = self.tokenizer.encode(' {}'.format(text_relation[rel_id]),\n add_special_tokens=False)\n return ent_change, rel_change\n\n def truncate_pair_ar(self, a, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n # add_bos_id + graph_ids + a + text_ids + b + eos_token_id\n length_a_b = self.args.max_input_length - len(add_bos_id) - len(graph_ids) - len(text_ids) - 1\n if len(a) > length_a_b:\n a = a[:length_a_b]\n node_ids = node_ids[:length_a_b]\n edge_ids = edge_ids[:length_a_b]\n input_ids = add_bos_id + graph_ids + a + text_ids + [self.tokenizer.eos_token_id]\n input_node_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + node_ids + [-1] * (len(text_ids) + 1)\n input_edge_ids = [-1] * (len(add_bos_id) + len(graph_ids)) + edge_ids + [-1] * (len(text_ids) + 1)\n attn_mask = [1] * len(input_ids) + [0] * (self.args.max_input_length - len(input_ids))\n input_ids += [self.tokenizer.pad_token_id] * (self.args.max_input_length - len(input_ids))\n input_node_ids += [-1] * (self.args.max_input_length - len(input_node_ids))\n input_edge_ids += [-1] * (self.args.max_input_length - len(input_edge_ids))\n assert len(input_ids) == len(attn_mask) == self.args.max_input_length == len(input_node_ids) == len(\n input_edge_ids)\n return input_ids, attn_mask, input_node_ids, input_edge_ids\n\n def ar_prep_data(self, questions, add_bos_id, graph_ids, text_ids, node_ids, edge_ids):\n input_ids, input_attn_mask, input_node_ids, input_edge_ids = self.truncate_pair_ar(questions, add_bos_id,\n graph_ids, text_ids,\n node_ids, edge_ids)\n\n return input_ids, input_attn_mask, input_node_ids, input_edge_ids\n \n\n\n def __getitem__(self, idx):\n\n entry = self.data[idx]\n\n entities = []\n for _ in entry['kbs']:\n entities.append(_)\n\n strings_label = []\n node_ids = []\n edge_ids = []\n strings_label_tokens = ''\n\n # mark_entity: entities with KB numbers which are important for this task\n # text_entity: entities without KB numbers but only with text, which are less important\n mark_entity = [entry['kbs'][ele_entity][0] for ele_entity in entities]\n mark_entity_number = entities\n text_entity, text_relation = self.get_all_entities_per_sample(mark_entity_number, mark_entity, entry)\n entity_change, relation_change = self.get_change_per_sample(mark_entity, text_entity, text_relation)\n total_entity = mark_entity + text_entity\n adj_matrix = [[-1] * (self.args.max_node_length + 1) for _ in range(self.args.max_node_length + 1)]\n\n cnt_edge = 0\n\n if 'title' in entry:\n entity = self.knowledge[entry['title_kb_id']]\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.linearize_v2(\n entity,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n\n strings_label += string_label\n strings_label_tokens += string_label_tokens\n\n for i, entity_id in enumerate(entities):\n entity = entry['kbs'][entity_id]\n string_label, string_label_tokens, nodes, edges, cnt_edge, adj_matrix = self.linearize_v2(\n entity,\n entity_change,\n self.head_ids,\n self.rel_ids, self.tail_ids,\n relation_change, cnt_edge, adj_matrix)\n \n strings_label += string_label\n strings_label_tokens += string_label_tokens\n node_ids += nodes\n edge_ids += edges\n \n if self.topology['relation-relation']:\n adj_matrix = self.relation_to_relation_fill(entity_change, 
relation_change, adj_matrix)\n \n\n words_label_ids, words_label_tokens, words_input_ids, words_input_tokens = [], '', [], ''\n\n\n input_ids_ar, attn_mask_ar, input_node_ids_ar, input_edge_ids_ar = \\\n self.ar_prep_data(strings_label, self.add_bos_id, self.graph_ids,\n self.text_ids, node_ids, edge_ids)\n\n node_length_ar = max(input_node_ids_ar) + 1\n edge_length_ar = max(input_edge_ids_ar) + 1\n \n\n def masked_fill(src, masked_value, fill_value):\n return [src[src_id] if src[src_id] != masked_value and src[src_id] < fill_value else fill_value for src_id\n in range(len(src))]\n\n input_node_ids_ar, input_edge_ids_ar = masked_fill(input_node_ids_ar, -1, self.args.max_node_length), \\\n masked_fill(input_edge_ids_ar, -1, self.args.max_edge_length)\n\n def masked_fill_matrix(adj_matrix_input, masked_value, fill_value):\n adj_matrix_tmp = copy.deepcopy(adj_matrix_input)\n for a_id in range(len(adj_matrix_tmp)):\n for b_id in range(len(adj_matrix_tmp)):\n if adj_matrix_tmp[a_id][b_id] == masked_value or adj_matrix_tmp[a_id][b_id] > fill_value:\n adj_matrix_tmp[a_id][b_id] = fill_value\n return adj_matrix_tmp\n\n adj_matrix_ar = masked_fill_matrix(adj_matrix, -1, self.args.max_edge_length)\n\n assert len(input_ids_ar) == len(attn_mask_ar) == self.args.max_input_length == len(input_node_ids_ar) == len(\n input_edge_ids_ar)\n\n input_ids_ar = torch.LongTensor(input_ids_ar)\n attn_mask_ar = torch.LongTensor(attn_mask_ar)\n \n input_node_ids_ar = torch.LongTensor(input_node_ids_ar)\n input_edge_ids_ar = torch.LongTensor(input_edge_ids_ar)\n node_length_ar = torch.LongTensor([node_length_ar])\n edge_length_ar = torch.LongTensor([edge_length_ar])\n adj_matrix_ar = torch.LongTensor(adj_matrix_ar)\n \n return input_ids_ar, attn_mask_ar, input_node_ids_ar, node_length_ar, adj_matrix_ar" }, { "identifier": "evaluate_bleu", "path": "GAP/data_relations_as_nodes.py", "snippet": "def evaluate_bleu(data_ref, data_sys):\n coco_eval = run_coco_eval(data_ref, data_sys)\n scores = {metric: score for metric, score in list(coco_eval.eval.items())}\n return scores[\"Bleu_4\"]" }, { "identifier": "get_t_emb_dim", "path": "GAP/data_relations_as_nodes.py", "snippet": "def get_t_emb_dim(args):\n t_emb_dim = int(args.entity_entity)+int(args.entity_relation)\\\n +int(args.relation_entity)+int(args.relation_relation)+1\n return t_emb_dim" }, { "identifier": "GAPBartForConditionalGeneration", "path": "GAP/modeling_gap_type.py", "snippet": "class GAPBartForConditionalGeneration(BartForConditionalGeneration):\n def __init__(self, config, **kwargs):\n super().__init__(config)\n base_model = GAPBartModel(config,**kwargs)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n \n def forward(self, input_ids, attention_mask=None, encoder_outputs=None,\n decoder_input_ids=None, decoder_attention_mask=None, input_node_ids=None,\n node_length=None, adj_matrix=None, decoder_whole_ids=None, decoder_cached_states=None,\n use_cache=False, is_training=False):\n\n if is_training:\n _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)\n else:\n _decoder_input_ids = decoder_input_ids\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=_decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n input_node_ids=input_node_ids,\n node_length=node_length,\n adj_matrix=adj_matrix,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n )\n 
lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n if is_training:\n loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)\n loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),\n decoder_input_ids.view(-1))\n return loss\n return (lm_logits, ) + outputs[1:]\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n max_length: Optional[int] = None,\n min_length: Optional[int] = None,\n do_sample: Optional[bool] = None,\n early_stopping: Optional[bool] = None,\n num_beams: Optional[int] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n repetition_penalty: Optional[float] = None,\n bad_words_ids: Optional[Iterable[int]] = None,\n bos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[int] = None,\n length_penalty: Optional[float] = None,\n no_repeat_ngram_size: Optional[int] = None,\n num_return_sequences: Optional[int] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n input_node_ids=None,\n node_length=None,\n adj_matrix=None,\n decoder_start_token_id: Optional[int] = None,\n use_cache: Optional[bool] = None,\n **model_specific_kwargs\n ) -> torch.LongTensor:\n r\"\"\" Generates sequences for models with a LM head. The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.\n\n min_length: (`optional`) int\n The min length of the sequence to be generated. Between 0 and infinity. Default to 0.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n early_stopping: (`optional`) bool\n if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n pad_token_id: (`optional`) int\n Padding token. Default to specicic model pad_token_id or None if it does not exist.\n\n bos_token_id: (`optional`) int\n BOS token. 
Defaults to `bos_token_id` as defined in the models config.\n\n eos_token_id: (`optional`) int\n EOS token. Defaults to `eos_token_id` as defined in the models config.\n\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n no_repeat_ngram_size: (`optional`) int\n If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.\n bad_words_ids: (`optional`) list of lists of int\n `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. Default to 1.\n\n attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n Defaults to `None`.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n decoder_start_token_id=None: (`optional`) int\n Start token id for the decoder. Defaults to ``decoder_start_token_id`` as defined the model's config or to the ``bos_token_id``\n if no ``decoder_start_token_id`` is found in the config.\n This is only relevant for encoder-decoder models.\n\n use_cache: (`optional`) bool\n If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.\n\n model_specific_kwargs: (`optional`) dict\n Additional model specific kwargs will be forwarded to the `forward` function of the model.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n from transformers import AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer. 
from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache.\n input_context = 'My cute dog' # \"Legal\" is one of the control codes for ctrl\n bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. 
`OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n min_length = min_length if min_length is not None else self.config.min_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n no_repeat_ngram_size = (\n no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size\n )\n bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n decoder_start_token_id = (\n decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(min_length, int) and min_length >= 0, \"`min_length` should be a positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(early_stopping, bool), \"`early_stopping` should be a boolean.\"\n assert isinstance(use_cache, bool), \"`use_cache` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a positive integer.\"\n assert (eos_token_id is None) or (\n isinstance(eos_token_id, int) and (eos_token_id >= 0)\n ), \"`eos_token_id` should be a positive integer.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0\n ), \"`no_repeat_ngram_size` should be a positive integer.\"\n assert (\n 
isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n assert (\n bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)\n ), \"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n # not allow to duplicate outputs when greedy decoding\n if do_sample is False:\n if num_beams == 1:\n # no_beam_search greedy generation conditions\n assert (\n num_return_sequences == 1\n ), \"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1\"\n\n else:\n # beam_search greedy generation conditions\n assert (\n num_beams >= num_return_sequences\n ), \"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences\"\n\n # create attention mask if necessary\n # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140\n if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):\n attention_mask = input_ids.ne(pad_token_id).long()\n elif attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n # set pad_token_id to eos_token_id if not set. 
Important that this is done after\n # attention_mask is created\n if pad_token_id is None and eos_token_id is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_id)\n )\n pad_token_id = eos_token_id\n\n # current position and vocab size\n if hasattr(self.config, \"vocab_size\"):\n vocab_size = self.config.vocab_size\n elif (\n self.config.is_encoder_decoder\n and hasattr(self.config, \"decoder\")\n and hasattr(self.config.decoder, \"vocab_size\")\n ):\n vocab_size = self.config.decoder.vocab_size\n\n # set effective batch size and effective batch multiplier according to do_sample\n if do_sample:\n effective_batch_size = batch_size * num_return_sequences\n effective_batch_mult = num_return_sequences\n else:\n effective_batch_size = batch_size\n effective_batch_mult = 1\n\n if self.config.is_encoder_decoder:\n if decoder_start_token_id is None:\n decoder_start_token_id = bos_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation\"\n assert hasattr(self, \"get_encoder\"), \"{} should have a 'get_encoder' function defined\".format(self)\n assert callable(self.get_encoder), \"{} should be a method\".format(self.get_encoder)\n\n # get encoder and store encoder outputs\n encoder = self.get_encoder()\n\n # add structural information when encoding\n encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask, input_node_ids=input_node_ids,\n node_length=node_length, adj_matrix=adj_matrix)\n\n # Expand input ids if num_beams > 1 or num_return_sequences > 1\n if num_return_sequences > 1 or num_beams > 1:\n input_ids_len = input_ids.shape[-1]\n input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)\n attention_mask = attention_mask.unsqueeze(1).expand(\n batch_size, effective_batch_mult * num_beams, input_ids_len\n )\n\n input_ids = input_ids.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n attention_mask = attention_mask.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n\n if self.config.is_encoder_decoder:\n # create empty decoder_input_ids\n input_ids = torch.full(\n (effective_batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n cur_len = 1\n\n assert (\n batch_size == encoder_outputs[0].shape[0]\n ), f\"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} \"\n\n # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)\n expanded_batch_idxs = (\n torch.arange(batch_size)\n .view(-1, 1)\n .repeat(1, num_beams * effective_batch_mult)\n .view(-1)\n .to(input_ids.device)\n )\n # expand encoder_outputs\n encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])\n\n else:\n encoder_outputs = None\n cur_len = input_ids.shape[-1]\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n early_stopping=early_stopping,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n 
bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n num_return_sequences=num_return_sequences,\n length_penalty=length_penalty,\n num_beams=num_beams,\n vocab_size=vocab_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n\n return output" }, { "identifier": "GAPBartForConditionalGeneration", "path": "GAP/modeling_gap.py", "snippet": "class GAPBartForConditionalGeneration(BartForConditionalGeneration):\n def __init__(self, config):\n super().__init__(config)\n base_model = GAPBartModel(config)\n self.model = base_model\n self.register_buffer(\"final_logits_bias\", torch.zeros((1, self.model.shared.num_embeddings)))\n\n def forward(self, input_ids, attention_mask=None, encoder_outputs=None,\n decoder_input_ids=None, decoder_attention_mask=None, input_node_ids=None, \n node_length=None, adj_matrix=None, decoder_whole_ids=None, decoder_cached_states=None,\n use_cache=False, is_training=False):\n\n if is_training:\n _decoder_input_ids = shift_tokens_right(decoder_input_ids, self.config.pad_token_id)\n else:\n _decoder_input_ids = decoder_input_ids\n\n outputs = self.model(\n input_ids,\n attention_mask=attention_mask,\n encoder_outputs=encoder_outputs,\n decoder_input_ids=_decoder_input_ids,\n decoder_attention_mask=decoder_attention_mask,\n input_node_ids=input_node_ids,\n node_length=node_length,\n adj_matrix=adj_matrix,\n decoder_cached_states=decoder_cached_states,\n use_cache=use_cache,\n )\n lm_logits = F.linear(outputs[0], self.model.shared.weight, bias=self.final_logits_bias)\n if is_training:\n loss_fct = nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)\n loss = loss_fct(lm_logits.view(-1, self.config.vocab_size),\n decoder_input_ids.view(-1))\n return loss\n return (lm_logits, ) + outputs[1:]\n\n @torch.no_grad()\n def generate(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n max_length: Optional[int] = None,\n min_length: Optional[int] = None,\n do_sample: Optional[bool] = None,\n early_stopping: Optional[bool] = None,\n num_beams: Optional[int] = None,\n temperature: Optional[float] = None,\n top_k: Optional[int] = None,\n top_p: Optional[float] = None,\n repetition_penalty: Optional[float] = None,\n bad_words_ids: Optional[Iterable[int]] = None,\n bos_token_id: Optional[int] = None,\n pad_token_id: Optional[int] = None,\n eos_token_id: Optional[int] = None,\n length_penalty: Optional[float] = None,\n no_repeat_ngram_size: Optional[int] = None,\n num_return_sequences: Optional[int] = None,\n attention_mask: Optional[torch.LongTensor] = None,\n input_node_ids=None,\n node_length=None,\n adj_matrix=None,\n decoder_start_token_id: Optional[int] = None,\n use_cache: Optional[bool] = None,\n **model_specific_kwargs\n ) -> torch.LongTensor:\n r\"\"\" Generates sequences for models with a LM head. 
The method currently supports greedy decoding, beam-search decoding, sampling with temperature, sampling with top-k or nucleus sampling.\n\n Adapted in part from `Facebook's XLM beam search code`_.\n\n .. _`Facebook's XLM beam search code`:\n https://github.com/facebookresearch/XLM/blob/9e6f6814d17be4fe5b15f2e6c43eb2b2d76daeb4/src/model/transformer.py#L529\n\n\n Parameters:\n\n input_ids: (`optional`) `torch.LongTensor` of shape `(batch_size, sequence_length)`\n The sequence used as a prompt for the generation. If `None` the method initializes\n it as an empty `torch.LongTensor` of shape `(1,)`.\n\n max_length: (`optional`) int\n The max length of the sequence to be generated. Between `min_length` and infinity. Default to 20.\n\n min_length: (`optional`) int\n The min length of the sequence to be generated. Between 0 and infinity. Default to 0.\n\n do_sample: (`optional`) bool\n If set to `False` greedy decoding is used. Otherwise sampling is used. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n early_stopping: (`optional`) bool\n if set to `True` beam search is stopped when at least `num_beams` sentences finished per batch. Defaults to `False` as defined in `configuration_utils.PretrainedConfig`.\n\n num_beams: (`optional`) int\n Number of beams for beam search. Must be between 1 and infinity. 1 means no beam search. Default to 1.\n\n temperature: (`optional`) float\n The value used to module the next token probabilities. Must be strictly positive. Default to 1.0.\n\n top_k: (`optional`) int\n The number of highest probability vocabulary tokens to keep for top-k-filtering. Between 1 and infinity. Default to 50.\n\n top_p: (`optional`) float\n The cumulative probability of parameter highest probability vocabulary tokens to keep for nucleus sampling. Must be between 0 and 1. Default to 1.\n\n repetition_penalty: (`optional`) float\n The parameter for repetition penalty. Between 1.0 and infinity. 1.0 means no penalty. Default to 1.0.\n\n pad_token_id: (`optional`) int\n Padding token. Default to specicic model pad_token_id or None if it does not exist.\n\n bos_token_id: (`optional`) int\n BOS token. Defaults to `bos_token_id` as defined in the models config.\n\n eos_token_id: (`optional`) int\n EOS token. Defaults to `eos_token_id` as defined in the models config.\n\n length_penalty: (`optional`) float\n Exponential penalty to the length. Default to 1.\n\n no_repeat_ngram_size: (`optional`) int\n If set to int > 0, all ngrams of size `no_repeat_ngram_size` can only occur once.\n bad_words_ids: (`optional`) list of lists of int\n `bad_words_ids` contains tokens that are not allowed to be generated. In order to get the tokens of the words that should not appear in the generated text, use `tokenizer.encode(bad_word, add_prefix_space=True)`.\n\n num_return_sequences: (`optional`) int\n The number of independently computed returned sequences for each element in the batch. Default to 1.\n\n attention_mask (`optional`) obj: `torch.LongTensor` of same shape as `input_ids`\n Mask to avoid performing attention on padding token indices.\n Mask values selected in ``[0, 1]``:\n ``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.\n Defaults to `None`.\n\n `What are attention masks? <../glossary.html#attention-mask>`__\n\n decoder_start_token_id=None: (`optional`) int\n Start token id for the decoder. 
Defaults to ``decoder_start_token_id`` as defined the model's config or to the ``bos_token_id``\n if no ``decoder_start_token_id`` is found in the config.\n This is only relevant for encoder-decoder models.\n\n use_cache: (`optional`) bool\n If `use_cache` is True, past key values are used to speed up decoding if applicable to model. Defaults to `True`.\n\n model_specific_kwargs: (`optional`) dict\n Additional model specific kwargs will be forwarded to the `forward` function of the model.\n\n Return:\n\n output: `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`\n sequence_length is either equal to max_length or shorter if all batches finished early due to the `eos_token_id`\n\n Examples::\n\n from transformers import AutoTokenizer, AutoModelForCausalLM\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n outputs = model.generate(max_length=40) # do greedy decoding\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('openai-gpt') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('openai-gpt') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, num_beams=5, num_return_sequences=3, temperature=1.5) # generate 3 independent sequences using beam search decoding (5 beams) with sampling from initial context 'The dog'\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('distilgpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('distilgpt2') # Download model and configuration from S3 and cache.\n input_context = 'The dog'\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=40, temperature=0.7, num_return_sequences=3, do_sample=True) # 3 generate sequences using by sampling\n for i in range(3): # 3 output sequences were generated\n print('Generated {}: {}'.format(i, tokenizer.decode(outputs[i], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('ctrl') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('ctrl') # Download model and configuration from S3 and cache.\n input_context = 'Legal My neighbor is' # \"Legal\" is one of the control codes for ctrl\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = model.generate(input_ids=input_ids, max_length=50, temperature=0.7, repetition_penalty=1.2) # generate sequences\n print('Generated: {}'.format(tokenizer.decode(outputs[0], skip_special_tokens=True)))\n\n tokenizer = AutoTokenizer.from_pretrained('gpt2') # Initialize tokenizer\n model = AutoModelForCausalLM.from_pretrained('gpt2') # Download model and configuration from S3 and cache.\n input_context = 'My cute dog' # \"Legal\" is one of the control codes for ctrl\n bad_words_ids = [tokenizer.encode(bad_word, add_prefix_space=True) for bad_word in ['idiot', 'stupid', 'shut up']]\n input_ids = tokenizer.encode(input_context, return_tensors='pt') # encode input context\n outputs = 
model.generate(input_ids=input_ids, max_length=100, do_sample=True, bad_words_ids=bad_words_ids) # generate sequences without allowing bad_words to be generated\n \"\"\"\n\n # We cannot generate if the model does not have a LM head\n if self.get_output_embeddings() is None:\n raise AttributeError(\n \"You tried to generate sequences with a model that does not have a LM Head.\"\n \"Please use another model class (e.g. `OpenAIGPTLMHeadModel`, `XLNetLMHeadModel`, `GPT2LMHeadModel`, `CTRLLMHeadModel`, `T5WithLMHeadModel`, `TransfoXLLMHeadModel`, `XLMWithLMHeadModel`, `BartForConditionalGeneration` )\"\n )\n\n max_length = max_length if max_length is not None else self.config.max_length\n min_length = min_length if min_length is not None else self.config.min_length\n do_sample = do_sample if do_sample is not None else self.config.do_sample\n early_stopping = early_stopping if early_stopping is not None else self.config.early_stopping\n use_cache = use_cache if use_cache is not None else self.config.use_cache\n num_beams = num_beams if num_beams is not None else self.config.num_beams\n temperature = temperature if temperature is not None else self.config.temperature\n top_k = top_k if top_k is not None else self.config.top_k\n top_p = top_p if top_p is not None else self.config.top_p\n repetition_penalty = repetition_penalty if repetition_penalty is not None else self.config.repetition_penalty\n bos_token_id = bos_token_id if bos_token_id is not None else self.config.bos_token_id\n pad_token_id = pad_token_id if pad_token_id is not None else self.config.pad_token_id\n eos_token_id = eos_token_id if eos_token_id is not None else self.config.eos_token_id\n length_penalty = length_penalty if length_penalty is not None else self.config.length_penalty\n no_repeat_ngram_size = (\n no_repeat_ngram_size if no_repeat_ngram_size is not None else self.config.no_repeat_ngram_size\n )\n bad_words_ids = bad_words_ids if bad_words_ids is not None else self.config.bad_words_ids\n num_return_sequences = (\n num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences\n )\n decoder_start_token_id = (\n decoder_start_token_id if decoder_start_token_id is not None else self.config.decoder_start_token_id\n )\n\n if input_ids is not None:\n batch_size = input_ids.shape[0] # overriden by the input batch_size\n else:\n batch_size = 1\n\n assert isinstance(max_length, int) and max_length > 0, \"`max_length` should be a strictly positive integer.\"\n assert isinstance(min_length, int) and min_length >= 0, \"`min_length` should be a positive integer.\"\n assert isinstance(do_sample, bool), \"`do_sample` should be a boolean.\"\n assert isinstance(early_stopping, bool), \"`early_stopping` should be a boolean.\"\n assert isinstance(use_cache, bool), \"`use_cache` should be a boolean.\"\n assert isinstance(num_beams, int) and num_beams > 0, \"`num_beams` should be a strictly positive integer.\"\n assert temperature > 0, \"`temperature` should be strictly positive.\"\n assert isinstance(top_k, int) and top_k >= 0, \"`top_k` should be a positive integer.\"\n assert 0 <= top_p <= 1, \"`top_p` should be between 0 and 1.\"\n assert repetition_penalty >= 1.0, \"`repetition_penalty` should be >= 1.\"\n assert input_ids is not None or (\n isinstance(bos_token_id, int) and bos_token_id >= 0\n ), \"If input_ids is not defined, `bos_token_id` should be a positive integer.\"\n assert pad_token_id is None or (\n isinstance(pad_token_id, int) and (pad_token_id >= 0)\n ), \"`pad_token_id` should be a 
positive integer.\"\n assert (eos_token_id is None) or (\n isinstance(eos_token_id, int) and (eos_token_id >= 0)\n ), \"`eos_token_id` should be a positive integer.\"\n assert length_penalty > 0, \"`length_penalty` should be strictly positive.\"\n assert (\n isinstance(no_repeat_ngram_size, int) and no_repeat_ngram_size >= 0\n ), \"`no_repeat_ngram_size` should be a positive integer.\"\n assert (\n isinstance(num_return_sequences, int) and num_return_sequences > 0\n ), \"`num_return_sequences` should be a strictly positive integer.\"\n assert (\n bad_words_ids is None or isinstance(bad_words_ids, list) and isinstance(bad_words_ids[0], list)\n ), \"`bad_words_ids` is either `None` or a list of lists of tokens that should not be generated\"\n\n if input_ids is None:\n assert isinstance(bos_token_id, int) and bos_token_id >= 0, (\n \"you should either supply a context to complete as `input_ids` input \"\n \"or a `bos_token_id` (integer >= 0) as a first token to start the generation.\"\n )\n input_ids = torch.full(\n (batch_size, 1), bos_token_id, dtype=torch.long, device=next(self.parameters()).device,\n )\n else:\n assert input_ids.dim() == 2, \"Input prompt should be of shape (batch_size, sequence length).\"\n\n # not allow to duplicate outputs when greedy decoding\n if do_sample is False:\n if num_beams == 1:\n # no_beam_search greedy generation conditions\n assert (\n num_return_sequences == 1\n ), \"Greedy decoding will always produce the same output for num_beams == 1 and num_return_sequences > 1. Please set num_return_sequences = 1\"\n\n else:\n # beam_search greedy generation conditions\n assert (\n num_beams >= num_return_sequences\n ), \"Greedy beam search decoding cannot return more sequences than it has beams. Please set num_beams >= num_return_sequences\"\n\n # create attention mask if necessary\n # TODO (PVP): this should later be handled by the forward fn() in each model in the future see PR 3140\n if (attention_mask is None) and (pad_token_id is not None) and (pad_token_id in input_ids):\n attention_mask = input_ids.ne(pad_token_id).long()\n elif attention_mask is None:\n attention_mask = input_ids.new_ones(input_ids.shape)\n\n # set pad_token_id to eos_token_id if not set. 
Important that this is done after\n # attention_mask is created\n if pad_token_id is None and eos_token_id is not None:\n logger.warning(\n \"Setting `pad_token_id` to {} (first `eos_token_id`) to generate sequence\".format(eos_token_id)\n )\n pad_token_id = eos_token_id\n\n # current position and vocab size\n if hasattr(self.config, \"vocab_size\"):\n vocab_size = self.config.vocab_size\n elif (\n self.config.is_encoder_decoder\n and hasattr(self.config, \"decoder\")\n and hasattr(self.config.decoder, \"vocab_size\")\n ):\n vocab_size = self.config.decoder.vocab_size\n\n # set effective batch size and effective batch multiplier according to do_sample\n if do_sample:\n effective_batch_size = batch_size * num_return_sequences\n effective_batch_mult = num_return_sequences\n else:\n effective_batch_size = batch_size\n effective_batch_mult = 1\n\n if self.config.is_encoder_decoder:\n if decoder_start_token_id is None:\n decoder_start_token_id = bos_token_id\n\n assert (\n decoder_start_token_id is not None\n ), \"decoder_start_token_id or bos_token_id has to be defined for encoder-decoder generation\"\n assert hasattr(self, \"get_encoder\"), \"{} should have a 'get_encoder' function defined\".format(self)\n assert callable(self.get_encoder), \"{} should be a method\".format(self.get_encoder)\n\n # get encoder and store encoder outputs\n encoder = self.get_encoder()\n\n # add structural information when encoding\n encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask, input_node_ids=input_node_ids,\n node_length=node_length, adj_matrix=adj_matrix)\n\n # Expand input ids if num_beams > 1 or num_return_sequences > 1\n if num_return_sequences > 1 or num_beams > 1:\n input_ids_len = input_ids.shape[-1]\n input_ids = input_ids.unsqueeze(1).expand(batch_size, effective_batch_mult * num_beams, input_ids_len)\n attention_mask = attention_mask.unsqueeze(1).expand(\n batch_size, effective_batch_mult * num_beams, input_ids_len\n )\n\n input_ids = input_ids.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n attention_mask = attention_mask.contiguous().view(\n effective_batch_size * num_beams, input_ids_len\n ) # shape: (batch_size * num_return_sequences * num_beams, cur_len)\n\n if self.config.is_encoder_decoder:\n # create empty decoder_input_ids\n input_ids = torch.full(\n (effective_batch_size * num_beams, 1),\n decoder_start_token_id,\n dtype=torch.long,\n device=next(self.parameters()).device,\n )\n cur_len = 1\n\n assert (\n batch_size == encoder_outputs[0].shape[0]\n ), f\"expected encoder_outputs[0] to have 1st dimension bs={batch_size}, got {encoder_outputs[0].shape[0]} \"\n\n # expand batch_idx to assign correct encoder output for expanded input_ids (due to num_beams > 1 and num_return_sequences > 1)\n expanded_batch_idxs = (\n torch.arange(batch_size)\n .view(-1, 1)\n .repeat(1, num_beams * effective_batch_mult)\n .view(-1)\n .to(input_ids.device)\n )\n # expand encoder_outputs\n encoder_outputs = (encoder_outputs[0].index_select(0, expanded_batch_idxs), *encoder_outputs[1:])\n\n else:\n encoder_outputs = None\n cur_len = input_ids.shape[-1]\n\n if num_beams > 1:\n output = self._generate_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n early_stopping=early_stopping,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n 
bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n num_return_sequences=num_return_sequences,\n length_penalty=length_penalty,\n num_beams=num_beams,\n vocab_size=vocab_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n else:\n output = self._generate_no_beam_search(\n input_ids,\n cur_len=cur_len,\n max_length=max_length,\n min_length=min_length,\n do_sample=do_sample,\n temperature=temperature,\n top_k=top_k,\n top_p=top_p,\n repetition_penalty=repetition_penalty,\n no_repeat_ngram_size=no_repeat_ngram_size,\n bad_words_ids=bad_words_ids,\n pad_token_id=pad_token_id,\n eos_token_id=eos_token_id,\n batch_size=effective_batch_size,\n encoder_outputs=encoder_outputs,\n attention_mask=attention_mask,\n use_cache=use_cache,\n model_specific_kwargs=model_specific_kwargs,\n )\n\n return output" } ]
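The `generate` override above departs from the stock BART implementation in one place: the encoder call additionally receives `input_node_ids`, `node_length`, and `adj_matrix`, so graph structure is injected before the beam-search/sampling machinery runs. A minimal usage sketch, assuming a trained checkpoint path and purely illustrative graph-tensor shapes (the real layout is produced by the GAP dataloader, which is not shown here):

import torch
from transformers import BartTokenizer
from GAP.modeling_gap import GAPBartForConditionalGeneration

tokenizer = BartTokenizer.from_pretrained('facebook/bart-base')  # assumption: tokenizer matching the checkpoint
model = GAPBartForConditionalGeneration.from_pretrained('path/to/gap_checkpoint')  # hypothetical path
model.eval()

enc = tokenizer('<H> Alan_Bean <R> occupation <T> astronaut', return_tensors='pt')
n_tokens = enc['input_ids'].shape[1]

# Graph-side tensors are placeholders here; in practice they come from the GAP dataloader.
input_node_ids = torch.zeros(1, n_tokens, dtype=torch.long)  # token-to-node map (assumed layout)
node_length = torch.tensor([1])                              # node count per example (assumed)
adj_matrix = torch.ones(1, 1, 1, dtype=torch.long)           # node adjacency (assumed)

output_ids = model.generate(input_ids=enc['input_ids'], attention_mask=enc['attention_mask'],
                            input_node_ids=input_node_ids, node_length=node_length, adj_matrix=adj_matrix,
                            num_beams=5, max_length=64, early_stopping=True)
print('Generated: {}'.format(tokenizer.decode(output_ids[0], skip_special_tokens=True)))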
import os import json import numpy as np import pandas as pd import torch import random from collections import defaultdict from transformers import BartTokenizer, T5Tokenizer from transformers import AdamW, get_linear_schedule_with_warmup from utils import * from scoring.fluency_scorer import FluencyScorer from scoring.saliency_scorer import SaliencyBERTScore from scoring.simplicity_scorer import SimplicityTextScore from scoring.guardrails import * from scoring.aggregate_scorer import ScorerWrapper from GAP.data_relations_as_nodes import GAPDataloader, EventDataset, WebNLGDataset from GAP.data_relations_as_nodes import evaluate_bleu, get_t_emb_dim from tqdm import tqdm, trange from rake_nltk import Rake from evaluate import load from sentence_similarity import sentence_similarity from GAP.modeling_gap_type import GAPBartForConditionalGeneration as GAP_Type_model from GAP.modeling_gap import GAPBartForConditionalGeneration as GAP_model
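Among these imports, `AdamW` and `get_linear_schedule_with_warmup` imply the usual fine-tuning wiring around the GAP model. A sketch of that loop body, reusing the imports above; the learning rate, step counts, and the contents of `batch` are assumptions, not values taken from this script:

# `model` is the GAP model loaded in run(); hyperparameters are illustrative only.
optimizer = AdamW(model.parameters(), lr=3e-5, weight_decay=0.0)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=500, num_training_steps=20000)

# Assumed: `batch` holds input_ids / attention_mask / decoder_input_ids plus the graph
# tensors (input_node_ids, node_length, adj_matrix) produced by the GAP dataloader.
loss = model(**batch, is_training=True)  # GAP's forward returns the LM loss when is_training=True
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()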
21357
# import yake
bertscore = load("bertscore")

## sentence-similarity model used for merging phrases
phrase_model = sentence_similarity(model_name='distilbert-base-uncased', embedding_type='cls_token_embedding')

## NER guardrail for sentence checking
ner_check = NERInaccuracyPenalty()


def run(args, logger):
    # load the graph-to-text model and its tokenizer
    checkpoint = args.model_path
    tokenizer_path = args.tokenizer_path
    tokenizer = BartTokenizer.from_pretrained(tokenizer_path)

    n_gpu = torch.cuda.device_count()
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.type_encoding:
        t_emb_dim = get_t_emb_dim(args)
        model = GAP_Type_model.from_pretrained(checkpoint, t_emb_dim=t_emb_dim)
    else:
model = GAP_model.from_pretrained(checkpoint)
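This `else:` branch is exactly what the line above completes. Stitched back together (no new logic, only the crop plus that line), the checkpoint-loading code reads:

    if args.type_encoding:
        t_emb_dim = get_t_emb_dim(args)
        model = GAP_Type_model.from_pretrained(checkpoint, t_emb_dim=t_emb_dim)  # type-encoding variant
    else:
        model = GAP_model.from_pretrained(checkpoint)  # plain GAP checkpoint (the completed line)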
8
2023-10-24 13:24:23+00:00
24k
S-LoRA/S-LoRA
slora/server/router/manager.py
[ { "identifier": "SamplingParams", "path": "slora/server/sampling_params.py", "snippet": "class SamplingParams:\n\n def __init__(\n self,\n do_sample: bool = False,\n presence_penalty: float = 0.0,\n frequency_penalty: float = 0.0,\n temperature: float = 1.0,\n top_p: float = 1.0,\n top_k: int = -1, # -1 is for all \n ignore_eos: bool = False,\n max_new_tokens: int = 16,\n stop_sequences: Optional[Union[str, List[str]]] = None # 停止句子条件\n ) -> None:\n self.do_sample = do_sample\n self.presence_penalty = presence_penalty\n self.frequency_penalty = frequency_penalty\n self.temperature = temperature\n self.top_p = top_p\n self.top_k = top_k\n self.ignore_eos = ignore_eos\n self.max_new_tokens = max_new_tokens\n self.stop_sequences = stop_sequences\n if self.do_sample == False:\n self.temperature = 1.0\n self.top_p = 1.0\n self.top_k = 1\n if self.temperature >= 0.0 and self.temperature < _SAMPLING_EPS: # temperature is too slow, change to greedy search\n self.temperature = 1.0\n self.top_k = 1\n return\n \n def verify(self):\n if self.presence_penalty < 0.0:\n raise ValueError(f\"presence_penalty must >= 0.0, got {self.presence_penalty}\")\n if self.frequency_penalty < 0.0:\n raise ValueError(f\"frequency_penalty must >= 0.0, got {self.frequency_penalty}\")\n if self.temperature <= 0.0:\n raise ValueError(f\"temperature must > 0.0, got {self.temperature}\")\n if self.top_p <= 0.0 or self.top_p > 1.0:\n raise ValueError(f\"top_p must in (0.0, 1.0], got {self.top_p}\")\n if self.top_k < -1 or self.top_k == 0:\n raise ValueError(f\"top_k must be -1 (disable), or at least 1, got {self.top_k}.\")\n if self.max_new_tokens < 1:\n raise ValueError(f\"max_new_tokens must be at least 1 , got {self.max_new_tokens}.\")\n return\n\n def stop_sentences_to_token_ids(self, tokenizer):\n if self.stop_sequences is None:\n self.stop_sequences = []\n else:\n if isinstance(self.stop_sequences, str):\n self.stop_sequences = [self.stop_sequences]\n new_stop_sequences = []\n for stop_str in self.stop_sequences:\n stop_str_ids = tokenizer.encode(stop_str)\n if stop_str_ids is not None and len(stop_str_ids) >= 1: # remove bos_token_id\n stop_str_ids = stop_str_ids[1:]\n if len(stop_str_ids) > 0:\n new_stop_sequences.append(stop_str_ids)\n self.stop_sequences = new_stop_sequences\n return\n \n def to_dict(self):\n ret = {}\n ret[\"do_sample\"] = self.do_sample\n ret[\"presence_penalty\"] = self.presence_penalty\n ret[\"frequency_penalty\"] = self.frequency_penalty\n ret[\"temperature\"] = self.temperature\n ret[\"top_p\"] = self.top_p\n ret[\"top_k\"] = self.top_k\n # if self.ignore_eos is not None:\n # ret[\"ignore_eos\"] = self.ignore_eos\n # if self.max_tokens is not None:\n # ret[\"max_tokens\"] = self.max_tokens\n return ret" }, { "identifier": "Req", "path": "slora/server/io_struct.py", "snippet": "class Req:\n def __init__(self, adapter_dir, request_id, prompt_ids, sample_params: SamplingParams):\n self.adapter_dir = adapter_dir\n self.request_id = request_id\n self.prompt_ids = prompt_ids\n self.input_len = len(prompt_ids)\n self.max_output_len = sample_params.max_new_tokens\n self.sample_params = sample_params\n self.output_ids = []\n self.output_metadata_list = []\n self.has_generate_finished = False\n self.aborted = False\n\n def to_rpc_obj(self):\n return {\"adapter_dir\": self.adapter_dir,\n \"request_id\": self.request_id,\n \"input_id\": self.prompt_ids,\n \"output_len\": self.max_output_len,\n \"sampling_param\": self.sample_params.to_dict() }\n\n def to_req_detokenization_state(self):\n out = 
ReqDetokenizationState(self.request_id, self.prompt_ids, self.max_output_len, self.sample_params.ignore_eos)\n        if self.output_metadata_list:\n            out.gen_metadata.update(self.output_metadata_list[-1])\n        return out\n\n    def stop_sequences_matched(self):\n        for stop_token_ids in self.sample_params.stop_sequences:\n            stop_len = len(stop_token_ids)\n            if stop_len > 0:\n                if len(self.output_ids) >= stop_len:\n                    if all(self.output_ids[-(stop_len - i)] == stop_token_ids[i] for i in range(stop_len)):\n                        return True\n        return False\n\n    def __repr__(self):\n        return (f\"request_id(n={self.request_id}, \"\n                f\"adapter_dir={self.adapter_dir}, \")\n        # f\"prompt_ids={self.prompt_ids}, \")" }, { "identifier": "Batch", "path": "slora/server/io_struct.py", "snippet": "class Batch:\n    def __init__(self, batch_id, reqs: List[Req]):\n        self.batch_id = batch_id\n        self.reqs = reqs\n        self.id_to_reqs = {req.request_id: req for req in reqs}\n\n        self.adapter_dirs = set()\n        for req in reqs:\n            self.adapter_dirs.add(req.adapter_dir)\n\n    def input_tokens(self):\n        batch_input_tokens = 0\n        for req in self.reqs:\n            batch_input_tokens += req.input_len\n        return batch_input_tokens\n\n    def calcu_max_tokens(self):\n        tokens = 0\n        for req in self.reqs:\n            tokens += req.input_len + req.max_output_len\n        return tokens\n\n    def calcu_used_tokens(self):\n        tokens = 0\n        for req in self.reqs:\n            tokens += req.input_len + len(req.output_ids)\n        return tokens\n\n    def mark_finished_req(self, eos_id):\n        has_new_finish = False\n        for req in self.reqs:\n            if req.stop_sequences_matched():\n                req.has_generate_finished = True\n                has_new_finish = True\n            if req.output_ids[-1] == eos_id and req.sample_params.ignore_eos == False:\n                req.has_generate_finished = True\n                has_new_finish = True\n            if len(req.output_ids) >= req.max_output_len or req.aborted:\n                req.has_generate_finished = True\n                has_new_finish = True\n        return has_new_finish\n\n    def filter_finished(self):\n        unfinished_req = []\n        for req in self.reqs:\n            if not req.has_generate_finished:\n                unfinished_req.append(req)\n        self.reqs = unfinished_req\n        self.id_to_reqs = {req.request_id: req for req in self.reqs}\n\n        self.adapter_dirs = set()\n        for req in self.reqs:\n            self.adapter_dirs.add(req.adapter_dir)\n\n    def is_clear(self):\n        return len(self.reqs) == 0\n\n    def merge(self, mini_batch):\n        for _req in mini_batch.reqs:\n            self.reqs.append(_req)\n            self.adapter_dirs.add(_req.adapter_dir)\n        self.id_to_reqs = {req.request_id: req for req in self.reqs}\n        return\n\n    def __repr__(self):\n        return (f\"batch_id={self.batch_id}, \"\n                # f\"reqs={self.reqs}, \"\n                f\"req_ids={self.id_to_reqs.keys()}\")" }, { "identifier": "BatchAbortReq", "path": "slora/server/io_struct.py", "snippet": "class BatchAbortReq:\n    def __init__(self, req_ids):\n        self.reqs: List[str] = req_ids" }, { "identifier": "start_model_process", "path": "slora/server/router/model_infer/model_rpc.py", "snippet": "async def start_model_process(port, world_size):\n    # with a single GPU, skip rpc and run the model server in-process\n    if world_size == 1:\n        return ModelRpcClient(ModelRpcServer(), world_size)\n\n    import multiprocessing\n    proc = multiprocessing.Process(target=_init_env, args=(port,))\n    proc.start()\n    await asyncio.sleep(2)\n    repeat_count = 0\n    while repeat_count < 20:\n        try:\n            con = rpyc.connect(\"localhost\", port, config={\"allow_pickle\": True})\n            break\n        except BaseException:\n            await asyncio.sleep(1)\n            repeat_count += 1\n    if repeat_count == 20:\n        raise Exception(\"init rpc env error!\")\n\n    assert proc.is_alive()\n    return ModelRpcClient(con.root, world_size, rpc_server_process=proc)" }, { "identifier": 
"ModelRpcClient", "path": "slora/server/router/model_infer/model_rpc.py", "snippet": "class ModelRpcClient:\n def __init__(self, model_rpc, world_size, rpc_server_process=None):\n self.model: ModelRpcServer = model_rpc\n self.world_size = world_size\n self.rpc_server_process = rpc_server_process\n self.use_rpc = self.world_size != 1\n if self.use_rpc:\n def async_wrap(f):\n f = rpyc.async_(f)\n async def _func(*args, **kwargs):\n ans = f(*args, **kwargs)\n await asyncio.to_thread(ans.wait)\n # raise if exception\n return ans.value\n return _func\n self._init_model = async_wrap(self.model.init_model)\n self._load_adapters = rpyc.async_(self.model.load_adapters)\n self._offload_adapters = rpyc.async_(self.model.offload_adapters)\n self._unmerge_adapter = rpyc.async_(self.model.unmerge_adapter)\n self._merge_adapter = rpyc.async_(self.model.merge_adapter)\n self._add_batch = async_wrap(self.model.add_batch)\n self._prefill_batch = async_wrap(self.model.prefill_batch)\n self._decode_batch = async_wrap(self.model.decode_batch)\n self._filter_batch = async_wrap(self.model.filter_batch)\n self._merge_batch = async_wrap(self.model.merge_batch)\n self._remove_batch = async_wrap(self.model.remove_batch)\n self._profile_prefill = async_wrap(self.model.profile_prefill)\n else:\n self._init_model = self.model.exposed_init_model\n self._load_adapters = self.model.exposed_load_adapters\n self._offload_adapters = self.model.exposed_offload_adapters\n self._merge_adapter = self.model.exposed_merge_adapter\n self._unmerge_adapter = self.model.exposed_unmerge_adapter\n self._add_batch = self.model.exposed_add_batch\n self._prefill_batch = self.model.exposed_prefill_batch\n self._decode_batch = self.model.exposed_decode_batch\n self._filter_batch = self.model.exposed_filter_batch\n self._merge_batch = self.model.exposed_merge_batch\n self._remove_batch = self.model.exposed_remove_batch\n self._profile_prefill = self.model.exposed_profile_prefill\n return\n\n async def init_model(self, rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t prefetch_stream):\n ans : rpyc.AsyncResult = self._init_model(rank_id, world_size, weight_dir, adapter_dirs,\n max_total_token_num, load_way, mode, input_params,\n\t\t\t\t\t\t prefetch_stream)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n\n async def load_adapters(self, reqs, prefetch=False):\n self._load_adapters(reqs, prefetch=prefetch)\n\n\n async def offload_adapters(self, reserved_reqs=None, prefetch=False):\n self._offload_adapters(reserved_reqs, prefetch=prefetch)\n \n async def unmerge_adapter(self):\n self._unmerge_adapter()\n \n async def merge_adapter(self):\n self._merge_adapter()\n\n\n async def init_batch(self, batch_id, reqs):\n ans = self._add_batch(batch_id, reqs, \"fp16\")\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def prefill_batch(self, batch_id):\n ans = self._prefill_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def decode_batch(self, batch_id):\n ans = self._decode_batch(batch_id)\n if self.use_rpc:\n return await ans\n else:\n return ans\n\n async def filter_batch(self, batch_id, req_id_list):\n ans = self._filter_batch(batch_id, req_id_list)\n if self.use_rpc:\n await ans\n return\n else:\n return \n\n async def merge_batch(self, batch_id1, batch_id2):\n ans = self._merge_batch(batch_id1, batch_id2)\n if self.use_rpc:\n await ans\n return\n else:\n return\n\n async def remove_batch(self, batch_id):\n ans = 
self._remove_batch(batch_id)\n if self.use_rpc:\n await ans\n return\n else:\n return\n \n async def profile_prefill(self):\n ans = self._profile_prefill()\n if self.use_rpc:\n return await ans\n else:\n return ans" }, { "identifier": "ReqQueue", "path": "slora/server/router/req_queue.py", "snippet": "class ReqQueue:\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n self.max_total_tokens = max_total_tokens\n assert batch_max_tokens is not None\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n self.waiting_req_list: List[Req] = []\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n \n def update_counter(self, req):\n pass \n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "calculate_time", "path": "slora/utils/infer_utils.py", "snippet": "def calculate_time(show=False, 
min_cost_ms=0.0):\n def wrapper(func):\n def inner_func(*args, **kwargs):\n torch.cuda.synchronize()\n if show:\n start_time = time.time()\n result = func(*args, **kwargs)\n torch.cuda.synchronize()\n if show:\n cost_time = (time.time() - start_time) * 1000\n if cost_time > min_cost_ms:\n print(f\"Function {func.__name__} took {cost_time} ms to run.\")\n return result\n\n return inner_func\n\n return wrapper" }, { "identifier": "BatchTokenIdOut", "path": "slora/server/io_struct.py", "snippet": "class BatchTokenIdOut:\n def __init__(self):\n self.reqs_infs: List[Tuple[str, int, Dict, bool, bool]] = [] # [req_id, new_token_id, gen_metadata, finished_state, abort_state]" }, { "identifier": "AbortReq", "path": "slora/server/io_struct.py", "snippet": "class AbortReq:\n def __init__(self, req_id):\n self.req_id = req_id" }, { "identifier": "Stats", "path": "slora/server/router/stats.py", "snippet": "class Stats:\n\n def __init__(self, log_status, log_stats_interval) -> None:\n self.log_stats = log_status\n self.log_stats_interval = log_stats_interval\n self.last_log_time = time.time()\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n return\n \n def count_prompt_tokens(self, run_batch):\n if self.log_stats:\n tokens = run_batch.input_tokens()\n self.prompt_tokens += tokens\n self.all_tokens += tokens\n return\n \n def count_output_tokens(self, run_batch):\n if self.log_stats:\n tokens = len(run_batch.reqs)\n self.output_tokens += tokens\n self.all_tokens += tokens\n return\n\n def print_stats(self):\n if not self.log_stats:\n return\n\n now = time.time()\n if now - self.last_log_time > self.log_stats_interval:\n print(f\"Avg tokens(prompt+generate) throughput: {self.all_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg prompt tokens throughput: {self.prompt_tokens/(now-self.last_log_time):8.3f} tokens/s\\n\"\n f\"Avg generate tokens throughput: {self.output_tokens/(now-self.last_log_time):8.3f} tokens/s\")\n self.all_tokens = 0\n self.output_tokens = 0\n self.prompt_tokens = 0\n self.last_log_time = now\n return" }, { "identifier": "InputParams", "path": "slora/server/input_params.py", "snippet": "class InputParams:\n\n def __init__(\n self,\n max_req_total_len,\n # kv cache manager parameters\n max_total_token_num,\n pool_size_lora,\n batch_max_tokens,\n running_max_req_size,\n # mem_ratio,\n # adapter_ratio,\n # heuristic\n swap,\n prefetch,\n prefetch_size,\n scheduler,\n profile,\n batch_num_adapters,\n enable_abort,\n # kernel,\n # # debug\n dummy,\n no_lora_compute,\n no_lora_swap,\n # no_lora_copy,\n no_kernel,\n no_mem_pool,\n bmm,\n no_lora,\n # fairness\n fair_weights,\n ) -> None:\n self.max_req_total_len = max_req_total_len\n self.max_total_token_num = max_total_token_num\n self.pool_size_lora = pool_size_lora\n self.batch_max_tokens = batch_max_tokens\n self.running_max_req_size = running_max_req_size\n # self.mem_ratio = mem_ratio\n # self.adapter_ratio = adapter_ratio\n\n self.swap = swap\n self.prefetch = prefetch\n self.prefetch_size = prefetch_size\n self.scheduler = scheduler\n self.profile = profile\n self.batch_num_adapters = batch_num_adapters\n self.enable_abort = enable_abort\n # self.kernel = kernel\n\n self.dummy = dummy\n self.no_lora_compute = no_lora_compute\n self.no_lora_swap = no_lora_swap\n # self.no_lora_copy = no_lora_copy\n self.no_kernel = no_kernel\n self.no_mem_pool = no_mem_pool\n self.bmm = bmm\n self.no_lora = no_lora\n \n self.fair_weights = fair_weights\n return" }, { "identifier": "get_lora_config", "path": 
"slora/models/peft/lora_adapter.py", "snippet": "def get_lora_config(lora_dir, dummy):\n if dummy:\n return get_lora_config_json(lora_dir), lora_dir\n else:\n lora_dir = re.sub(r'-(\\d+)$', '', lora_dir)\n return hf_load_config(lora_dir)" }, { "identifier": "AlphaModel", "path": "slora/server/router/profiler.py", "snippet": "class AlphaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n print(self.base_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n\n def get_latency(self, batch_size, seq_len):\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n return (self.base_prefill[batch_size - 1][seq_len] + self.base_prefill[batch_size + 1][seq_len]) / 2\n else:\n return np.Inf" }, { "identifier": "BetaModel", "path": "slora/server/router/profiler.py", "snippet": "class BetaModel:\n def __init__(self, profiling_results) -> None:\n self.base_prefill = profiling_results[0]\n self.adapter_prefill = profiling_results[1]\n print(self.adapter_prefill)\n \n # load from .pkl file\n @classmethod\n def from_file(cls, file_path):\n with open(file_path, \"rb\") as f:\n results = pickle.load(f)\n return cls(results)\n \n def get_latency(self, rank_size, batch_size, seq_len):\n if rank_size == 0: return 0\n seq_len = math.ceil(seq_len / 32) * 32\n assert seq_len <= 1024\n if batch_size == 0: return 0\n # assert batch_size in self.base_prefill\n if batch_size in self.base_prefill:\n return self.adapter_prefill[rank_size][batch_size][seq_len] - self.base_prefill[batch_size][seq_len]\n elif batch_size == 1 and 2 in self.base_prefill:\n return self.adapter_prefill[rank_size][2][seq_len] - self.base_prefill[2][seq_len]\n elif batch_size % 2 != 0 and batch_size - 1 in self.base_prefill and batch_size + 1 in self.base_prefill:\n a = self.adapter_prefill[rank_size][batch_size - 1][seq_len] - self.base_prefill[batch_size - 1][seq_len]\n b = self.adapter_prefill[rank_size][batch_size + 1][seq_len] - self.base_prefill[batch_size + 1][seq_len]\n return (a + b) / 2\n else:\n return np.Inf" }, { "identifier": "AbortReqQueue", "path": "slora/server/router/abort_req_queue.py", "snippet": "class AbortReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.abort_req_list: List[str] = []\n self.req_time_stamp = []\n self.init_bs = 1\n self.apprx_req_rate = 1\n self.apprx_bs = self.init_bs\n self.last_req_num = 0\n self.last_time = time.time()\n \n def append(self, req):\n self.waiting_req_list.insert(0, req)\n self.req_time_stamp.insert(0, time.time())\n assert len(self.waiting_req_list) == len(self.req_time_stamp)\n return\n\n def reset_abort_list(self):\n self.abort_req_list = []\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n 
new_batch_total_tokens = 0\n aborted_count = 0\n\n self.apprx_req_rate = int(0.7 * (len(self.waiting_req_list) - self.last_req_num) + 0.3 * self.apprx_req_rate)\n for i, req in enumerate(self.waiting_req_list):\n if attainment_func(time.time() - self.req_time_stamp[i] + 0.5) == 0:\n req.aborted = True\n aborted_count += 1\n abort_list.append(req)\n self.abort_req_list.append(req.request_id)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in abort_list]\n \n if self.apprx_req_rate >= self.apprx_bs:\n print(\"apprx bs\", self.apprx_bs, \"req rate\", self.apprx_req_rate)\n # choose from the latest requests\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n elif self.apprx_req_rate < self.apprx_bs:\n # choose from the earliest requests\n for req in reversed(self.waiting_req_list):\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n \n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.req_time_stamp = [self.req_time_stamp[i] for i in range(len(self.req_time_stamp)) if self.waiting_req_list[i] not in can_run_list and self.waiting_req_list[i] not in abort_list]\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n self.last_req_num = len(self.waiting_req_list)\n self.apprx_bs = max(int(0.7 * len(new_batch.reqs) + 0.3 * self.apprx_bs), self.init_bs)\n return new_batch\n else:\n return None" }, { "identifier": "ClusterReqQueue", "path": "slora/server/router/cluster_req_queue.py", "snippet": "class ClusterReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size, batch_num_adapters) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.batch_num_adapters = batch_num_adapters\n\n def _generate_new_batch_prioritizing_existing_adapters(self, current_batch:Batch, lora_ranks: dict[str, int]):\n filtered_waiting_req_list = list(filter(lambda req: req.adapter_dir in current_batch.adapter_dirs, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n can_run_list = []\n new_batch_total_tokens = 0\n for idx, req in enumerate(filtered_waiting_req_list):\n if req.aborted:\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n request_ids_to_remove_from_waiting_queue = set()\n\n # If filtered waiting list was not enough to max-out the current running batch, we resolve to FIFO\n for req in self.waiting_req_list:\n if req.aborted:\n 
request_ids_to_remove_from_waiting_queue.add(req.request_id)\n continue\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n request_ids_to_remove_from_waiting_queue.add(req.request_id)\n else:\n break\n \n self.waiting_req_list = list(filter(lambda req: req.request_id not in request_ids_to_remove_from_waiting_queue, self.waiting_req_list))\n\n return can_run_list\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n\n for req in self.waiting_req_list:\n if req.aborted:\n aborted_count += 1\n continue\n\n if current_batch is not None and len(current_batch.adapter_dirs) >= self.batch_num_adapters:\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n rest_of_batch = self._generate_new_batch_prioritizing_existing_adapters(current_batch, lora_ranks)\n can_run_list += rest_of_batch\n if len(can_run_list) != 0:\n return Batch(uuid.uuid4().hex, can_run_list)\n else:\n return None\n\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = self.waiting_req_list[len(can_run_list) + aborted_count:]\n return new_batch\n else:\n return None" }, { "identifier": "VTCReqQueue", "path": "slora/server/router/vtc_req_queue.py", "snippet": "class VTCReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size,\n adapter_dirs, fair_weights,\n input_price=1, output_price=2) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.input_price = input_price\n self.output_price = output_price\n self.served = {}\n self.user_req_list = {}\n\n self.adapter_dirs = adapter_dirs\n self.fair_weights = fair_weights\n\n self.fairw = {}\n for i in range(len(adapter_dirs)):\n if i < len(fair_weights):\n self.fairw[adapter_dirs[i]] = fair_weights[i]\n else:\n self.fairw[adapter_dirs[i]] = 1\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n if req.adapter_dir not in self.user_req_list:\n self.user_req_list[req.adapter_dir] = deque([req])\n self.served[req.adapter_dir] = 0\n else:\n self.user_req_list[req.adapter_dir].append(req)\n\n # waiting queue was empty before\n if len(self.user_req_list[req.adapter_dir]) == 1:\n # lift counter\n cnts = [v for k, v in self.served.items()\n if (len(self.user_req_list[k]) > 0 and k != req.adapter_dir)]\n if len(cnts) > 0:\n self.served[req.adapter_dir] = max(self.served[req.adapter_dir], min(cnts))\n\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 
0\n\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n if len(self.served) == 0:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n active_served = {k: v for k, v in self.served.items()}\n while True:\n if len(active_served) == 0:\n break\n adapter_dir = min(active_served, key=active_served.get)\n if len(self.user_req_list[adapter_dir]) > 0:\n req = self.user_req_list[adapter_dir][0]\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n self.user_req_list[adapter_dir].popleft()\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n self.user_req_list[adapter_dir].popleft()\n # update fairness counter\n self.served[adapter_dir] += req.input_len * self.input_price / self.fairw[adapter_dir]\n active_served[adapter_dir] += req.input_len * self.input_price / self.fairw[adapter_dir]\n else:\n break\n else:\n del active_served[adapter_dir]\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list\n if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n \n def update_counter(self, current_batch: Batch):\n for req in current_batch.reqs:\n self.served[req.adapter_dir] += 1 * self.output_price / self.fairw[req.adapter_dir]\n\n\n def next_batch(self):\n raise NotImplementedError()" }, { "identifier": "PETSReqQueue", "path": "slora/server/router/pets_req_queue.py", "snippet": "class PETSReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n self.alpha = None\n self.beta = None # will be set automatically in the profiling function in router.manager\n \n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n 
self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n def intra_task_batching(self, lora_ranks):\n ## Preprocessing: gather the queries with the same adapter.\n clustered_queries_by_adapter = {}\n for query in self.waiting_req_list:\n adapter_dir = query.adapter_dir\n if adapter_dir in clustered_queries_by_adapter:\n clustered_queries_by_adapter[adapter_dir].append(query)\n else:\n clustered_queries_by_adapter[adapter_dir] = [query]\n\n ## DP\n mini_batches = []\n for adapter_dir, queries in clustered_queries_by_adapter.items():\n state_1st_stage = []\n split_idx_list = []\n\n ### Sort queries according to the sequence length in ascending order.\n queries = sorted(queries, key=lambda x: x.input_len)\n queries.insert(0, None) # Sentinel.\n\n ### Initialize.\n state_1st_stage.append(0)\n split_idx_list.append(0)\n for j in range(1, len(queries)):\n min_cost = np.Inf # INF\n split_idx = 0\n for k in range(1, j+1):\n lora_rank = lora_ranks[adapter_dir]\n tmp = state_1st_stage[k-1] + self.beta.get_latency(lora_rank, j-k+1, queries[j].input_len)\n if tmp < min_cost:\n min_cost = tmp\n split_idx = k-1\n split_idx_list.append(split_idx)\n state_1st_stage.append(min_cost)\n \n ### Split queries into mini-batches according to split_idx_list.\n \n end_idx = len(queries) - 1\n\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n mini_batch = []\n max_len = queries[end_idx].input_len\n for j in range(start_idx, end_idx + 1):\n mini_batch.append(queries[j]) \n mini_batches.append((mini_batch, max_len))\n end_idx = split_idx_list[end_idx] \n \n return mini_batches\n \n # Inter-task batching.\n def inter_task_batching(self, mini_batches):\n ## Sort mini_batches according to the max sequence length.\n mini_batches = sorted(mini_batches, key=lambda x: x[1])\n mini_batches.insert(0, None) # Sentinel.\n\n tmp = 0\n mini_batch_sum = [0]\n for mini_batch in mini_batches[1:]:\n tmp += len(mini_batch[0])\n mini_batch_sum.append(tmp)\n\n ## DP.\n state_2nd_stage = []\n split_idx_list = []\n state_2nd_stage.append(0)\n split_idx_list.append(0)\n\n for i in range(1, len(mini_batches)):\n min_cost = np.Inf # INF\n split_idx = 0\n for j in range(1, i+1):\n total_samples = mini_batch_sum[i] - mini_batch_sum[j-1]\n tmp = state_2nd_stage[j-1] + self.alpha.get_latency(total_samples, mini_batches[i][1])\n if tmp < min_cost:\n min_cost = tmp\n split_idx = j - 1\n split_idx_list.append(split_idx)\n state_2nd_stage.append(min_cost)\n\n ## Split mini_batches into final scheduled_batches.\n ### Split mini_batches into macro_batches.\n\n end_idx = len(mini_batches) - 1\n macro_batches = []\n while(end_idx > 0):\n start_idx = split_idx_list[end_idx] + 1\n macro_batch = []\n max_len = mini_batches[end_idx][1]\n for j in range(start_idx, end_idx + 1):\n macro_batch.append(mini_batches[j]) \n macro_batches.append((macro_batch, max_len))\n end_idx = split_idx_list[end_idx] \n\n total_samples = 0\n for macro_batch in macro_batches:\n for mini_batch in macro_batch[0]:\n total_samples += len(mini_batch[0])\n # print(total_samples)\n\n return macro_batches\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = 
np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n reqs = self.waiting_req_list\n # when waiting_reqs > 20\n if len(self.waiting_req_list) > 10:\n mini_batches = self.intra_task_batching(lora_ranks)\n macro_batches = self.inter_task_batching(mini_batches)\n \n macro_batch = macro_batches[-1][0]\n reqs = [req for minibatch in macro_batch for req in minibatch[0]]\n \n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n for req in reqs:\n if req.aborted:\n aborted_count += 1\n abort_list.append(req)\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" }, { "identifier": "PEFTReqQueue", "path": "slora/server/router/peft_req_queue.py", "snippet": "class PEFTReqQueue(ReqQueue):\n\n def __init__(self, max_total_tokens, batch_max_tokens, running_max_req_size) -> None:\n super().__init__(max_total_tokens, batch_max_tokens, running_max_req_size)\n \n def append(self, req):\n self.waiting_req_list.append(req)\n return\n \n def _init_cache_list(self, current_batch:Batch, lora_ranks):\n if current_batch is not None:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n for req in current_batch.reqs:\n self.cache_len_list.append((req.input_len + len(req.output_ids),\n req.max_output_len - len(req.output_ids) - 1))\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n else:\n self.cache_len_list = []\n self.adapters = set()\n self.adapter_size = 0\n \n # @calculate_time(show=True, min_cost_ms=0.1)\n def _can_add_new_req(self, req, lora_ranks):\n self.cache_len_list.append((req.input_len + 1, req.max_output_len - 1)) # hard to analysis\n self.cache_len_list.sort(key=lambda x: -x[1])\n if req.adapter_dir not in self.adapters:\n self.adapter_size += lora_ranks[req.adapter_dir] * 4\n self.adapters.add(req.adapter_dir)\n \n left_out_len_array = np.array([e[1] for e in self.cache_len_list])\n # assert left_out_len_array.min() >= 0\n has_run_len_array = np.array([e[0] for 
e in self.cache_len_list])\n cum_run_len_array = np.cumsum(has_run_len_array)\n size_array = np.arange(1, len(self.cache_len_list) + 1, 1)\n \n need_max_token_num = (left_out_len_array * size_array + cum_run_len_array).max()\n if (need_max_token_num < self.max_total_tokens - self.adapter_size and\n len(self.cache_len_list) <= self.running_max_req_size):\n return True\n else:\n return False\n\n def generate_new_batch(self, current_batch:Batch, lora_ranks: dict[str, int]):\n if current_batch is not None and len(current_batch.reqs) >= self.running_max_req_size:\n return None\n \n self._init_cache_list(current_batch, lora_ranks)\n can_run_list = []\n abort_list = []\n new_batch_total_tokens = 0\n aborted_count = 0\n if len(self.waiting_req_list) > 0 and current_batch is None:\n adapter_dir = self.waiting_req_list[0].adapter_dir\n if current_batch is not None:\n adapter_dir = current_batch.reqs[0].adapter_dir\n # heuristics:\n # TODO: think more\n max_other_waited_reqs = 30\n other_waited_reqs = 0\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n other_waited_reqs += 1\n if other_waited_reqs > max_other_waited_reqs:\n return None\n continue\n if req.adapter_dir == adapter_dir:\n break\n\n for req in self.waiting_req_list:\n if req.adapter_dir != adapter_dir:\n continue\n if req.aborted:\n aborted_count += 1\n continue\n if (self._can_add_new_req(req, lora_ranks) and\n new_batch_total_tokens + req.input_len <= self.batch_max_tokens):\n can_run_list.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n\n if len(can_run_list) != 0:\n new_batch = Batch(uuid.uuid4().hex, can_run_list)\n self.waiting_req_list = [req for req in self.waiting_req_list if req not in can_run_list and req not in abort_list]\n return new_batch\n else:\n return None\n\n\n def next_batch(self):\n next_batch = []\n new_batch_total_tokens = 0\n for req in self.waiting_req_list:\n if req.aborted:\n continue\n if new_batch_total_tokens + req.input_len <= self.batch_max_tokens:\n next_batch.append(req)\n new_batch_total_tokens += req.input_len\n else:\n break\n if len(next_batch) > 0:\n next_batch = Batch(uuid.uuid4().hex, next_batch)\n return next_batch\n else:\n return None" } ]
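Note on the admission test shared by every queue variant in the context above: `_can_add_new_req` sorts (cached_tokens, remaining_output) pairs by descending remaining output and bounds peak KV-cache demand under the assumption that longer-remaining requests outlive shorter ones. A minimal standalone sketch of that check, with the scheduler state passed in explicitly (field names follow the snippets; this is an illustration, not the library's API):

import numpy as np

def can_add_new_req(cache_len_list, max_total_tokens, adapter_size, running_max_req_size):
    # cache_len_list: (tokens_already_cached, output_tokens_still_to_come) per
    # request, already including the candidate as (input_len + 1, max_output_len - 1).
    cache_len_list = sorted(cache_len_list, key=lambda x: -x[1])
    left_out = np.array([e[1] for e in cache_len_list])
    has_run = np.array([e[0] for e in cache_len_list])
    cum_run = np.cumsum(has_run)
    size = np.arange(1, len(cache_len_list) + 1)
    # Worst-case tokens in cache when the i-th longest-remaining request ends:
    # the first i requests each grow by up to left_out[i-1] more tokens on top
    # of the tokens they have already cached.
    need_max_token_num = (left_out * size + cum_run).max()
    return (need_max_token_num < max_total_tokens - adapter_size
            and len(cache_len_list) <= running_max_req_size)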
import uvloop import asyncio import os import pickle import time import torch import zmq import zmq.asyncio import traceback from typing import Dict, List, Optional from ..sampling_params import SamplingParams from ..io_struct import Req, Batch, BatchAbortReq from .model_infer.model_rpc import start_model_process, ModelRpcClient from .req_queue import ReqQueue from rpyc.utils.classic import obtain from slora.utils.infer_utils import calculate_time from ..io_struct import BatchTokenIdOut, AbortReq from .stats import Stats from slora.server.input_params import InputParams from slora.models.peft.lora_adapter import get_lora_config from slora.server.router.profiler import AlphaModel, BetaModel from slora.server.router.abort_req_queue import AbortReqQueue from slora.server.router.cluster_req_queue import ClusterReqQueue from slora.server.router.vtc_req_queue import VTCReqQueue from slora.server.router.pets_req_queue import PETSReqQueue from slora.server.router.peft_req_queue import PEFTReqQueue
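The import block above pulls in `zmq.asyncio`; further down, `RouterManager.__init__` binds a PULL socket for requests from the HTTP server and connects a PUSH socket toward the detokenizer. A minimal sketch of that wiring in isolation (the port numbers here are made-up placeholders, not values from the source):

import zmq
import zmq.asyncio

async def wire_router(router_port=8010, detokenization_port=8011):
    context = zmq.asyncio.Context(2)
    recv_from_httpserver = context.socket(zmq.PULL)
    recv_from_httpserver.bind(f'tcp://127.0.0.1:{router_port}')
    send_to_detokenization = context.socket(zmq.PUSH)
    send_to_detokenization.connect(f'tcp://127.0.0.1:{detokenization_port}')
    # Relay one pickled object, mirroring how the router forwards
    # detokenization state for each appended request.
    obj = await recv_from_httpserver.recv_pyobj()
    send_to_detokenization.send_pyobj(obj)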
15413
self.has_wait_tokens += 1 return else: new_mini_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_mini_batch is not None: self.stats_tool.count_prompt_tokens(new_mini_batch) if not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch) self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): self.req_queue.update_counter(batch) rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch and not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: 
await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): if not self.input_params.no_lora: # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans):
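`_decode_batch` above charges the fairness ledger via `self.req_queue.update_counter(batch)` before each decode step; combined with the VTCReqQueue snippet in the context, the accounting reduces to a small per-adapter counter. A toy sketch of that bookkeeping (a simplification for illustration, not the scheduler itself):

from collections import defaultdict

class VtcLedger:
    def __init__(self, input_price=1, output_price=2, fair_weights=None):
        self.input_price = input_price
        self.output_price = output_price
        self.fairw = fair_weights or {}
        self.served = defaultdict(float)  # virtual tokens charged per adapter

    def charge_prefill(self, adapter_dir, input_len):
        # On admission, a request pays input_price per prompt token, scaled by weight.
        self.served[adapter_dir] += input_len * self.input_price / self.fairw.get(adapter_dir, 1)

    def charge_decode(self, adapter_dirs):
        # update_counter: one entry per running request; each generated token
        # costs output_price, scaled by weight.
        for adapter_dir in adapter_dirs:
            self.served[adapter_dir] += 1 * self.output_price / self.fairw.get(adapter_dir, 1)

    def pick_next(self, backlog):
        # Serve the backlogged adapter that has consumed the least so far.
        candidates = {a: self.served[a] for a in backlog}
        return min(candidates, key=candidates.get) if candidates else None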
asyncio.set_event_loop_policy(uvloop.EventLoopPolicy()) def get_scheduler(input_params, adapter_dirs): if input_params.scheduler == "vtc_fair": return VTCReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, adapter_dirs, input_params.fair_weights) elif input_params.scheduler == "pets": return PETSReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "peft": return PEFTReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.batch_num_adapters is not None: return ClusterReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size, input_params.batch_num_adapters) elif input_params.enable_abort: return AbortReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) elif input_params.scheduler == "slora": return ReqQueue(input_params.max_total_token_num, input_params.batch_max_tokens, input_params.running_max_req_size) else: raise Exception("unrecognized scheduler") class RouterManager: def __init__(self, weightdir, adapter_dirs, load_way, world_size, eos_id, router_port, detokenization_port, model_rpc_ports, input_params, mode=[], log_stats=True, log_stats_interval=10): self.model_weightdir = weightdir self.adapter_dirs = adapter_dirs self.world_size = world_size self.load_way = load_way self.mode = mode self.input_params = input_params if self.input_params.prefetch: self.prefetch_stream = torch.cuda.Stream() else: self.prefetch_stream = None # get adapter rank self.lora_ranks = {} for lora_dir in adapter_dirs: config, _ = get_lora_config(lora_dir, input_params.dummy) self.lora_ranks[lora_dir] = config["r"] self.lora_ranks[None] = 0 self.req_queue = get_scheduler(input_params, adapter_dirs) self.running_batch: Batch = None self.eos_id = eos_id self.has_wait_tokens = 0 self.max_wait_tokens = 10 context = zmq.asyncio.Context(2) self.recv_from_httpserver = context.socket(zmq.PULL) self.recv_from_httpserver.bind(f"tcp://127.0.0.1:{router_port}") self.send_to_detokenization = context.socket(zmq.PUSH) self.send_to_detokenization.connect(f"tcp://127.0.0.1:{detokenization_port}") self.model_rpc_ports = model_rpc_ports self.stats_tool = Stats(log_stats, log_stats_interval) async def wait_to_model_ready(self): self.model_rpcs: List[ModelRpcClient] = [] for rank_id in range(self.world_size): rpc_model = await start_model_process(port=self.model_rpc_ports[rank_id], world_size=self.world_size) self.model_rpcs.append(rpc_model) init_model_ret = [] for rank_id in range(self.world_size): # async init model process init_model_ret.append( self.model_rpcs[rank_id].init_model( rank_id, self.world_size, self.model_weightdir, self.adapter_dirs, self.input_params.max_total_token_num, self.load_way, self.mode, input_params=self.input_params, prefetch_stream=self.prefetch_stream, )) await asyncio.gather(*init_model_ret) return async def profile_prefill(self): res = [] for rank_id in range(self.world_size): # async init model process res.append( self.model_rpcs[rank_id].profile_prefill()) results = await asyncio.gather(*res) self.alpha_model = AlphaModel(results[0]) self.beta_model = BetaModel(results[0]) # check if the path exists else create it cache_dir = os.path.expanduser("~/.cache/slora") if not os.path.exists(cache_dir): os.makedirs(cache_dir) with open(cache_dir+"/profile_results.pkl", "wb") as f: 
pickle.dump(results[0], f) return def add_req( self, adapter_dir: str, prompt_ids: List[int], sampling_params: SamplingParams, request_id: str ): req = Req(adapter_dir, request_id, prompt_ids, sampling_params) self.req_queue.append(req) self.send_to_detokenization.send_pyobj(req.to_req_detokenization_state()) return async def abort(self, request_id): if self.running_batch is not None: for req in self.running_batch.reqs: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True for req in self.req_queue.waiting_req_list: if req.request_id == request_id: req.has_generate_finished = True req.aborted = True return async def loop_for_fwd(self,): counter_count = 0 while True: await self._step() counter_count += 1 if self.running_batch is not None: if counter_count % 50 == 0: print("current batch size:", len(self.running_batch.reqs), "token used ratio:", self.running_batch.calcu_used_tokens() / self.input_params.max_total_token_num) pass self.stats_tool.print_stats() if self.running_batch is None: await asyncio.sleep(0.01) # 10ms async def _step(self): """ Event-handling loop. """ # remove all reqs that have already finished if self.running_batch is None: new_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_batch is not None: self.stats_tool.count_prompt_tokens(new_batch) self.running_batch = new_batch if not self.input_params.no_lora: # load adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_batch.adapter_dirs)) await asyncio.gather(*ret) # merge adapter to base model if self.input_params.scheduler == "peft": torch.cuda.synchronize() ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].merge_adapter()) await asyncio.gather(*ret) torch.cuda.synchronize() await self._prefill_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens = 0 return if self.has_wait_tokens < self.max_wait_tokens: self.stats_tool.count_output_tokens(self.running_batch) # prefetch if (not self.input_params.no_lora and self.input_params.prefetch and (self.has_wait_tokens == self.max_wait_tokens // 2 or self.has_wait_tokens == self.max_wait_tokens - 3) and self.input_params.scheduler != "peft"): next_batch = self.req_queue.next_batch() if next_batch is not None: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters( next_batch.adapter_dirs, prefetch=True)) await asyncio.gather(*ret) await self._decode_batch(self.running_batch) await self._filter_runing_batch() self.has_wait_tokens += 1 return else: new_mini_batch = self.req_queue.generate_new_batch(self.running_batch, self.lora_ranks) if self.input_params.enable_abort and len(self.req_queue.abort_req_list) > 0: self.send_to_detokenization.send_pyobj(BatchAbortReq(self.req_queue.abort_req_list)) self.req_queue.reset_abort_list() if new_mini_batch is not None: self.stats_tool.count_prompt_tokens(new_mini_batch) if not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].load_adapters(new_mini_batch.adapter_dirs)) await asyncio.gather(*ret) await self._prefill_batch(new_mini_batch, minibatch=True) if not new_mini_batch.is_clear(): await self._merge_batch(self.running_batch, new_mini_batch) self.running_batch.merge(new_mini_batch)
self.has_wait_tokens = 0 else: self.stats_tool.count_output_tokens(self.running_batch) await self._decode_batch(self.running_batch) await self._filter_runing_batch() async def _init_batch(self, batch: Batch): reqs = [r.to_rpc_obj() for r in batch.reqs] rets = [self.model_rpcs[tp_rank].init_batch(batch.batch_id, reqs) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _prefill_batch(self, batch, minibatch=True): await self._init_batch(batch) rets = [self.model_rpcs[tp_rank].prefill_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req, minibatch=True) return async def _decode_batch(self, batch:Batch): self.req_queue.update_counter(batch) rets = [self.model_rpcs[tp_rank].decode_batch(batch.batch_id) for tp_rank in range(self.world_size)] ans = await asyncio.gather(*rets) if self.world_size != 1: req_to_out_token_id = obtain(ans[0]) else: req_to_out_token_id = ans[0] self._add_token_id_to_req(batch, req_to_out_token_id) has_new_finished_req = batch.mark_finished_req(self.eos_id) self._send_to_detokenization_proc(batch, req_to_out_token_id) await self._handle_finish_req(batch, has_new_finished_req) return async def _filter_batch(self, batch: Batch): req_id_list = [r.request_id for r in batch.reqs] rets = [self.model_rpcs[tp_rank].filter_batch(batch.batch_id, req_id_list) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _merge_batch(self, batch1, batch2): rets = [self.model_rpcs[tp_rank].merge_batch(batch1.batch_id, batch2.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _remove_batch(self, batch): rets = [self.model_rpcs[tp_rank].remove_batch(batch.batch_id) for tp_rank in range(self.world_size)] await asyncio.gather(*rets) return async def _handle_finish_req(self, batch: Batch, has_new_finished_req, minibatch=False): if has_new_finished_req: batch.filter_finished() # unmerge adapter from base model if self.input_params.scheduler == "peft" and batch.is_clear(): ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].unmerge_adapter()) await asyncio.gather(*ret) if not minibatch and not self.input_params.no_lora: ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters(batch.adapter_dirs)) await asyncio.gather(*ret) if batch.is_clear(): await self._remove_batch(batch) else: await self._filter_batch(batch) return async def _filter_runing_batch(self): if self.running_batch is not None and self.running_batch.is_clear(): if not self.input_params.no_lora: # offload model and adapters ret = [] for tp_rank in range(self.world_size): ret.append(self.model_rpcs[tp_rank].offload_adapters()) await asyncio.gather(*ret) self.running_batch = None return def _add_token_id_to_req(self, batch: Batch, req_ans): for req_id, (new_token_id, new_gen_metadata) in req_ans.items(): req = batch.id_to_reqs[req_id] req.output_ids.append(new_token_id) req.output_metadata_list.append(new_gen_metadata) return def _send_to_detokenization_proc(self, batch: Batch, req_ans):
batch_out = BatchTokenIdOut()
8
2023-11-05 04:08:36+00:00
24k
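Before the next record: the PETSReqQueue snippet in the context above batches with a two-stage dynamic program. A condensed sketch of its first stage, splitting one adapter's queries into contiguous mini-batches, where latency(batch_size, max_len) is an assumed stand-in for the profiled beta model in the snippet:

def split_into_minibatches(input_lens, latency):
    lens = sorted(input_lens)
    n = len(lens)
    best = [0.0] * (n + 1)   # best[j]: minimal total latency for the first j queries
    split = [0] * (n + 1)    # split[j]: number of queries before the batch ending at j
    for j in range(1, n + 1):
        # Try every start k of the batch that ends at query j; its cost is the
        # predicted latency of (j - k + 1) queries padded to the longest, lens[j - 1].
        best[j], split[j] = min(
            (best[k - 1] + latency(j - k + 1, lens[j - 1]), k - 1)
            for k in range(1, j + 1)
        )
    # Walk the split points back into explicit batches.
    batches, end = [], n
    while end > 0:
        start = split[end]
        batches.append(lens[start:end])
        end = start
    return batches[::-1]

# e.g. split_into_minibatches([3, 5, 7, 9], lambda bs, ml: 1.0 + 0.01 * bs * ml)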
ForceFledgling/proxyhub
proxyhub/api.py
[ { "identifier": "Checker", "path": "proxyhub/checker.py", "snippet": "class Checker:\n \"\"\"Proxy checker.\"\"\"\n\n def __init__(\n self,\n judges,\n max_tries=3,\n timeout=8,\n verify_ssl=False,\n strict=False,\n dnsbl=None,\n real_ext_ip=None,\n types=None,\n post=False,\n loop=None,\n ):\n Judge.clear()\n self._judges = get_judges(judges, timeout, verify_ssl)\n self._method = 'POST' if post else 'GET'\n self._max_tries = max_tries\n self._real_ext_ip = real_ext_ip\n self._strict = strict\n self._dnsbl = dnsbl or []\n self._types = types or {}\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = Resolver(loop=self._loop)\n\n self._req_http_proto = not types or bool(\n ('HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5') & types.keys()\n )\n self._req_https_proto = not types or bool(('HTTPS',) & types.keys())\n self._req_smtp_proto = not types or bool(('CONNECT:25',) & types.keys()) # noqa\n\n self._ngtrs = {proto for proto in types or NGTRS}\n\n async def check_judges(self):\n # TODO: need refactoring\n log.debug('Start check judges')\n stime = time.time()\n await asyncio.gather(\n *[j.check(real_ext_ip=self._real_ext_ip) for j in self._judges]\n )\n\n self._judges = [j for j in self._judges if j.is_working]\n log.debug(\n '%d judges added. Runtime: %.4f;' % (len(self._judges), time.time() - stime)\n )\n\n nojudges = []\n disable_protocols = []\n\n if len(Judge.available['HTTP']) == 0:\n nojudges.append('HTTP')\n disable_protocols.extend(['HTTP', 'CONNECT:80', 'SOCKS4', 'SOCKS5'])\n self._req_http_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTP'].set()\n if len(Judge.available['HTTPS']) == 0:\n nojudges.append('HTTPS')\n disable_protocols.append('HTTPS')\n self._req_https_proto = False\n # for coroutines, which is already waiting\n Judge.ev['HTTPS'].set()\n if len(Judge.available['SMTP']) == 0:\n # nojudges.append('SMTP')\n disable_protocols.append('SMTP')\n self._req_smtp_proto = False\n # for coroutines, which is already waiting\n Judge.ev['SMTP'].set()\n\n for proto in disable_protocols:\n if proto in self._ngtrs:\n self._ngtrs.remove(proto)\n\n if nojudges:\n warnings.warn(\n 'Not found judges for the {nojudges} protocol.\\n'\n 'Checking proxy on protocols {disp} is disabled.'.format(\n nojudges=nojudges, disp=disable_protocols\n ),\n UserWarning,\n )\n if self._judges:\n log.debug('Loaded: %d proxy judges' % len(set(self._judges)))\n else:\n RuntimeError('Not found judges')\n\n def _types_passed(self, proxy):\n if not self._types:\n return True\n for proto, lvl in proxy.types.copy().items():\n req_levels = self._types.get(proto)\n if not req_levels or (lvl in req_levels):\n if not self._strict:\n return True\n else:\n if self._strict:\n del proxy.types[proto]\n if self._strict and proxy.types:\n return True\n proxy.log('Protocol or the level of anonymity differs from the requested')\n return False\n\n async def _in_DNSBL(self, host):\n _host = '.'.join(reversed(host.split('.'))) # reverse address\n tasks = []\n for domain in self._dnsbl:\n query = '.'.join([_host, domain])\n tasks.append(self._resolver.resolve(query, logging=False))\n responses = await asyncio.gather(*tasks, return_exceptions=True)\n if any([r for r in responses if not isinstance(r, ResolveError)]):\n return True\n return False\n\n async def check(self, proxy):\n if self._dnsbl:\n if await self._in_DNSBL(proxy.host):\n proxy.log('Found in DNSBL')\n return False\n\n if self._req_http_proto:\n await Judge.ev['HTTP'].wait()\n if self._req_https_proto:\n await 
Judge.ev['HTTPS'].wait()\n if self._req_smtp_proto:\n await Judge.ev['SMTP'].wait()\n\n if proxy.expected_types:\n ngtrs = proxy.expected_types & self._ngtrs\n else:\n ngtrs = self._ngtrs\n\n results = []\n for proto in ngtrs:\n if proto == 'CONNECT:25':\n result = await self._check_conn_25(proxy, proto)\n else:\n result = await self._check(proxy, proto)\n results.append(result)\n\n proxy.is_working = True if any(results) else False\n\n if proxy.is_working and self._types_passed(proxy):\n return True\n return False\n\n async def _check_conn_25(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n proxy.types[proxy.ngtr.name] = None\n result = True\n break\n finally:\n proxy.close()\n return result\n\n async def _check(self, proxy, proto):\n judge = Judge.get_random(proto)\n proxy.log('Selected judge: %s' % judge)\n result = False\n for attempt in range(self._max_tries):\n try:\n proxy.ngtr = proto\n await proxy.connect()\n await proxy.ngtr.negotiate(host=judge.host, ip=judge.ip)\n headers, content, rv = await _send_test_request(\n self._method, proxy, judge\n )\n except ProxyTimeoutError:\n continue\n except (\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ):\n break\n else:\n content = _decompress_content(headers, content)\n result = _check_test_response(proxy, headers, content, rv)\n if result:\n if proxy.ngtr.check_anon_lvl:\n lvl = _get_anonymity_lvl(\n self._real_ext_ip, proxy, judge, content\n )\n else:\n lvl = None\n proxy.types[proxy.ngtr.name] = lvl\n break\n finally:\n proxy.close()\n return result" }, { "identifier": "ResolveError", "path": "proxyhub/errors.py", "snippet": "class ResolveError(Exception):\n pass" }, { "identifier": "PROVIDERS", "path": "proxyhub/providers.py", "snippet": "PROVIDERS = [\n Provider(\n url='http://www.proxylists.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 49\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks4',\n proto=('SOCKS4'),\n ), # added by ZerGo0\n Provider(\n url='https://api.proxyscrape.com/?request=getproxies&proxytype=socks5',\n proto=('SOCKS5'),\n ), # added by ZerGo0\n Provider(\n url='http://ipaddress.com/proxy-list/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 53\n Provider(\n url='https://www.sslproxies.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 100\n Provider(\n url='https://freshfreeproxylist.wordpress.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 50\n Provider(\n url='http://proxytime.ru/http',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 1400\n Provider(\n url='https://free-proxy-list.net/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 300\n Provider(\n url='https://us-proxy.org/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://fineproxy.org/eng/fresh-proxies/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 
'CONNECT:25'),\n ), # 5500\n Provider(url='https://socks-proxy.net/', proto=('SOCKS4', 'SOCKS5')), # 80\n Provider(\n url='http://www.httptunnel.ge/ProxyListForFree.aspx',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 200\n Provider(\n url='http://cn-proxy.com/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 70\n Provider(\n url='https://hugeproxies.com/home/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 800\n Provider(\n url='http://proxy.rufey.ru/',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 153\n Provider(\n url='https://geekelectronics.org/my-servisy/proxy',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 400\n Provider(\n url='http://pubproxy.com/api/proxy?limit=20&format=txt',\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n ), # 20\n Proxy_list_org(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 140\n Xseo_in(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 240\n Spys_ru(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 660\n Proxylistplus_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 450\n Proxylist_me(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 2872\n Foxtools_ru(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'), max_conn=1\n ), # noqa; 500\n Gatherproxy_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 3212\n Nntime_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 1050\n Blogspot_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # noqa; 24800\n Gatherproxy_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 30\n Blogspot_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1486\n Tools_rosinstrument_com(\n proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')\n ), # noqa; 4000\n Tools_rosinstrument_com_socks(proto=('SOCKS4', 'SOCKS5')), # noqa; 1800\n My_proxy_com(max_conn=2), # noqa; 1000\n Checkerproxy_net(), # noqa; 60000\n Aliveproxy_com(), # noqa; 210\n Freeproxylists_com(), # noqa; 1338\n Webanetlabs_net(), # noqa; 5000\n Maxiproxies_com(), # noqa; 430\n Proxylist_download(), # noqa; 35590\n # # Bad...\n # http://www.proxylist.ro/\n # Provider(url='http://proxydb.net/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS',\n # 'CONNECT:25', 'SOCKS4', 'SOCKS5')),\n # Provider(url='http://www.cybersyndrome.net/pla6.html',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 1100\n # Provider(url='https://www.ip-adress.com/proxy-list',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 57\n # Provider(url='https://www.marcosbl.com/lab/proxies/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 89\n # Provider(url='http://go4free.xyz/Free-Proxy/',\n # proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 196\n # Provider(url='http://blackstarsecurity.com/proxy-list.txt'), # 7014\n # Provider(url='http://www.get-proxy.net/proxy-archives'), # 519\n # Proxyb_net(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 857\n # Proxz_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25'),\n # max_conn=2), # 443\n # Proxynova_com(proto=('HTTP', 'CONNECT:80', 'HTTPS', 'CONNECT:25')), # 818\n # _50kproxies_com(), # 822\n # Free_proxy_cz(), # 420\n]" }, { "identifier": "Provider", "path": "proxyhub/providers.py", "snippet": "class Provider:\n \"\"\"Proxy provider.\n\n Provider - a website that publish free public proxy lists.\n\n :param str url: Url of page where to find proxies\n :param tuple proto:\n (optional) List of the types (protocols) that may be supported\n by 
proxies returned by the provider. Then used as :attr:`Proxy.types`\n :param int max_conn:\n (optional) The maximum number of concurrent connections on the provider\n :param int max_tries:\n (optional) The maximum number of attempts to receive response\n :param int timeout:\n (optional) Timeout of a request in seconds\n \"\"\"\n\n _pattern = IPPortPatternGlobal\n\n def __init__(\n self, url=None, proto=(), max_conn=4, max_tries=3, timeout=20, loop=None\n ):\n if url:\n self.domain = urlparse(url).netloc\n self.url = url\n self.proto = proto\n self._max_tries = max_tries\n self._timeout = timeout\n self._session = None\n self._cookies = {}\n self._proxies = set()\n # concurrent connections on the current provider\n self._sem_provider = asyncio.Semaphore(max_conn)\n self._loop = loop or asyncio.get_event_loop()\n\n @property\n def proxies(self):\n \"\"\"Return all found proxies.\n\n :return:\n Set of tuples with proxy hosts, ports and types (protocols)\n that may be supported (from :attr:`.proto`).\n\n For example:\n {('192.168.0.1', '80', ('HTTP', 'HTTPS'), ...)}\n\n :rtype: set\n \"\"\"\n return self._proxies\n\n @proxies.setter\n def proxies(self, new):\n new = [(host, port, self.proto) for host, port in new if port]\n self._proxies.update(new)\n\n async def get_proxies(self):\n \"\"\"Receive proxies from the provider and return them.\n\n :return: :attr:`.proxies`\n \"\"\"\n log.debug('Try to get proxies from %s' % self.domain)\n\n async with aiohttp.ClientSession(\n headers=get_headers(), cookies=self._cookies, loop=self._loop\n ) as self._session:\n await self._pipe()\n\n log.debug(\n '%d proxies received from %s: %s'\n % (len(self.proxies), self.domain, self.proxies)\n )\n return self.proxies\n\n async def _pipe(self):\n await self._find_on_page(self.url)\n\n async def _find_on_pages(self, urls):\n if not urls:\n return\n tasks = []\n if not isinstance(urls[0], dict):\n urls = set(urls)\n for url in urls:\n if isinstance(url, dict):\n tasks.append(self._find_on_page(**url))\n else:\n tasks.append(self._find_on_page(url))\n await asyncio.gather(*tasks)\n\n async def _find_on_page(self, url, data=None, headers=None, method='GET'):\n page = await self.get(url, data=data, headers=headers, method=method)\n oldcount = len(self.proxies)\n try:\n received = self.find_proxies(page)\n except Exception as e:\n received = []\n log.error(\n 'Error when executing find_proxies.'\n 'Domain: %s; Error: %r' % (self.domain, e)\n )\n self.proxies = received\n added = len(self.proxies) - oldcount\n log.debug(\n '%d(%d) proxies added(received) from %s' % (added, len(received), url)\n )\n\n async def get(self, url, data=None, headers=None, method='GET'):\n for _ in range(self._max_tries):\n page = await self._get(url, data=data, headers=headers, method=method)\n if page:\n break\n return page\n\n async def _get(self, url, data=None, headers=None, method='GET'):\n page = ''\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with self._sem_provider, self._session.request(\n method, url, data=data, headers=headers, timeout=timeout\n ) as resp:\n page = await resp.text()\n if resp.status != 200:\n log.debug(\n 'url: %s\\nheaders: %s\\ncookies: %s\\npage:\\n%s'\n % (url, resp.headers, resp.cookies, page)\n )\n raise BadStatusError('Status: %s' % resp.status)\n except (\n UnicodeDecodeError,\n BadStatusError,\n asyncio.TimeoutError,\n aiohttp.ClientOSError,\n aiohttp.ClientResponseError,\n aiohttp.ServerDisconnectedError,\n ) as e:\n page = ''\n log.debug('%s is failed. 
Error: %r;' % (url, e))\n return page\n\n def find_proxies(self, page):\n return self._find_proxies(page)\n\n def _find_proxies(self, page):\n proxies = self._pattern.findall(page)\n return proxies" }, { "identifier": "Proxy", "path": "proxyhub/proxy.py", "snippet": "class Proxy:\n \"\"\"Proxy.\n\n :param str host: IP address of the proxy\n :param int port: Port of the proxy\n :param tuple types:\n (optional) List of types (protocols) which may be supported\n by the proxy and which can be checked to work with the proxy\n :param int timeout:\n (optional) Timeout of a connection and receive a response in seconds\n :param bool verify_ssl:\n (optional) Flag indicating whether to check the SSL certificates.\n Set to True to check ssl certifications\n\n :raises ValueError: If the host not is IP address, or if the port > 65535\n \"\"\"\n\n @classmethod\n async def create(cls, host, *args, **kwargs):\n \"\"\"Asynchronously create a :class:`Proxy` object.\n\n :param str host: A passed host can be a domain or IP address.\n If the host is a domain, try to resolve it\n :param str *args:\n (optional) Positional arguments that :class:`Proxy` takes\n :param str **kwargs:\n (optional) Keyword arguments that :class:`Proxy` takes\n\n :return: :class:`Proxy` object\n :rtype: proxyhub.Proxy\n\n :raises ResolveError: If could not resolve the host\n :raises ValueError: If the port > 65535\n \"\"\" # noqa: W605\n loop = kwargs.pop('loop', None)\n resolver = kwargs.pop('resolver', Resolver(loop=loop))\n try:\n _host = await resolver.resolve(host)\n self = cls(_host, *args, **kwargs)\n except (ResolveError, ValueError) as e:\n log.error('%s:%s: Error at creating: %s' % (host, args[0], e))\n raise\n return self\n\n def __init__(self, host=None, port=None, types=(), timeout=8, verify_ssl=False):\n self.host = host\n if not Resolver.host_is_ip(self.host):\n raise ValueError(\n 'The host of proxy should be the IP address. '\n 'Try Proxy.create() if the host is a domain'\n )\n\n self.port = int(port)\n if self.port > 65535:\n raise ValueError('The port of proxy cannot be greater than 65535')\n\n self.expected_types = set(types) & {\n 'HTTP',\n 'HTTPS',\n 'CONNECT:80',\n 'CONNECT:25',\n 'SOCKS4',\n 'SOCKS5',\n }\n self._timeout = timeout\n self._ssl_context = True if verify_ssl else _ssl._create_unverified_context()\n self._types = {}\n self._is_working = False\n self.stat = {'requests': 0, 'errors': Counter()}\n self._ngtr = None\n self._geo = Resolver.get_ip_info(self.host)\n self._log = []\n self._runtimes = []\n self._schemes = ()\n self._closed = True\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n\n def __repr__(self):\n \"\"\"Class representation\n e.g. 
<Proxy US 1.12 [HTTP: Anonymous, HTTPS] 10.0.0.1:8080>\n \"\"\"\n tpinfo = []\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n s = '{tp}: {lvl}' if lvl else '{tp}'\n s = s.format(tp=tp, lvl=lvl)\n tpinfo.append(s)\n tpinfo = ', '.join(tpinfo)\n return '<Proxy {code} {avg:.2f}s [{types}] {host}:{port}>'.format(\n code=self._geo.code,\n types=tpinfo,\n host=self.host,\n port=self.port,\n avg=self.avg_resp_time,\n )\n\n @property\n def types(self):\n \"\"\"Types (protocols) supported by the proxy.\n\n | Where key is type, value is level of anonymity\n (only for HTTP, for other types level always is None).\n | Available types: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25\n | Available levels: Transparent, Anonymous, High.\n\n :rtype: dict\n \"\"\"\n return self._types\n\n @property\n def is_working(self):\n \"\"\"True if the proxy is working, False otherwise.\n\n :rtype: bool\n \"\"\"\n return self._is_working\n\n @is_working.setter\n def is_working(self, val):\n self._is_working = val\n\n @property\n def writer(self):\n return self._writer.get('ssl') or self._writer.get('conn')\n\n @property\n def reader(self):\n return self._reader.get('ssl') or self._reader.get('conn')\n\n @property\n def priority(self):\n return (self.error_rate, self.avg_resp_time)\n\n @property\n def error_rate(self):\n \"\"\"Error rate: from 0 to 1.\n\n For example: 0.7 = 70% requests ends with error.\n\n :rtype: float\n\n .. versionadded:: 0.2.0\n \"\"\"\n if not self.stat['requests']:\n return 0\n return round(sum(self.stat['errors'].values()) / self.stat['requests'], 2)\n\n @property\n def schemes(self):\n \"\"\"Return supported schemes.\"\"\"\n if not self._schemes:\n _schemes = []\n if self.types.keys() & _HTTP_PROTOS:\n _schemes.append('HTTP')\n if self.types.keys() & _HTTPS_PROTOS:\n _schemes.append('HTTPS')\n self._schemes = tuple(_schemes)\n return self._schemes\n\n @property\n def avg_resp_time(self):\n \"\"\"The average connection/response time.\n\n :rtype: float\n \"\"\"\n if not self._runtimes:\n return 0\n return round(sum(self._runtimes) / len(self._runtimes), 2)\n\n @property\n def avgRespTime(self):\n \"\"\"\n .. deprecated:: 2.0\n Use :attr:`avg_resp_time` instead.\n \"\"\"\n warnings.warn(\n '`avgRespTime` property is deprecated, ' 'use `avg_resp_time` instead.',\n DeprecationWarning,\n )\n return self.avg_resp_time\n\n @property\n def geo(self):\n \"\"\"Geo information about IP address of the proxy.\n\n :return:\n Named tuple with fields:\n * ``code`` - ISO country code\n * ``name`` - Full name of country\n * ``region_code`` - ISO region code\n * ``region_name`` - Full name of region\n * ``city_name`` - Full name of city\n :rtype: collections.namedtuple\n\n .. 
versionchanged:: 0.2.0\n In previous versions return a dictionary, now named tuple.\n \"\"\"\n return self._geo\n\n @property\n def ngtr(self):\n return self._ngtr\n\n @ngtr.setter\n def ngtr(self, proto):\n self._ngtr = NGTRS[proto](self)\n\n def as_json(self):\n \"\"\"Return the proxy's properties in JSON format.\n\n :rtype: dict\n \"\"\"\n info = {\n 'host': self.host,\n 'port': self.port,\n 'geo': {\n 'country': {'code': self._geo.code, 'name': self._geo.name},\n 'region': {\n 'code': self._geo.region_code,\n 'name': self._geo.region_name,\n },\n 'city': self._geo.city_name,\n },\n 'types': [],\n 'avg_resp_time': self.avg_resp_time,\n 'error_rate': self.error_rate,\n }\n\n order = lambda tp_lvl: (len(tp_lvl[0]), tp_lvl[0][-1]) # noqa: 731\n for tp, lvl in sorted(self.types.items(), key=order):\n info['types'].append({'type': tp, 'level': lvl or ''})\n return info\n\n def as_text(self):\n \"\"\"\n Return proxy as host:port\n\n :rtype: str\n \"\"\"\n return \"{}:{}\\n\".format(self.host, self.port)\n\n def log(self, msg, stime=0, err=None):\n ngtr = self.ngtr.name if self.ngtr else 'INFO'\n runtime = time.time() - stime if stime else 0\n log.debug(\n '{h}:{p} [{n}]: {msg}; Runtime: {rt:.2f}'.format(\n h=self.host, p=self.port, n=ngtr, msg=msg, rt=runtime\n )\n )\n trunc = '...' if len(msg) > 58 else ''\n msg = '{msg:.60s}{trunc}'.format(msg=msg, trunc=trunc)\n self._log.append((ngtr, msg, runtime))\n if err:\n self.stat['errors'][err.errmsg] += 1\n if runtime and 'timeout' not in msg:\n self._runtimes.append(runtime)\n\n def get_log(self):\n \"\"\"Proxy log.\n\n :return: The proxy log in format: (negotaitor, msg, runtime)\n :rtype: tuple\n\n .. versionadded:: 0.2.0\n \"\"\"\n return self._log\n\n async def connect(self, ssl=False):\n err = None\n msg = '%s' % 'SSL: ' if ssl else ''\n stime = time.time()\n self.log('%sInitial connection' % msg)\n try:\n if ssl:\n _type = 'ssl'\n sock = self._writer['conn'].get_extra_info('socket')\n params = {\n 'ssl': self._ssl_context,\n 'sock': sock,\n 'server_hostname': self.host,\n }\n else:\n _type = 'conn'\n params = {'host': self.host, 'port': self.port}\n self._reader[_type], self._writer[_type] = await asyncio.wait_for(\n asyncio.open_connection(**params), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg += 'Connection: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionRefusedError, OSError, _ssl.SSLError):\n msg += 'Connection: failed'\n err = ProxyConnError(msg)\n raise err\n # except asyncio.CancelledError:\n # log.debug('Cancelled in proxy.connect()')\n # raise ProxyConnError()\n else:\n msg += 'Connection: success'\n self._closed = False\n finally:\n self.stat['requests'] += 1\n self.log(msg, stime, err=err)\n\n def close(self):\n if self._closed:\n return\n self._closed = True\n if self.writer:\n # try:\n self.writer.close()\n # except RuntimeError:\n # print('Try proxy.close() when loop is closed:',\n # asyncio.get_event_loop()._closed)\n self._reader = {'conn': None, 'ssl': None}\n self._writer = {'conn': None, 'ssl': None}\n self.log('Connection: closed')\n self._ngtr = None\n\n async def send(self, req):\n msg, err = '', None\n _req = req.encode() if not isinstance(req, bytes) else req\n try:\n self.writer.write(_req)\n await self.writer.drain()\n except ConnectionResetError:\n msg = '; Sending: failed'\n err = ProxySendError(msg)\n raise err\n finally:\n self.log('Request: %s%s' % (req, msg), err=err)\n\n async def recv(self, length=0, head_only=False):\n resp, msg, err = b'', '', None\n stime = 
time.time()\n try:\n resp = await asyncio.wait_for(\n self._recv(length, head_only), timeout=self._timeout\n )\n except asyncio.TimeoutError:\n msg = 'Received: timeout'\n err = ProxyTimeoutError(msg)\n raise err\n except (ConnectionResetError, OSError):\n msg = 'Received: failed' # (connection is reset by the peer)\n err = ProxyRecvError(msg)\n raise err\n else:\n msg = 'Received: %s bytes' % len(resp)\n if not resp:\n err = ProxyEmptyRecvError(msg)\n raise err\n finally:\n if resp:\n msg += ': %s' % resp[:12]\n self.log(msg, stime, err=err)\n return resp\n\n async def _recv(self, length=0, head_only=False):\n resp = b''\n if length:\n try:\n resp = await self.reader.readexactly(length)\n except asyncio.IncompleteReadError as e:\n resp = e.partial\n else:\n body_size, body_recv, chunked = 0, 0, None\n while not self.reader.at_eof():\n line = await self.reader.readline()\n resp += line\n if body_size:\n body_recv += len(line)\n if body_recv >= body_size:\n break\n elif chunked and line == b'0\\r\\n':\n break\n elif not body_size and line == b'\\r\\n':\n if head_only:\n break\n headers = parse_headers(resp)\n body_size = int(headers.get('Content-Length', 0))\n if not body_size:\n chunked = headers.get('Transfer-Encoding') == 'chunked'\n return resp" }, { "identifier": "Resolver", "path": "proxyhub/resolver.py", "snippet": "class Resolver:\n \"\"\"Async host resolver based on aiodns.\"\"\"\n\n _cached_hosts = {}\n _ip_hosts = [\n 'https://wtfismyip.com/text',\n 'http://api.ipify.org/',\n 'http://ipinfo.io/ip',\n 'http://ipv4.icanhazip.com/',\n 'http://myexternalip.com/raw',\n 'http://ipinfo.io/ip',\n 'http://ifconfig.io/ip',\n ]\n # the list of resolvers will point a copy of original one\n _temp_host = []\n\n def __init__(self, timeout=5, loop=None):\n self._timeout = timeout\n self._loop = loop or asyncio.get_event_loop()\n self._resolver = aiodns.DNSResolver(loop=self._loop)\n\n @staticmethod\n def host_is_ip(host):\n \"\"\"Check a host is IP address.\"\"\"\n # TODO: add IPv6 support\n try:\n host = '.'.join(f'{int(n)}' for n in host.split('.'))\n ipaddress.IPv4Address(host)\n except (ipaddress.AddressValueError, ValueError):\n return False\n else:\n return True\n\n @staticmethod\n def get_ip_info(ip):\n \"\"\"Return geo information about IP address.\n\n `code` - ISO country code\n `name` - Full name of country\n `region_code` - ISO region code\n `region_name` - Full name of region\n `city_name` - Full name of city\n \"\"\"\n # from pprint import pprint\n try:\n ipInfo = _mmdb_reader.get(ip) or {}\n except (maxminddb.errors.InvalidDatabaseError, ValueError):\n ipInfo = {}\n\n code, name = '--', 'Unknown'\n city_name, region_code, region_name = ('Unknown',) * 3\n if 'country' in ipInfo:\n code = ipInfo['country']['iso_code']\n name = ipInfo['country']['names']['en']\n elif 'continent' in ipInfo:\n code = ipInfo['continent']['code']\n name = ipInfo['continent']['names']['en']\n if 'city' in ipInfo:\n city_name = ipInfo['city']['names']['en']\n if 'subdivisions' in ipInfo:\n region_code = ipInfo['subdivisions'][0]['iso_code']\n region_name = ipInfo['subdivisions'][0]['names']['en']\n return GeoData(code, name, region_code, region_name, city_name)\n\n def _pop_random_ip_host(self):\n host = random.choice(self._temp_host)\n self._temp_host.remove(host)\n return host\n\n async def get_real_ext_ip(self):\n \"\"\"Return real external IP address.\"\"\"\n # make a copy of original one to temp one\n # so original one will stay no change\n self._temp_host = self._ip_hosts.copy()\n while 
self._temp_host:\n try:\n timeout = aiohttp.ClientTimeout(total=self._timeout)\n async with aiohttp.ClientSession(\n timeout=timeout, loop=self._loop\n ) as session, session.get(self._pop_random_ip_host()) as resp:\n ip = await resp.text()\n except asyncio.TimeoutError:\n pass\n else:\n ip = ip.strip()\n if self.host_is_ip(ip):\n log.debug('Real external IP: %s', ip)\n break\n else:\n raise RuntimeError('Could not get the external IP')\n return ip\n\n async def resolve(self, host, port=80, family=None, qtype='A', logging=True):\n \"\"\"Return resolving IP address(es) from host name.\"\"\"\n if self.host_is_ip(host):\n return host\n\n _host = self._cached_hosts.get(host)\n if _host:\n return _host\n\n resp = await self._resolve(host, qtype)\n\n if resp:\n hosts = [\n {\n 'hostname': host,\n 'host': r.host,\n 'port': port,\n 'family': family,\n 'proto': socket.IPPROTO_IP,\n 'flags': socket.AI_NUMERICHOST,\n }\n for r in resp\n ]\n if family:\n self._cached_hosts[host] = hosts\n else:\n self._cached_hosts[host] = hosts[0]['host']\n if logging:\n log.debug('%s: Host resolved: %s' % (host, self._cached_hosts[host]))\n else:\n if logging:\n log.warning('%s: Could not resolve host' % host)\n return self._cached_hosts.get(host)\n\n async def _resolve(self, host, qtype):\n try:\n resp = await asyncio.wait_for(\n self._resolver.query(host, qtype), timeout=self._timeout\n )\n except (aiodns.error.DNSError, asyncio.TimeoutError):\n raise ResolveError\n else:\n return resp" }, { "identifier": "Server", "path": "proxyhub/server.py", "snippet": "class Server:\n \"\"\"Server distributes incoming requests to a pool of found proxies.\"\"\"\n\n def __init__(\n self,\n host,\n port,\n proxies,\n timeout=8,\n max_tries=3,\n min_queue=5,\n min_req_proxy=5,\n max_error_rate=0.5,\n max_resp_time=8,\n prefer_connect=False,\n http_allowed_codes=None,\n backlog=100,\n loop=None,\n **kwargs,\n ):\n self.host = host\n self.port = int(port)\n self._loop = loop or asyncio.get_event_loop()\n self._timeout = timeout\n self._max_tries = max_tries\n self._backlog = backlog\n self._prefer_connect = prefer_connect\n\n self._server = None\n self._connections = {}\n self._proxy_pool = ProxyPool(\n proxies, min_req_proxy, max_error_rate, max_resp_time, min_queue\n )\n self._resolver = Resolver(loop=self._loop)\n self._http_allowed_codes = http_allowed_codes or []\n\n def start(self):\n\n srv = asyncio.start_server(\n self._accept,\n host=self.host,\n port=self.port,\n backlog=self._backlog,\n loop=self._loop,\n )\n self._server = self._loop.run_until_complete(srv)\n\n log.info(\n 'Listening established on {0}'.format(self._server.sockets[0].getsockname())\n )\n\n def stop(self):\n if not self._server:\n return\n for conn in self._connections:\n if not conn.done():\n conn.cancel()\n self._server.close()\n if not self._loop.is_running():\n self._loop.run_until_complete(self._server.wait_closed())\n # Time to close the running futures in self._connections\n self._loop.run_until_complete(asyncio.sleep(0.5))\n self._server = None\n self._loop.stop()\n log.info('Server is stopped')\n\n def _accept(self, client_reader, client_writer):\n def _on_completion(f):\n reader, writer = self._connections.pop(f)\n writer.close()\n log.debug('client: %d; closed' % id(client_reader))\n try:\n exc = f.exception()\n except asyncio.CancelledError:\n log.debug('CancelledError in server._handle:_on_completion')\n exc = None\n if exc:\n if isinstance(exc, NoProxyError):\n self.stop()\n else:\n raise exc\n\n f = 
asyncio.ensure_future(self._handle(client_reader, client_writer))\n f.add_done_callback(_on_completion)\n self._connections[f] = (client_reader, client_writer)\n\n async def _handle(self, client_reader, client_writer):\n log.debug(\n 'Accepted connection from %s' % (client_writer.get_extra_info('peername'),)\n )\n\n request, headers = await self._parse_request(client_reader)\n scheme = self._identify_scheme(headers)\n client = id(client_reader)\n log.debug(\n 'client: %d; request: %s; headers: %s; scheme: %s'\n % (client, request, headers, scheme)\n )\n\n # API for controlling proxyhub2\n if headers['Host'] == 'proxycontrol':\n _api, _operation, _params = headers['Path'].split('/', 5)[3:]\n if _api == 'api':\n if _operation == 'remove':\n proxy_host, proxy_port = _params.split(':', 1)\n self._proxy_pool.remove(proxy_host, int(proxy_port))\n log.debug(\n 'Remove Proxy: client: %d; request: %s; headers: %s; scheme: %s; proxy_host: %s; proxy_port: %s'\n % (client, request, headers, scheme, proxy_host, proxy_port)\n )\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n elif _operation == 'history':\n query_type, url = _params.split(':', 1)\n if query_type == 'url':\n previous_proxy = history.get(\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{url}\"\n )\n if previous_proxy is None:\n client_writer.write(b'HTTP/1.1 204 No Content\\r\\n\\r\\n')\n await client_writer.drain()\n return\n else:\n previous_proxy_bytestring = (\n '{\"proxy\": \"%s\"}' % previous_proxy\n ).encode()\n client_writer.write(b'HTTP/1.1 200 OK\\r\\n')\n client_writer.write(b'Content-Type: application/json\\r\\n')\n client_writer.write(\n f\"Content-Length: {str(len(previous_proxy_bytestring) + 2).encode()}\\r\\n\"\n )\n client_writer.write(b'Access-Control-Allow-Origin: *\\r\\n')\n client_writer.write(\n b'Access-Control-Allow-Credentials: true\\r\\n\\r\\n'\n )\n\n client_writer.write(previous_proxy_bytestring + b'\\r\\n')\n await client_writer.drain()\n return\n\n for attempt in range(self._max_tries):\n stime, err = 0, None\n proxy = await self._proxy_pool.get(scheme)\n proto = self._choice_proto(proxy, scheme)\n log.debug(\n 'client: %d; attempt: %d; proxy: %s; proto: %s'\n % (client, attempt, proxy, proto)\n )\n\n try:\n await proxy.connect()\n\n if proto in ('CONNECT:80', 'SOCKS4', 'SOCKS5'):\n host = headers.get('Host')\n port = headers.get('Port', 80)\n try:\n ip = await self._resolver.resolve(host)\n except ResolveError:\n return\n proxy.ngtr = proto\n await proxy.ngtr.negotiate(host=host, port=port, ip=ip)\n if scheme == 'HTTPS' and proto in ('SOCKS4', 'SOCKS5'):\n client_writer.write(CONNECTED)\n await client_writer.drain()\n else: # HTTP\n await proxy.send(request)\n else: # proto: HTTP & HTTPS\n await proxy.send(request)\n\n history[\n f\"{client_reader._transport.get_extra_info('peername')[0]}-{headers['Path']}\"\n ] = (proxy.host + ':' + str(proxy.port))\n inject_resp_header = {\n 'headers': {'X-Proxy-Info': proxy.host + ':' + str(proxy.port)}\n }\n\n stime = time.time()\n stream = [\n asyncio.ensure_future(\n self._stream(reader=client_reader, writer=proxy.writer)\n ),\n asyncio.ensure_future(\n self._stream(\n reader=proxy.reader,\n writer=client_writer,\n scheme=scheme,\n inject=inject_resp_header,\n )\n ),\n ]\n await asyncio.gather(*stream, loop=self._loop)\n except asyncio.CancelledError:\n log.debug('Cancelled in server._handle')\n break\n except (\n ProxyTimeoutError,\n ProxyConnError,\n ProxyRecvError,\n ProxySendError,\n 
ProxyEmptyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n log.debug('client: %d; error: %r' % (client, e))\n continue\n except ErrorOnStream as e:\n log.debug(\n 'client: %d; error: %r; EOF: %s'\n % (client, e, client_reader.at_eof())\n )\n for task in stream:\n if not task.done():\n task.cancel()\n if client_reader.at_eof() and 'Timeout' in repr(e):\n # Proxy may not be able to receive EOF and weel be raised a\n # TimeoutError, but all the data has already successfully\n # returned, so do not consider this error of proxy\n break\n err = e\n if scheme == 'HTTPS': # SSL Handshake probably failed\n break\n else:\n break\n finally:\n proxy.log(request.decode(), stime, err=err)\n proxy.close()\n self._proxy_pool.put(proxy)\n\n async def _parse_request(self, reader, length=65536):\n request = await reader.read(length)\n headers = parse_headers(request)\n if headers['Method'] == 'POST' and request.endswith(b'\\r\\n\\r\\n'):\n # For aiohttp. POST data returns on second reading\n request += await reader.read(length)\n return request, headers\n\n def _identify_scheme(self, headers):\n if headers['Method'] == 'CONNECT':\n return 'HTTPS'\n else:\n return 'HTTP'\n\n def _choice_proto(self, proxy, scheme):\n if scheme == 'HTTP':\n if self._prefer_connect and ('CONNECT:80' in proxy.types):\n proto = 'CONNECT:80'\n else:\n relevant = {\n 'HTTP',\n 'CONNECT:80',\n 'SOCKS4',\n 'SOCKS5',\n } & proxy.types.keys()\n proto = relevant.pop()\n else: # HTTPS\n relevant = {'HTTPS', 'SOCKS4', 'SOCKS5'} & proxy.types.keys()\n proto = relevant.pop()\n return proto\n\n async def _stream(self, reader, writer, length=65536, scheme=None, inject=None):\n checked = False\n\n try:\n while not reader.at_eof():\n data = await asyncio.wait_for(reader.read(length), self._timeout)\n if not data:\n writer.close()\n break\n elif scheme and not checked:\n self._check_response(data, scheme)\n\n if inject.get('headers') is not None and len(inject['headers']) > 0:\n data = self._inject_headers(data, scheme, inject['headers'])\n\n checked = True\n\n writer.write(data)\n await writer.drain()\n\n except (\n asyncio.TimeoutError,\n ConnectionResetError,\n OSError,\n ProxyRecvError,\n BadStatusError,\n BadResponseError,\n ) as e:\n raise ErrorOnStream(e)\n\n def _check_response(self, data, scheme):\n if scheme == 'HTTP' and self._http_allowed_codes:\n line = data.split(b'\\r\\n', 1)[0].decode()\n try:\n header = parse_status_line(line)\n except BadStatusLine:\n raise BadResponseError\n if header['Status'] not in self._http_allowed_codes:\n raise BadStatusError(\n '%r not in %r' % (header['Status'], self._http_allowed_codes)\n )\n\n def _inject_headers(self, data, scheme, headers):\n custom_lines = []\n\n if scheme == 'HTTP' or scheme == 'HTTPS':\n status_line, rest_lines = data.split(b'\\r\\n', 1)\n custom_lines.append(status_line)\n\n for k, v in headers.items():\n custom_lines.append(('%s: %s' % (k, v)).encode())\n\n custom_lines.append(rest_lines)\n data = b'\\r\\n'.join(custom_lines)\n\n return data" }, { "identifier": "IPPortPatternLine", "path": "proxyhub/utils.py", "snippet": "BASE_DIR = getattr(sys, '_MEIPASS', os.path.dirname(os.path.abspath(__file__)))\nDATA_DIR = os.path.join(BASE_DIR, 'data')\ndef get_headers(rv=False):\ndef get_all_ip(page):\ndef get_status_code(resp, start=9, stop=12):\ndef parse_status_line(line):\ndef parse_headers(headers):\ndef update_geoip_db():" } ]
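The `Server` snippet above centers on `_stream`, which pumps bytes between the client and the chosen proxy with a per-read timeout. Below is a minimal, self-contained sketch of that forwarding loop; `relay` and `pipe` are illustrative names, not part of proxyhub's API.

```python
import asyncio


async def relay(reader: asyncio.StreamReader,
                writer: asyncio.StreamWriter,
                timeout: float = 8.0,
                chunk: int = 65536) -> None:
    """Forward bytes one way until EOF, with a timeout on every read."""
    try:
        while not reader.at_eof():
            data = await asyncio.wait_for(reader.read(chunk), timeout)
            if not data:  # peer closed the connection
                break
            writer.write(data)
            await writer.drain()  # apply backpressure before reading more
    finally:
        writer.close()


async def pipe(a_reader, a_writer, b_reader, b_writer) -> None:
    # Run both directions concurrently, as server._handle does when it
    # gathers two _stream() futures (client->proxy and proxy->client).
    await asyncio.gather(relay(a_reader, b_writer), relay(b_reader, a_writer))
```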
import asyncio import io import signal import warnings from collections import Counter, defaultdict from functools import partial from pprint import pprint from .checker import Checker from .errors import ResolveError from .providers import PROVIDERS, Provider from .proxy import Proxy from .resolver import Resolver from .server import Server from .utils import IPPortPatternLine, log
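For orientation, here is a minimal driver for the `Broker` these imports assemble, following the classic proxybroker-style usage. That the package root re-exports `Broker`, and that the broker puts `None` on the queue when finished, are assumptions not shown in this record.

```python
import asyncio

from proxyhub import Broker  # assumed re-export; adjust to the real module path


async def show(proxies: asyncio.Queue) -> None:
    while True:
        proxy = await proxies.get()
        if proxy is None:  # assumed completion sentinel
            break
        print('Found proxy: %s' % proxy)


async def main() -> None:
    proxies = asyncio.Queue()
    # Matches the __init__ signature shown above: Broker(queue, timeout=8, ...)
    broker = Broker(proxies, timeout=8, max_tries=3)
    await asyncio.gather(
        broker.find(types=['HTTP', 'HTTPS'], limit=10),
        show(proxies),
    )


if __name__ == '__main__':
    asyncio.run(main())
```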
14,841
request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. 
Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try:
# Pause between grabbing cycles; in seconds. GRAB_PAUSE = 180 # The maximum number of providers that are parsed concurrently MAX_CONCURRENT_PROVIDERS = 3 class Broker: """The Broker. | One broker to rule them all, one broker to find them, | One broker to bring them all and in the darkness bind them. :param asyncio.Queue queue: (optional) Queue of found/checked proxies :param int timeout: (optional) Timeout of a request in seconds :param int max_conn: (optional) The maximum number of concurrent checks of proxies :param int max_tries: (optional) The maximum number of attempts to check a proxy :param list judges: (optional) Urls of pages that show HTTP headers and IP address. Or :class:`~proxyhub.judge.Judge` objects :param list providers: (optional) Urls of pages where to find proxies. Or :class:`~proxyhub.providers.Provider` objects :param bool verify_ssl: (optional) Flag indicating whether to check the SSL certificates. Set to True to check ssl certifications :param loop: (optional) asyncio compatible event loop :param stop_broker_on_sigint: (optional) whether set SIGINT signal on broker object. Useful for a thread other than main thread. .. deprecated:: 0.2.0 Use :attr:`max_conn` and :attr:`max_tries` instead of :attr:`max_concurrent_conn` and :attr:`attempts_conn`. """ def __init__( self, queue=None, timeout=8, max_conn=200, max_tries=3, judges=None, providers=None, verify_ssl=False, loop=None, stop_broker_on_sigint=True, **kwargs, ): self._loop = loop or asyncio.get_event_loop_policy().get_event_loop() self._proxies = queue or asyncio.Queue() self._resolver = Resolver(loop=self._loop) self._timeout = timeout self._verify_ssl = verify_ssl self.unique_proxies = {} self._all_tasks = [] self._checker = None self._server = None self._limit = 0 # not limited self._countries = None max_concurrent_conn = kwargs.get('max_concurrent_conn') if max_concurrent_conn: warnings.warn( '`max_concurrent_conn` is deprecated, use `max_conn` instead', DeprecationWarning, ) if isinstance(max_concurrent_conn, asyncio.Semaphore): max_conn = max_concurrent_conn._value else: max_conn = max_concurrent_conn attempts_conn = kwargs.get('attempts_conn') if attempts_conn: warnings.warn( '`attempts_conn` is deprecated, use `max_tries` instead', DeprecationWarning, ) max_tries = attempts_conn # The maximum number of concurrent checking proxies self._on_check = asyncio.Queue(maxsize=max_conn) self._max_tries = max_tries self._judges = judges self._providers = [ p if isinstance(p, Provider) else Provider(p) for p in (providers or PROVIDERS) ] if stop_broker_on_sigint: try: self._loop.add_signal_handler(signal.SIGINT, self.stop) # add_signal_handler() is not implemented on Win # https://docs.python.org/3.5/library/asyncio-eventloops.html#windows except NotImplementedError: pass async def grab(self, *, countries=None, limit=0): """Gather proxies from the providers without checking. :param list countries: (optional) List of ISO country codes where should be located proxies :param int limit: (optional) The maximum number of proxies :ref:`Example of usage <proxyhub-examples-grab>`. """ self._countries = countries self._limit = limit task = asyncio.ensure_future(self._grab(check=False)) self._all_tasks.append(task) async def find( self, *, types=None, data=None, countries=None, post=False, strict=False, dnsbl=None, limit=0, **kwargs, ): """Gather and check proxies from providers or from a passed data. :ref:`Example of usage <proxyhub-examples-find>`. :param list types: Types (protocols) that need to be check on support by proxy. 
Supported: HTTP, HTTPS, SOCKS4, SOCKS5, CONNECT:80, CONNECT:25 And levels of anonymity (HTTP only): Transparent, Anonymous, High :param data: (optional) String or list with proxies. Also can be a file-like object supports `read()` method. Used instead of providers :param list countries: (optional) List of ISO country codes where should be located proxies :param bool post: (optional) Flag indicating use POST instead of GET for requests when checking proxies :param bool strict: (optional) Flag indicating that anonymity levels of types (protocols) supported by a proxy must be equal to the requested types and levels of anonymity. By default, strict mode is off and for a successful check is enough to satisfy any one of the requested types :param list dnsbl: (optional) Spam databases for proxy checking. `Wiki <https://en.wikipedia.org/wiki/DNSBL>`_ :param int limit: (optional) The maximum number of proxies :raises ValueError: If :attr:`types` not given. .. versionchanged:: 0.2.0 Added: :attr:`post`, :attr:`strict`, :attr:`dnsbl`. Changed: :attr:`types` is required. """ ip = await self._resolver.get_real_ext_ip() types = _update_types(types) if not types: raise ValueError('`types` is required') self._checker = Checker( judges=self._judges, timeout=self._timeout, verify_ssl=self._verify_ssl, max_tries=self._max_tries, real_ext_ip=ip, types=types, post=post, strict=strict, dnsbl=dnsbl, loop=self._loop, ) self._countries = countries self._limit = limit tasks = [asyncio.ensure_future(self._checker.check_judges())] if data: task = asyncio.ensure_future(self._load(data, check=True)) else: task = asyncio.ensure_future(self._grab(types, check=True)) tasks.append(task) self._all_tasks.extend(tasks) def serve(self, host='127.0.0.1', port=8888, limit=100, **kwargs): """Start a local proxy server. The server distributes incoming requests to a pool of found proxies. When the server receives an incoming request, it chooses the optimal proxy (based on the percentage of errors and average response time) and passes to it the incoming request. In addition to the parameters listed below are also accept all the parameters of the :meth:`.find` method and passed it to gather proxies to a pool. :ref:`Example of usage <proxyhub-examples-server>`. :param str host: (optional) Host of local proxy server :param int port: (optional) Port of local proxy server :param int limit: (optional) When will be found a requested number of working proxies, checking of new proxies will be lazily paused. Checking will be resumed if all the found proxies will be discarded in the process of working with them (see :attr:`max_error_rate`, :attr:`max_resp_time`). And will continue until it finds one working proxy and paused again. The default value is 100 :param int max_tries: (optional) The maximum number of attempts to handle an incoming request. If not specified, it will use the value specified during the creation of the :class:`Broker` object. Attempts can be made with different proxies. The default value is 3 :param int strategy: (optional) The strategy used for picking proxy from pool. The default value is 'best' :param int min_queue: (optional) The minimum number of proxies to choose from before deciding which is the most suitable to use. The default value is 5 :param int min_req_proxy: (optional) The minimum number of processed requests to estimate the quality of proxy (in accordance with :attr:`max_error_rate` and :attr:`max_resp_time`). 
The default value is 5 :param int max_error_rate: (optional) The maximum percentage of requests that ended with an error. For example: 0.5 = 50%. If proxy.error_rate exceeds this value, proxy will be removed from the pool. The default value is 0.5 :param int max_resp_time: (optional) The maximum response time in seconds. If proxy.avg_resp_time exceeds this value, proxy will be removed from the pool. The default value is 8 :param bool prefer_connect: (optional) Flag that indicates whether to use the CONNECT method if possible. For example: If is set to True and a proxy supports HTTP proto (GET or POST requests) and CONNECT method, the server will try to use CONNECT method and only after that send the original request. The default value is False :param list http_allowed_codes: (optional) Acceptable HTTP codes returned by proxy on requests. If a proxy return code, not included in this list, it will be considered as a proxy error, not a wrong/unavailable address. For example, if a proxy will return a ``404 Not Found`` response - this will be considered as an error of a proxy. Checks only for HTTP protocol, HTTPS not supported at the moment. By default the list is empty and the response code is not verified :param int backlog: (optional) The maximum number of queued connections passed to listen. The default value is 100 :raises ValueError: If :attr:`limit` is less than or equal to zero. Because a parsing of providers will be endless .. versionadded:: 0.2.0 """ if limit <= 0: raise ValueError( 'In serve mode value of the limit cannot be less than or ' 'equal to zero. Otherwise, a parsing of providers will be ' 'endless' ) self._server = Server( host=host, port=port, proxies=self._proxies, timeout=self._timeout, max_tries=kwargs.pop('max_tries', self._max_tries), loop=self._loop, **kwargs, ) self._server.start() task = asyncio.ensure_future(self.find(limit=limit, **kwargs)) self._all_tasks.append(task) async def _load(self, data, check=True): """Looking for proxies in the passed data. Transform the passed data from [raw string | file-like object | list] to set {(host, port), ...}: {('192.168.0.1', '80'), } """ log.debug('Load proxies from the raw data') if isinstance(data, io.TextIOWrapper): data = data.read() if isinstance(data, str): data = IPPortPatternLine.findall(data) proxies = set(data) for proxy in proxies: await self._handle(proxy, check=check) await self._on_check.join() self._done() async def _grab(self, types=None, check=False): def _get_tasks(by=MAX_CONCURRENT_PROVIDERS): providers = [ pr for pr in self._providers if not types or not pr.proto or bool(pr.proto & types.keys()) ] while providers: tasks = [ asyncio.ensure_future(pr.get_proxies()) for pr in providers[:by] ] del providers[:by] self._all_tasks.extend(tasks) yield tasks log.debug('Start grabbing proxies') while True: for tasks in _get_tasks(): for task in asyncio.as_completed(tasks): proxies = await task for proxy in proxies: await self._handle(proxy, check=check) log.debug('Grab cycle is complete') if self._server: log.debug('fall asleep for %d seconds' % GRAB_PAUSE) await asyncio.sleep(GRAB_PAUSE) log.debug('awaked') else: break await self._on_check.join() self._done() async def _handle(self, proxy, check=False): try:
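The `_grab` helper above fans provider fetches out in fixed-size batches (`MAX_CONCURRENT_PROVIDERS`) and consumes each batch with `asyncio.as_completed`. A self-contained sketch of that batching pattern, with provider I/O faked by `sleep`:

```python
import asyncio


async def fake_provider(n: int) -> list:
    # Stand-in for Provider.get_proxies(); real providers do HTTP I/O.
    await asyncio.sleep(0.01 * n)
    return [f'10.0.0.{n}:8080']


def batched_tasks(items, by: int = 3):
    items = list(items)
    while items:
        # Schedule at most `by` fetches at a time, like _get_tasks() above.
        yield [asyncio.ensure_future(fake_provider(i)) for i in items[:by]]
        del items[:by]


async def grab() -> None:
    for tasks in batched_tasks(range(1, 8)):
        for task in asyncio.as_completed(tasks):
            print(await task)  # results arrive in completion order


asyncio.run(grab())
```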
proxy = await Proxy.create(
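The record's `next_line` continues into `Proxy.create(...)`, whose signature is not included here; the sketch below only illustrates the generic async-factory pattern that call implies, with purely hypothetical names.

```python
import asyncio


class Endpoint:
    """Hypothetical stand-in for a class built via an async factory."""

    def __init__(self, host: str, port: int) -> None:
        self.host = host
        self.port = port

    @classmethod
    async def create(cls, host: str, port: int) -> 'Endpoint':
        loop = asyncio.get_running_loop()
        # Resolve the hostname before the object exists; __init__ itself
        # cannot await, which is why construction goes through a coroutine.
        infos = await loop.getaddrinfo(host, port)
        ip = infos[0][4][0]
        return cls(ip, port)
```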
4
2023-11-05 13:28:57+00:00
24k
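`Broker.__init__` above keeps the old `max_concurrent_conn` / `attempts_conn` keywords working while steering callers to the new names. The shim reduces to this pattern:

```python
import warnings


def make_broker(max_conn: int = 200, max_tries: int = 3, **kwargs):
    """Accept deprecated keyword names, warn, and map them to the new ones."""
    old = kwargs.pop('max_concurrent_conn', None)
    if old is not None:
        warnings.warn(
            '`max_concurrent_conn` is deprecated, use `max_conn` instead',
            DeprecationWarning,
            stacklevel=2,
        )
        max_conn = old
    old = kwargs.pop('attempts_conn', None)
    if old is not None:
        warnings.warn(
            '`attempts_conn` is deprecated, use `max_tries` instead',
            DeprecationWarning,
            stacklevel=2,
        )
        max_tries = old
    return max_conn, max_tries
```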
TheFunny/ArisuAutoSweeper
module/webui/app.py
[ { "identifier": "AzurLaneConfig", "path": "module/config/config.py", "snippet": "class AzurLaneConfig(ConfigUpdater, ManualConfig, GeneratedConfig, ConfigWatcher):\n stop_event: threading.Event = None\n bound = {}\n\n # Class property\n is_hoarding_task = True\n\n def __setattr__(self, key, value):\n if key in self.bound:\n path = self.bound[key]\n self.modified[path] = value\n if self.auto_update:\n self.update()\n else:\n super().__setattr__(key, value)\n\n def __init__(self, config_name, task=None):\n logger.attr(\"Lang\", self.LANG)\n # This will read ./config/<config_name>.json\n self.config_name = config_name\n # Raw json data in yaml file.\n self.data = {}\n # Modified arguments. Key: Argument path in yaml file. Value: Modified value.\n # All variable modifications will be record here and saved in method `save()`.\n self.modified = {}\n # Key: Argument name in GeneratedConfig. Value: Path in `data`.\n self.bound = {}\n # If write after every variable modification.\n self.auto_update = True\n # Force override variables\n # Key: Argument name in GeneratedConfig. Value: Modified value.\n self.overridden = {}\n # Scheduler queue, will be updated in `get_next_task()`, list of Function objects\n # pending_task: Run time has been reached, but haven't been run due to task scheduling.\n # waiting_task: Run time haven't been reached, wait needed.\n self.pending_task = []\n self.waiting_task = []\n # Task to run and bind.\n # Task means the name of the function to run in AzurLaneAutoScript class.\n self.task: Function\n # Template config is used for dev tools\n self.is_template_config = config_name.startswith(\"template\")\n\n if self.is_template_config:\n # For dev tools\n logger.info(\"Using template config, which is read only\")\n self.auto_update = False\n self.task = name_to_function(\"template\")\n else:\n self.load()\n if task is None:\n # Bind `Alas` by default which includes emulator settings.\n task = name_to_function(\"Alas\")\n else:\n # Bind a specific task for debug purpose.\n task = name_to_function(task)\n self.bind(task)\n self.task = task\n self.save()\n\n def load(self):\n self.data = self.read_file(self.config_name)\n self.config_override()\n\n for path, value in self.modified.items():\n deep_set(self.data, keys=path, value=value)\n\n def bind(self, func, func_list=None):\n \"\"\"\n Args:\n func (str, Function): Function to run\n func_list (set): Set of tasks to be bound\n \"\"\"\n if func_list is None:\n func_list = [\"Alas\"]\n if isinstance(func, Function):\n func = func.command\n func_list.append(func)\n logger.info(f\"Bind task {func_list}\")\n\n # Bind arguments\n visited = set()\n self.bound.clear()\n for func in func_list:\n func_data = self.data.get(func, {})\n for group, group_data in func_data.items():\n for arg, value in group_data.items():\n path = f\"{group}.{arg}\"\n if path in visited:\n continue\n arg = path_to_arg(path)\n super().__setattr__(arg, value)\n self.bound[arg] = f\"{func}.{path}\"\n visited.add(path)\n\n # Override arguments\n for arg, value in self.overridden.items():\n super().__setattr__(arg, value)\n\n @property\n def hoarding(self):\n minutes = int(\n deep_get(\n self.data, keys=\"Alas.Optimization.TaskHoardingDuration\", default=0\n )\n )\n return timedelta(minutes=max(minutes, 0))\n\n @property\n def close_game(self):\n return deep_get(\n self.data, keys=\"Alas.Optimization.CloseGameDuringWait\", default=False\n )\n\n @cached_property\n def stored(self) -> StoredGenerated:\n stored = StoredGenerated()\n # Bind config\n for _, value in 
iter_attribute(stored):\n value._bind(self)\n del_cached_property(value, '_stored')\n return stored\n\n def get_next_task(self):\n \"\"\"\n Calculate tasks, set pending_task and waiting_task\n \"\"\"\n pending = []\n waiting = []\n error = []\n now = datetime.now()\n if AzurLaneConfig.is_hoarding_task:\n now -= self.hoarding\n for func in self.data.values():\n func = Function(func)\n if not func.enable:\n continue\n if not isinstance(func.next_run, datetime):\n error.append(func)\n elif func.next_run < now:\n pending.append(func)\n else:\n waiting.append(func)\n\n f = Filter(regex=r\"(.*)\", attr=[\"command\"])\n f.load(self.SCHEDULER_PRIORITY)\n if pending:\n pending = f.apply(pending)\n if waiting:\n waiting = f.apply(waiting)\n waiting = sorted(waiting, key=operator.attrgetter(\"next_run\"))\n if error:\n pending = error + pending\n\n self.pending_task = pending\n self.waiting_task = waiting\n\n def get_next(self):\n \"\"\"\n Returns:\n Function: Command to run\n \"\"\"\n self.get_next_task()\n\n if self.pending_task:\n AzurLaneConfig.is_hoarding_task = False\n logger.info(f\"Pending tasks: {[f.command for f in self.pending_task]}\")\n task = self.pending_task[0]\n logger.attr(\"Task\", task)\n return task\n else:\n AzurLaneConfig.is_hoarding_task = True\n\n if self.waiting_task:\n logger.info(\"No task pending\")\n task = copy.deepcopy(self.waiting_task[0])\n task.next_run = (task.next_run + self.hoarding).replace(microsecond=0)\n logger.attr(\"Task\", task)\n return task\n else:\n logger.critical(\"No task waiting or pending\")\n logger.critical(\"Please enable at least one task\")\n raise RequestHumanTakeover\n\n def save(self, mod_name='alas'):\n if not self.modified:\n return False\n\n for path, value in self.modified.items():\n deep_set(self.data, keys=path, value=value)\n\n logger.info(\n f\"Save config {filepath_config(self.config_name, mod_name)}, {dict_to_kv(self.modified)}\"\n )\n # Don't use self.modified = {}, that will create a new object.\n self.modified.clear()\n del_cached_property(self, 'stored')\n self.write_file(self.config_name, data=self.data)\n\n def update(self):\n self.load()\n self.config_override()\n self.bind(self.task)\n self.save()\n\n def config_override(self):\n now = datetime.now().replace(microsecond=0)\n limited = set()\n\n def limit_next_run(tasks, limit):\n for task in tasks:\n if task in limited:\n continue\n limited.add(task)\n next_run = deep_get(\n self.data, keys=f\"{task}.Scheduler.NextRun\", default=None\n )\n if isinstance(next_run, datetime) and next_run > limit:\n deep_set(self.data, keys=f\"{task}.Scheduler.NextRun\", value=now)\n\n limit_next_run(['BattlePass'], limit=now + timedelta(days=31, seconds=-1))\n limit_next_run(self.args.keys(), limit=now + timedelta(hours=24, seconds=-1))\n\n def override(self, **kwargs):\n \"\"\"\n Override anything you want.\n Variables stall remain overridden even config is reloaded from yaml file.\n Note that this method is irreversible.\n \"\"\"\n for arg, value in kwargs.items():\n self.overridden[arg] = value\n super().__setattr__(arg, value)\n\n def set_record(self, **kwargs):\n \"\"\"\n Args:\n **kwargs: For example, `Emotion1_Value=150`\n will set `Emotion1_Value=150` and `Emotion1_Record=now()`\n \"\"\"\n with self.multi_set():\n for arg, value in kwargs.items():\n record = arg.replace(\"Value\", \"Record\")\n self.__setattr__(arg, value)\n self.__setattr__(record, datetime.now().replace(microsecond=0))\n\n def multi_set(self):\n \"\"\"\n Set multiple arguments but save once.\n\n Examples:\n with 
self.config.multi_set():\n self.config.foo1 = 1\n self.config.foo2 = 2\n \"\"\"\n return MultiSetWrapper(main=self)\n\n def cross_get(self, keys, default=None):\n \"\"\"\n Get configs from other tasks.\n\n Args:\n keys (str, list[str]): Such as `{task}.Scheduler.Enable`\n default:\n\n Returns:\n Any:\n \"\"\"\n return deep_get(self.data, keys=keys, default=default)\n\n def cross_set(self, keys, value):\n \"\"\"\n Set configs to other tasks.\n\n Args:\n keys (str, list[str]): Such as `{task}.Scheduler.Enable`\n value (Any):\n\n Returns:\n Any:\n \"\"\"\n self.modified[keys] = value\n if self.auto_update:\n self.update()\n\n def task_delay(self, success=None, server_update=None, target=None, minute=None, task=None):\n \"\"\"\n Set Scheduler.NextRun\n Should set at least one arguments.\n If multiple arguments are set, use the nearest.\n\n Args:\n success (bool):\n If True, delay Scheduler.SuccessInterval\n If False, delay Scheduler.FailureInterval\n server_update (bool, list, str):\n If True, delay to nearest Scheduler.ServerUpdate\n If type is list or str, delay to such server update\n target (datetime.datetime, str, list):\n Delay to such time.\n minute (int, float, tuple):\n Delay several minutes.\n task (str):\n Set across task. None for current task.\n \"\"\"\n\n def ensure_delta(delay):\n return timedelta(seconds=int(ensure_time(delay, precision=3) * 60))\n\n run = []\n if success is not None:\n interval = (\n 120\n if success\n else 30\n )\n run.append(datetime.now() + ensure_delta(interval))\n if server_update is not None:\n if server_update is True:\n server_update = self.Scheduler_ServerUpdate\n run.append(get_server_next_update(server_update))\n if target is not None:\n target = [target] if not isinstance(target, list) else target\n target = nearest_future(target)\n run.append(target)\n if minute is not None:\n run.append(datetime.now() + ensure_delta(minute))\n\n if len(run):\n run = min(run).replace(microsecond=0)\n kv = dict_to_kv(\n {\n \"success\": success,\n \"server_update\": server_update,\n \"target\": target,\n \"minute\": minute,\n },\n allow_none=False,\n )\n if task is None:\n task = self.task.command\n logger.info(f\"Delay task `{task}` to {run} ({kv})\")\n self.modified[f'{task}.Scheduler.NextRun'] = run\n self.update()\n else:\n raise ScriptError(\n \"Missing argument in delay_next_run, should set at least one\"\n )\n\n def task_call(self, task, force_call=True):\n \"\"\"\n Call another task to run.\n\n That task will run when current task finished.\n But it might not be run because:\n - Other tasks should run first according to SCHEDULER_PRIORITY\n - Task is disabled by user\n\n Args:\n task (str): Task name to call, such as `Restart`\n force_call (bool):\n\n Returns:\n bool: If called.\n \"\"\"\n if deep_get(self.data, keys=f\"{task}.Scheduler.NextRun\", default=None) is None:\n raise ScriptError(f\"Task to call: `{task}` does not exist in user config\")\n\n if force_call or self.is_task_enabled(task):\n logger.info(f\"Task call: {task}\")\n self.modified[f\"{task}.Scheduler.NextRun\"] = datetime.now().replace(\n microsecond=0\n )\n self.modified[f\"{task}.Scheduler.Enable\"] = True\n if self.auto_update:\n self.update()\n return True\n else:\n logger.info(f\"Task call: {task} (skipped because disabled by user)\")\n return False\n\n @staticmethod\n def task_stop(message=\"\"):\n \"\"\"\n Stop current task.\n\n Raises:\n TaskEnd:\n \"\"\"\n if message:\n raise TaskEnd(message)\n else:\n raise TaskEnd\n\n def task_switched(self):\n \"\"\"\n Check if needs to switch 
task.\n\n Raises:\n bool: If task switched\n \"\"\"\n # Update event\n if self.stop_event is not None:\n if self.stop_event.is_set():\n return True\n prev = self.task\n self.load()\n new = self.get_next()\n if prev == new:\n logger.info(f\"Continue task `{new}`\")\n return False\n else:\n logger.info(f\"Switch task `{prev}` to `{new}`\")\n return True\n\n def check_task_switch(self, message=\"\"):\n \"\"\"\n Stop current task when task switched.\n\n Raises:\n TaskEnd:\n \"\"\"\n if self.task_switched():\n self.task_stop(message=message)\n\n def is_task_enabled(self, task):\n return bool(self.cross_get(keys=[task, 'Scheduler', 'Enable'], default=False))\n\n def update_daily_quests(self):\n \"\"\"\n Raises:\n TaskEnd: Call task `DailyQuest` and stop current task\n \"\"\"\n if self.stored.DailyActivity.is_expired():\n logger.info('DailyActivity expired, call task to update')\n self.task_call('DailyQuest')\n self.task_stop()\n if self.stored.DailyQuest.is_expired():\n logger.info('DailyQuest expired, call task to update')\n self.task_call('DailyQuest')\n self.task_stop()\n\n @property\n def DEVICE_SCREENSHOT_METHOD(self):\n return self.Emulator_ScreenshotMethod\n\n @property\n def DEVICE_CONTROL_METHOD(self):\n return self.Emulator_ControlMethod\n\n def temporary(self, **kwargs):\n \"\"\"\n Cover some settings, and recover later.\n\n Usage:\n backup = self.config.cover(ENABLE_DAILY_REWARD=False)\n # do_something()\n backup.recover()\n\n Args:\n **kwargs:\n\n Returns:\n ConfigBackup:\n \"\"\"\n backup = ConfigBackup(config=self)\n backup.cover(**kwargs)\n return backup" }, { "identifier": "Function", "path": "module/config/config.py", "snippet": "class Function:\n def __init__(self, data):\n self.enable = deep_get(data, keys=\"Scheduler.Enable\", default=False)\n self.command = deep_get(data, keys=\"Scheduler.Command\", default=\"Unknown\")\n self.next_run = deep_get(data, keys=\"Scheduler.NextRun\", default=DEFAULT_TIME)\n\n def __str__(self):\n enable = \"Enable\" if self.enable else \"Disable\"\n return f\"{self.command} ({enable}, {str(self.next_run)})\"\n\n __repr__ = __str__\n\n def __eq__(self, other):\n if not isinstance(other, Function):\n return False\n\n if self.command == other.command and self.next_run == other.next_run:\n return True\n else:\n return False" }, { "identifier": "alas_instance", "path": "module/config/utils.py", "snippet": "def alas_instance():\n \"\"\"\n Returns:\n list[str]: Name of all Alas instances, except `template`.\n \"\"\"\n out = []\n for file in os.listdir('./config'):\n name, extension = os.path.splitext(file)\n config_name, mod_name = os.path.splitext(name)\n mod_name = mod_name[1:]\n if name != 'template' and extension == '.json' and mod_name == '':\n out.append(name)\n\n # out.extend(mod_instance())\n\n if not len(out):\n out = ['aas']\n\n return out" }, { "identifier": "alas_template", "path": "module/config/utils.py", "snippet": "def alas_template():\n \"\"\"\n Returns:\n list[str]: Name of all Alas instances, except `template`.\n \"\"\"\n out = []\n for file in os.listdir('./config'):\n name, extension = os.path.splitext(file)\n if name == 'template' and extension == '.json':\n out.append(f'{name}-aas')\n\n # out.extend(mod_template())\n\n return out" }, { "identifier": "deep_get", "path": "module/config/utils.py", "snippet": "def deep_get(d, keys, default=None):\n \"\"\"\n Get values in dictionary safely.\n https://stackoverflow.com/questions/25833613/safe-method-to-get-value-of-nested-dictionary\n\n Args:\n d (dict):\n keys (str, list): Such as 
`Scheduler.NextRun.value`\n default: Default return if key not found.\n\n Returns:\n\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if d is None:\n return default\n if not keys:\n return d\n return deep_get(d.get(keys[0]), keys[1:], default)" }, { "identifier": "deep_iter", "path": "module/config/utils.py", "snippet": "def deep_iter(data, depth=0, current_depth=1):\n \"\"\"\n Iter a dictionary safely.\n\n Args:\n data (dict):\n depth (int): Maximum depth to iter\n current_depth (int):\n\n Returns:\n list: Key path\n Any:\n \"\"\"\n if isinstance(data, dict) \\\n and (depth and current_depth <= depth):\n for key, value in data.items():\n for child_path, child_value in deep_iter(value, depth=depth, current_depth=current_depth + 1):\n yield [key] + child_path, child_value\n else:\n yield [], data" }, { "identifier": "deep_set", "path": "module/config/utils.py", "snippet": "def deep_set(d, keys, value):\n \"\"\"\n Set value into dictionary safely, imitating deep_get().\n \"\"\"\n if isinstance(keys, str):\n keys = keys.split('.')\n assert type(keys) is list\n if not keys:\n return value\n if not isinstance(d, dict):\n d = {}\n d[keys[0]] = deep_set(d.get(keys[0], {}), keys[1:], value)\n return d" }, { "identifier": "dict_to_kv", "path": "module/config/utils.py", "snippet": "def dict_to_kv(dictionary, allow_none=True):\n \"\"\"\n Args:\n dictionary: Such as `{'path': 'Scheduler.ServerUpdate', 'value': True}`\n allow_none (bool):\n\n Returns:\n str: Such as `path='Scheduler.ServerUpdate', value=True`\n \"\"\"\n return ', '.join([f'{k}={repr(v)}' for k, v in dictionary.items() if allow_none or v is not None])" }, { "identifier": "filepath_args", "path": "module/config/utils.py", "snippet": "def filepath_args(filename='args', mod_name='alas'):\n return f'./module/config/argument/{filename}.json'" }, { "identifier": "filepath_config", "path": "module/config/utils.py", "snippet": "def filepath_config(filename, mod_name='alas'):\n if mod_name == 'alas':\n return os.path.join('./config', f'{filename}.json')\n else:\n return os.path.join('./config', f'{filename}.{mod_name}.json')" }, { "identifier": "read_file", "path": "module/config/utils.py", "snippet": "def read_file(file):\n \"\"\"\n Read a file, support both .yaml and .json format.\n Return empty dict if file not exists.\n\n Args:\n file (str):\n\n Returns:\n dict, list:\n \"\"\"\n folder = os.path.dirname(file)\n if not os.path.exists(folder):\n os.mkdir(folder)\n\n if not os.path.exists(file):\n return {}\n\n _, ext = os.path.splitext(file)\n lock = FileLock(f\"{file}.lock\")\n with lock:\n print(f'read: {file}')\n if ext == '.yaml':\n with open(file, mode='r', encoding='utf-8') as f:\n s = f.read()\n data = list(yaml.safe_load_all(s))\n if len(data) == 1:\n data = data[0]\n if not data:\n data = {}\n return data\n elif ext == '.json':\n with open(file, mode='r', encoding='utf-8') as f:\n s = f.read()\n return json.loads(s)\n else:\n print(f'Unsupported config file extension: {ext}')\n return {}" }, { "identifier": "logger", "path": "module/logger/logger.py", "snippet": "def empty_function(*args, **kwargs):\n def __init__(self, *args, func: Callable[[ConsoleRenderable], None] = None, **kwargs):\n def emit(self, record: logging.LogRecord) -> None:\n def handle(self, record: logging.LogRecord) -> bool:\n def options(self) -> ConsoleOptions:\ndef _set_file_logger(name=pyw_name):\ndef set_file_logger(name=pyw_name):\ndef set_func_logger(func):\ndef _get_renderables(\n self: Console, *objects, sep=\" \", 
end=\"\\n\", justify=None, emoji=None, markup=None, highlight=None,\n) -> List[ConsoleRenderable]:\ndef print(*objects: ConsoleRenderable, **kwargs):\ndef rule(title=\"\", *, characters=\"─\", style=\"rule.line\", end=\"\\n\", align=\"center\"):\ndef hr(title, level=3):\ndef attr(name, text):\ndef attr_align(name, text, front='', align=22):\ndef show():\ndef error_convert(func):\n def error_wrapper(msg, *args, **kwargs):\nclass RichFileHandler(RichHandler):\nclass RichRenderableHandler(RichHandler):\nclass HTMLConsole(Console):\nclass Highlighter(RegexHighlighter):\nWEB_THEME = Theme({\n \"web.brace\": Style(bold=True),\n \"web.bool_true\": Style(color=\"bright_green\", italic=True),\n \"web.bool_false\": Style(color=\"bright_red\", italic=True),\n \"web.none\": Style(color=\"magenta\", italic=True),\n \"web.path\": Style(color=\"magenta\"),\n \"web.filename\": Style(color=\"bright_magenta\"),\n \"web.str\": Style(color=\"green\", italic=False, bold=False),\n \"web.time\": Style(color=\"cyan\"),\n \"rule.text\": Style(bold=True),\n})" }, { "identifier": "Frame", "path": "module/webui/base.py", "snippet": "class Frame(Base):\n def __init__(self) -> None:\n super().__init__()\n self.page = \"Home\"\n\n def init_aside(self, expand_menu: bool = True, name: str = None) -> None:\n \"\"\"\n Call this in aside button callback function.\n Args:\n expand_menu: expand menu\n name: button name(label) to be highlight\n \"\"\"\n self.visible = True\n self.scope_clear()\n self.task_handler.remove_pending_task()\n clear(\"menu\")\n if expand_menu:\n self.expand_menu()\n if name:\n self.active_button(\"aside\", name)\n set_localstorage(\"aside\", name)\n\n def init_menu(self, collapse_menu: bool = True, name: str = None) -> None:\n \"\"\"\n Call this in menu button callback function.\n Args:\n collapse_menu: collapse menu\n name: button name(label) to be highlight\n \"\"\"\n self.visible = True\n self.page = name\n self.scope_clear()\n self.task_handler.remove_pending_task()\n clear(\"content\")\n if collapse_menu:\n self.collapse_menu()\n if name:\n self.active_button(\"menu\", name)\n\n @staticmethod\n @use_scope(\"ROOT\", clear=True)\n def _show() -> None:\n put_scope(\n \"header\",\n [\n put_html(Icon.ALAS).style(\"--header-icon--\"),\n put_text(\"AAS\").style(\"--header-text--\"),\n put_scope(\"header_status\"),\n put_scope(\"header_title\"),\n ],\n )\n put_scope(\n \"contents\",\n [\n put_scope(\"aside\"),\n put_scope(\"menu\"),\n put_scope(\"content\"),\n ],\n )\n\n @staticmethod\n @use_scope(\"header_title\", clear=True)\n def set_title(text=\"\"):\n put_text(text)\n\n @staticmethod\n def collapse_menu() -> None:\n run_js(\n f\"\"\"\n $(\"#pywebio-scope-menu\").addClass(\"container-menu-collapsed\");\n $(\".container-content-collapsed\").removeClass(\"container-content-collapsed\");\n \"\"\"\n )\n\n @staticmethod\n def expand_menu() -> None:\n run_js(\n f\"\"\"\n $(\".container-menu-collapsed\").removeClass(\"container-menu-collapsed\");\n $(\"#pywebio-scope-content\").addClass(\"container-content-collapsed\");\n \"\"\"\n )\n\n @staticmethod\n def active_button(position, value) -> None:\n run_js(\n f\"\"\"\n $(\"button.btn-{position}\").removeClass(\"btn-{position}-active\");\n $(\"div[style*='--{position}-{value}--']>button\").addClass(\"btn-{position}-active\");\n \"\"\"\n )\n\n @staticmethod\n def pin_set_invalid_mark(keys) -> None:\n if isinstance(keys, str):\n keys = [keys]\n keys = [\"_\".join(key.split(\".\")) for key in keys]\n js = \"\".join(\n [\n 
f\"\"\"$(\".form-control[name='{key}']\").addClass('is-invalid');\"\"\"\n for key in keys\n ]\n )\n if js:\n run_js(js)\n # for key in keys:\n # pin_update(key, valid_status=False)\n\n @staticmethod\n def pin_remove_invalid_mark(keys) -> None:\n if isinstance(keys, str):\n keys = [keys]\n keys = [\"_\".join(key.split(\".\")) for key in keys]\n js = \"\".join(\n [\n f\"\"\"$(\".form-control[name='{key}']\").removeClass('is-invalid');\"\"\"\n for key in keys\n ]\n )\n if js:\n run_js(js)\n # for key in keys:\n # pin_update(key, valid_status=0)" }, { "identifier": "get_config_mod", "path": "module/webui/fake.py", "snippet": "def get_config_mod(config_name):\n \"\"\"\n Args:\n config_name (str):\n \"\"\"\n return 'alas'" }, { "identifier": "load_config", "path": "module/webui/fake.py", "snippet": "def load_config(config_name):\n return AzurLaneConfig(config_name, '')" }, { "identifier": "asgi_app", "path": "module/webui/fastapi.py", "snippet": "def asgi_app(\n applications,\n cdn=True,\n static_dir=None,\n debug=False,\n allowed_origins=None,\n check_origin=None,\n **starlette_settings\n):\n debug = Session.debug = os.environ.get(\"PYWEBIO_DEBUG\", debug)\n cdn = cdn_validation(cdn, \"warn\")\n if cdn is False:\n cdn = \"pywebio_static\"\n routes = webio_routes(\n applications,\n cdn=cdn,\n allowed_origins=allowed_origins,\n check_origin=check_origin,\n )\n if static_dir:\n routes.append(\n Mount(\"/static\", app=StaticFiles(directory=static_dir), name=\"static\")\n )\n routes.append(\n Mount(\n \"/pywebio_static\",\n app=StaticFiles(directory=STATIC_PATH),\n name=\"pywebio_static\",\n )\n )\n middleware = [Middleware(HeaderMiddleware)]\n return Starlette(\n routes=routes, middleware=middleware, debug=debug, **starlette_settings\n )" }, { "identifier": "_t", "path": "module/webui/lang.py", "snippet": "def _t(s, lang=None):\n \"\"\"\n Get translation, ignore TRANSLATE_MODE\n \"\"\"\n if not lang:\n lang = LANG\n try:\n return dic_lang[lang][s]\n except KeyError:\n print(f\"Language key ({s}) not found\")\n return s" }, { "identifier": "t", "path": "module/webui/lang.py", "snippet": "def t(s, *args, **kwargs):\n \"\"\"\n Get translation.\n other args, kwargs pass to .format()\n \"\"\"\n if TRANSLATE_MODE:\n return s\n return _t(s, LANG).format(*args, **kwargs)" }, { "identifier": "put_input", "path": "module/webui/pin.py", "snippet": "def put_input(name, type='text', *, label='', value=None, placeholder=None, readonly=None, datalist=None,\n help_text=None, scope=None, position=OutputPosition.BOTTOM, **other_html_attrs) -> Output:\n \"\"\"Output an input widget. Refer to: `pywebio.input.input()`\"\"\"\n from pywebio.input import input\n check_dom_name_value(name, 'pin `name`')\n single_input_return = input(name=name, label=label, value=value, type=type, placeholder=placeholder,\n readonly=readonly, datalist=datalist, help_text=help_text, **other_html_attrs)\n return _pin_output(single_input_return, scope, position)" }, { "identifier": "put_select", "path": "module/webui/pin.py", "snippet": "def put_select(name, options=None, *, label='', multiple=None, value=None, help_text=None,\n scope=None, position=OutputPosition.BOTTOM, **other_html_attrs) -> Output:\n \"\"\"Output a select widget. 
Refer to: `pywebio.input.select()`\"\"\"\n from pywebio.input import select\n check_dom_name_value(name, 'pin `name`')\n single_input_return = select(name=name, options=options, label=label, multiple=multiple,\n value=value, help_text=help_text, **other_html_attrs)\n return _pin_output(single_input_return, scope, position)" }, { "identifier": "ProcessManager", "path": "module/webui/process_manager.py", "snippet": "class ProcessManager:\n _processes: Dict[str, \"ProcessManager\"] = {}\n\n def __init__(self, config_name: str = \"alas\") -> None:\n self.config_name = config_name\n self._renderable_queue: queue.Queue[ConsoleRenderable] = State.manager.Queue()\n self.renderables: List[ConsoleRenderable] = []\n self.renderables_max_length = 400\n self.renderables_reduce_length = 80\n self._process: Process = None\n self.thd_log_queue_handler: threading.Thread = None\n\n def start(self, func, ev: threading.Event = None) -> None:\n if not self.alive:\n if func is None:\n func = get_config_mod(self.config_name)\n self._process = Process(\n target=ProcessManager.run_process,\n args=(\n self.config_name,\n func,\n self._renderable_queue,\n ev,\n ),\n )\n self._process.start()\n self.start_log_queue_handler()\n\n def start_log_queue_handler(self):\n if (\n self.thd_log_queue_handler is not None\n and self.thd_log_queue_handler.is_alive()\n ):\n return\n self.thd_log_queue_handler = threading.Thread(\n target=self._thread_log_queue_handler\n )\n self.thd_log_queue_handler.start()\n\n def stop(self) -> None:\n lock = FileLock(f\"{filepath_config(self.config_name)}.lock\")\n with lock:\n if self.alive:\n self._process.kill()\n self.renderables.append(\n f\"[{self.config_name}] exited. Reason: Manual stop\\n\"\n )\n if self.thd_log_queue_handler is not None:\n self.thd_log_queue_handler.join(timeout=1)\n if self.thd_log_queue_handler.is_alive():\n logger.warning(\n \"Log queue handler thread does not stop within 1 seconds\"\n )\n logger.info(f\"[{self.config_name}] exited\")\n\n def _thread_log_queue_handler(self) -> None:\n while self.alive:\n try:\n log = self._renderable_queue.get(timeout=1)\n except queue.Empty:\n continue\n self.renderables.append(log)\n if len(self.renderables) > self.renderables_max_length:\n self.renderables = self.renderables[self.renderables_reduce_length :]\n logger.info(\"End of log queue handler loop\")\n\n @property\n def alive(self) -> bool:\n if self._process is not None:\n return self._process.is_alive()\n else:\n return False\n\n @property\n def state(self) -> int:\n if self.alive:\n return 1\n elif len(self.renderables) == 0:\n return 2\n else:\n console = Console(no_color=True)\n with console.capture() as capture:\n console.print(self.renderables[-1])\n s = capture.get().strip()\n if s.endswith(\"Reason: Manual stop\"):\n return 2\n elif s.endswith(\"Reason: Finish\"):\n return 2\n elif s.endswith(\"Reason: Update\"):\n return 4\n else:\n return 3\n\n @classmethod\n def get_manager(cls, config_name: str) -> \"ProcessManager\":\n \"\"\"\n Create a new alas if not exists.\n \"\"\"\n if config_name not in cls._processes:\n cls._processes[config_name] = ProcessManager(config_name)\n return cls._processes[config_name]\n\n @staticmethod\n def run_process(\n config_name, func: str, q: queue.Queue, e: threading.Event = None\n ) -> None:\n parser = argparse.ArgumentParser()\n parser.add_argument(\n \"--electron\", action=\"store_true\", help=\"Runs by electron client.\"\n )\n args, _ = parser.parse_known_args()\n State.electron = args.electron\n\n # Setup logger\n 
set_file_logger(name=config_name)\n if State.electron:\n # https://github.com/LmeSzinc/AzurLaneAutoScript/issues/2051\n logger.info(\"Electron detected, remove log output to stdout\")\n from module.logger.logger import console_hdlr\n logger.removeHandler(console_hdlr)\n set_func_logger(func=q.put)\n\n from module.config.config import AzurLaneConfig\n\n AzurLaneConfig.stop_event = e\n try:\n # Run alas\n if func == \"alas\":\n from module.alas import AzurLaneAutoScript\n from aas import ArisuAutoSweeper\n\n if e is not None:\n AzurLaneAutoScript.stop_event = e\n ArisuAutoSweeper(config_name=config_name).loop()\n else:\n logger.critical(f\"No function matched: {func}\")\n logger.info(f\"[{config_name}] exited. Reason: Finish\\n\")\n except Exception as e:\n logger.exception(e)\n\n @classmethod\n def running_instances(cls) -> List[\"ProcessManager\"]:\n l = []\n for process in cls._processes.values():\n if process.alive:\n l.append(process)\n return l\n\n @staticmethod\n def restart_processes(\n instances: List[Union[\"ProcessManager\", str]] = None, ev: threading.Event = None\n ):\n \"\"\"\n After update and reload, or failed to perform an update,\n restart all alas that running before update\n \"\"\"\n logger.hr(\"Restart alas\")\n\n # Load MOD_CONFIG_DICT\n mod_instance()\n\n if instances is None:\n instances = []\n\n _instances = set()\n\n for instance in instances:\n if isinstance(instance, str):\n _instances.add(ProcessManager.get_manager(instance))\n elif isinstance(instance, ProcessManager):\n _instances.add(instance)\n\n try:\n with open(\"./config/reloadalas\", mode=\"r\") as f:\n for line in f.readlines():\n line = line.strip()\n _instances.add(ProcessManager.get_manager(line))\n except FileNotFoundError:\n pass\n\n for process in _instances:\n logger.info(f\"Starting [{process.config_name}]\")\n process.start(func=get_config_mod(process.config_name), ev=ev)\n\n try:\n os.remove(\"./config/reloadalas\")\n except:\n pass\n logger.info(\"Start alas complete\")" }, { "identifier": "RemoteAccess", "path": "module/webui/remote_access.py", "snippet": "class RemoteAccess:\n @staticmethod\n def keep_ssh_alive():\n task_handler: TaskHandler\n task_handler = yield\n while True:\n if _ssh_thread is not None and _ssh_thread.is_alive():\n yield\n continue\n logger.info(\"Remote access service is not running, starting now\")\n try:\n start_remote_access_service()\n except ParseError as e:\n logger.exception(e)\n task_handler.remove_current_task()\n yield\n\n @staticmethod\n def kill_ssh_process():\n if RemoteAccess.is_alive():\n _ssh_process.kill()\n\n @staticmethod\n def is_alive():\n return (\n _ssh_thread is not None\n and _ssh_thread.is_alive()\n and _ssh_process is not None\n and _ssh_process.poll() is None\n )\n\n @staticmethod\n def get_state():\n if RemoteAccess.is_alive():\n if address is not None:\n return 1\n else:\n return 2\n elif _ssh_notfound:\n return 3\n else:\n return 0\n\n @staticmethod\n def get_entry_point():\n return address if RemoteAccess.is_alive() else None" }, { "identifier": "State", "path": "module/webui/setting.py", "snippet": "class State:\n \"\"\"\n Shared settings\n \"\"\"\n\n _init = False\n _clearup = False\n\n restart_event: threading.Event = None\n manager: SyncManager = None\n electron: bool = False\n theme: str = \"default\"\n\n @classmethod\n def init(cls):\n cls.manager = multiprocessing.Manager()\n cls._init = True\n\n @classmethod\n def clearup(cls):\n cls.manager.shutdown()\n cls._clearup = True\n\n @cached_class_property\n def deploy_config(self) -> 
\"DeployConfig\":\n \"\"\"\n Returns:\n DeployConfig:\n \"\"\"\n from module.webui.config import DeployConfig\n\n return DeployConfig()\n\n @cached_class_property\n def config_updater(self) -> \"ConfigUpdater\":\n \"\"\"\n Returns:\n ConfigUpdater:\n \"\"\"\n from module.config.config_updater import ConfigUpdater\n\n return ConfigUpdater()" }, { "identifier": "updater", "path": "module/webui/updater.py", "snippet": "class Updater(DeployConfig, GitManager, PipManager):\n def __init__(self, file=DEPLOY_CONFIG):\n def delay(self):\n def schedule_time(self):\n def execute_output(self, command) -> str:\n def get_commit(self, revision=\"\", n=1, short_sha1=False) -> Tuple:\n def _check_update(self) -> bool:\n def _check_update_(self) -> bool:\n def check_update(self):\n def git_install(self):\n def pip_install(self):\n def update(self):\n def run_update(self):\n def _start_update(self):\n def _wait_update(self, instances: List[ProcessManager], names):\n def _run_update(self, instances, names):\n def _trigger_reload(delay=2):\n def trigger():\n def schedule_update(self) -> Generator:\n def cancel(self):" }, { "identifier": "Icon", "path": "module/webui/utils.py", "snippet": "class Icon:\n \"\"\"\n Storage html of icon.\n \"\"\"\n\n ALAS = _read(filepath_icon(\"alas\"))\n SETTING = _read(filepath_icon(\"setting\"))\n RUN = _read(filepath_icon(\"run\"))\n DEVELOP = _read(filepath_icon(\"develop\"))\n ADD = _read(filepath_icon(\"add\"))" }, { "identifier": "Switch", "path": "module/webui/utils.py", "snippet": "class Switch:\n def __init__(self, status, get_state, name=None):\n \"\"\"\n Args:\n status\n (dict):A dict describes each state.\n {\n 0: {\n 'func': (Callable)\n },\n 1: {\n 'func'\n 'args': (Optional, tuple)\n 'kwargs': (Optional, dict)\n },\n 2: [\n func1,\n {\n 'func': func2\n 'args': args2\n }\n ]\n -1: []\n }\n (Callable):current state will pass into this function\n lambda state: do_update(state=state)\n get_state:\n (Callable):\n return current state\n (Generator):\n yield current state, do nothing when state not in status\n name:\n \"\"\"\n self._lock = threading.Lock()\n self.name = name\n self.status = status\n self.get_state = get_state\n if isinstance(get_state, Generator):\n self._generator = get_state\n elif isinstance(get_state, Callable):\n self._generator = self._get_state()\n\n @staticmethod\n def get_state():\n pass\n\n def _get_state(self):\n \"\"\"\n Predefined generator when `get_state` is an callable\n Customize it if you have multiple criteria on state\n \"\"\"\n _status = self.get_state()\n yield _status\n while True:\n status = self.get_state()\n if _status != status:\n _status = status\n yield _status\n continue\n yield -1\n\n def switch(self):\n with self._lock:\n r = next(self._generator)\n if callable(self.status):\n self.status(r)\n elif r in self.status:\n f = self.status[r]\n if isinstance(f, (dict, Callable)):\n f = [f]\n for d in f:\n if isinstance(d, Callable):\n d = {\"func\": d}\n func = d[\"func\"]\n args = d.get(\"args\", tuple())\n kwargs = d.get(\"kwargs\", dict())\n func(*args, **kwargs)\n\n def g(self) -> Generator:\n g = get_generator(self.switch)\n if self.name:\n name = self.name\n else:\n name = self.get_state.__name__\n g.__name__ = f\"Switch_{name}_refresh\"\n return g" }, { "identifier": "TaskHandler", "path": "module/webui/utils.py", "snippet": "class TaskHandler:\n def __init__(self) -> None:\n # List of background running task\n self.tasks: List[Task] = []\n # List of task name to be removed\n self.pending_remove_tasks: List[Task] = []\n # 
Running task\n self._task = None\n # Task running thread\n self._thread: threading.Thread = None\n self._alive = False\n self._lock = threading.Lock()\n\n def add(self, func, delay: float, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n Another way of `self.add_task()`.\n func: Callable or Generator\n \"\"\"\n if isinstance(func, Callable):\n g = get_generator(func)\n elif isinstance(func, Generator):\n g = func\n self.add_task(Task(g, delay), pending_delete=pending_delete)\n\n def add_task(self, task: Task, pending_delete: bool = False) -> None:\n \"\"\"\n Add a task running background.\n \"\"\"\n if task in self.tasks:\n logger.warning(f\"Task {task} already in tasks list.\")\n return\n logger.info(f\"Add task {task}\")\n with self._lock:\n self.tasks.append(task)\n if pending_delete:\n self.pending_remove_tasks.append(task)\n\n def _remove_task(self, task: Task) -> None:\n if task in self.tasks:\n self.tasks.remove(task)\n logger.info(f\"Task {task} removed.\")\n else:\n logger.warning(\n f\"Failed to remove task {task}. Current tasks list: {self.tasks}\"\n )\n\n def remove_task(self, task: Task, nowait: bool = False) -> None:\n \"\"\"\n Remove a task in `self.tasks`.\n Args:\n task:\n nowait: if True, remove it right now,\n otherwise remove when call `self.remove_pending_task`\n \"\"\"\n if nowait:\n with self._lock:\n self._remove_task(task)\n else:\n self.pending_remove_tasks.append(task)\n\n def remove_pending_task(self) -> None:\n \"\"\"\n Remove all pending remove tasks.\n \"\"\"\n with self._lock:\n for task in self.pending_remove_tasks:\n self._remove_task(task)\n self.pending_remove_tasks = []\n\n def remove_current_task(self) -> None:\n self.remove_task(self._task, nowait=True)\n\n def get_task(self, name) -> Task:\n with self._lock:\n for task in self.tasks:\n if task.name == name:\n return task\n return None\n\n def loop(self) -> None:\n \"\"\"\n Start task loop.\n You **should** run this function in an individual thread.\n \"\"\"\n self._alive = True\n while self._alive:\n if self.tasks:\n with self._lock:\n self.tasks.sort(key=operator.attrgetter(\"next_run\"))\n task = self.tasks[0]\n if task.next_run < time.time():\n start_time = time.time()\n try:\n self._task = task\n # logger.debug(f'Start task {task.g.__name__}')\n task.send(self)\n # logger.debug(f'End task {task.g.__name__}')\n except Exception as e:\n logger.exception(e)\n self.remove_task(task, nowait=True)\n finally:\n self._task = None\n end_time = time.time()\n task.next_run += task.delay\n with self._lock:\n for task in self.tasks:\n task.next_run += end_time - start_time\n else:\n time.sleep(0.05)\n else:\n time.sleep(0.5)\n logger.info(\"End of task handler loop\")\n\n def _get_thread(self) -> threading.Thread:\n thread = threading.Thread(target=self.loop, daemon=True)\n return thread\n\n def start(self) -> None:\n \"\"\"\n Start task handler.\n \"\"\"\n logger.info(\"Start task handler\")\n if self._thread is not None and self._thread.is_alive():\n logger.warning(\"Task handler already running!\")\n return\n self._thread = self._get_thread()\n self._thread.start()\n\n def stop(self) -> None:\n self.remove_pending_task()\n self._alive = False\n self._thread.join(timeout=2)\n if not self._thread.is_alive():\n logger.info(\"Finish task handler\")\n else:\n logger.warning(\"Task handler does not stop within 2 seconds\")" }, { "identifier": "add_css", "path": "module/webui/utils.py", "snippet": "def add_css(filepath):\n with open(filepath, \"r\") as f:\n css = 
f.read().replace(\"\\n\", \"\")\n run_js(f\"\"\"$('head').append('<style>{css}</style>')\"\"\")" }, { "identifier": "filepath_css", "path": "module/webui/utils.py", "snippet": "def filepath_css(filename):\n return f\"./assets/gui/css/{filename}.css\"" }, { "identifier": "get_alas_config_listen_path", "path": "module/webui/utils.py", "snippet": "def get_alas_config_listen_path(args):\n for path, d in deep_iter(args, depth=3):\n if d.get(\"display\") in [\"readonly\", \"hide\"]:\n continue\n yield path" }, { "identifier": "get_localstorage", "path": "module/webui/utils.py", "snippet": "def get_localstorage(key):\n return eval_js(\"localStorage.getItem(key)\", key=key)" }, { "identifier": "get_window_visibility_state", "path": "module/webui/utils.py", "snippet": "def get_window_visibility_state():\n ret = eval_js(\"document.visibilityState\")\n return False if ret == \"hidden\" else True" }, { "identifier": "login", "path": "module/webui/utils.py", "snippet": "def login(password):\n if get_localstorage(\"password\") == str(password):\n return True\n pwd = input(label=\"Please login below.\", type=PASSWORD, placeholder=\"PASSWORD\")\n if str(pwd) == str(password):\n set_localstorage(\"password\", str(pwd))\n return True\n else:\n toast(\"Wrong password!\", color=\"error\")\n return False" }, { "identifier": "parse_pin_value", "path": "module/webui/utils.py", "snippet": "def parse_pin_value(val, valuetype: str = None):\n \"\"\"\n input, textarea return str\n select return its option (str or int)\n checkbox return [] or [True] (define in put_checkbox_)\n \"\"\"\n if isinstance(val, list):\n if len(val) == 0:\n return False\n else:\n return True\n elif valuetype:\n return str2type[valuetype](val)\n elif isinstance(val, (int, float)):\n return val\n else:\n try:\n v = float(val)\n except ValueError:\n return val\n if v.is_integer():\n return int(v)\n else:\n return v" }, { "identifier": "raise_exception", "path": "module/webui/utils.py", "snippet": "def raise_exception(x=3):\n \"\"\"\n For testing purpose\n \"\"\"\n if x > 0:\n raise_exception(x - 1)\n else:\n raise Exception(\"quq\")" }, { "identifier": "re_fullmatch", "path": "module/webui/utils.py", "snippet": "def re_fullmatch(pattern, string):\n if pattern == \"datetime\":\n try:\n datetime.datetime.fromisoformat(string)\n return True\n except ValueError:\n return False\n # elif:\n return re.fullmatch(pattern=pattern, string=string)" }, { "identifier": "BinarySwitchButton", "path": "module/webui/widgets.py", "snippet": "class ScrollableCode:\nclass RichLog:\nclass BinarySwitchButton(Switch):\n def __init__(self, keep_bottom: bool = True) -> None:\n def output(self):\n def append(self, text: str) -> None:\n def scroll(self) -> None:\n def reset(self) -> None:\n def set_scroll(self, b: bool) -> None:\n def __init__(self, scope, font_width=\"0.559\") -> None:\n def render(self, renderable: ConsoleRenderable) -> str:\n def extend(self, text):\n def reset(self):\n def scroll(self) -> None:\n def set_scroll(self, b: bool) -> None:\n def get_width(self):\n def put_log(self, pm: ProcessManager) -> Generator:\n def __init__(\n self,\n get_state,\n label_on,\n label_off,\n onclick_on,\n onclick_off,\n scope,\n color_on=\"success\",\n color_off=\"secondary\",\n ):\n def update_button(self, label, onclick, color):\ndef put_icon_buttons(\n icon_html: str,\n buttons: List[Dict[str, str]],\n onclick: Union[List[Callable[[], None]], Callable[[], None]],\n) -> Output:\ndef put_none() -> Output:\ndef get_title_help(kwargs: T_Output_Kwargs) -> Output:\ndef 
put_arg_input(kwargs: T_Output_Kwargs) -> Output:\ndef product_stored_row(kwargs: T_Output_Kwargs, key, value):\ndef put_arg_stored(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_select(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_state(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_textarea(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_checkbox(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_datetime(kwargs: T_Output_Kwargs) -> Output:\ndef put_arg_storage(kwargs: T_Output_Kwargs) -> Optional[Output]:\n def clear_callback():\ndef put_output(output_kwargs: T_Output_Kwargs) -> Optional[Output]:\ndef get_loading_style(shape: str, fill: bool) -> str:\ndef put_loading_text(\n text: str,\n shape: str = \"border\",\n color: str = \"dark\",\n fill: bool = False,\n size: str = \"auto 2px 1fr\",\n):" } ]
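The TaskHandler snippet above steps generator-based tasks from a single loop thread: each task carries a next_run timestamp, the loop sorts tasks by it, send()s the due generator once, and advances next_run by the task's delay. A minimal, self-contained sketch of that scheduling pattern (heartbeat and run_periodically are illustrative names, not the project's Task/TaskHandler API):

import threading
import time

def heartbeat():
    # Generator-based task: each next() performs one unit of work,
    # then yields control back to the scheduler until the next tick.
    while True:
        print("tick", time.time())
        yield

def run_periodically(gen, delay, stop_event):
    # Step the generator once every `delay` seconds until asked to stop;
    # sleep briefly between checks so the loop does not spin.
    next_run = time.time()
    while not stop_event.is_set():
        if time.time() >= next_run:
            next(gen)
            next_run += delay
        else:
            time.sleep(0.05)

stop = threading.Event()
worker = threading.Thread(target=run_periodically,
                          args=(heartbeat(), 1.0, stop), daemon=True)
worker.start()
time.sleep(3.2)  # let a few ticks run
stop.set()
worker.join(timeout=2)

In the real loop above, every task's next_run is additionally shifted by the time the current step took, so one slow task postpones the whole schedule instead of leaving a pile of overdue tasks.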
import argparse import queue import threading import time import module.webui.lang as lang from datetime import datetime from functools import partial from typing import Dict, List, Optional from pywebio import config as webconfig from pywebio.output import ( Output, clear, close_popup, popup, put_button, put_buttons, put_collapse, put_column, put_error, put_html, put_link, put_loading, put_markdown, put_row, put_scope, put_table, put_text, put_warning, toast, use_scope, ) from pywebio.pin import pin, pin_on_change from pywebio.session import go_app, info, local, register_thread, run_js, set_env from module.config.config import AzurLaneConfig, Function from module.config.utils import ( alas_instance, alas_template, deep_get, deep_iter, deep_set, dict_to_kv, filepath_args, filepath_config, read_file, ) from module.logger import logger from module.webui.base import Frame from module.webui.fake import ( get_config_mod, load_config, ) from module.webui.fastapi import asgi_app from module.webui.lang import _t, t from module.webui.pin import put_input, put_select from module.webui.process_manager import ProcessManager from module.webui.remote_access import RemoteAccess from module.webui.setting import State from module.webui.updater import updater from module.webui.utils import ( Icon, Switch, TaskHandler, add_css, filepath_css, get_alas_config_listen_path, get_localstorage, get_window_visibility_state, login, parse_pin_value, raise_exception, re_fullmatch, ) from module.webui.widgets import ( BinarySwitchButton, RichLog, T_Output_Kwargs, put_icon_buttons, put_loading_text, put_none, put_output, )
15,235
else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value})
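_init_alas_config_watcher above installs only the producer side: UI change callbacks enqueue {"name": path, "value": value} into modified_config_queue, and a background consumer later drains the queue and persists the last value seen per path. A self-contained sketch of that producer/consumer shape (write_config is a hypothetical callback, not the project's API):

import queue
import threading
import time

modified = queue.Queue()

def on_change(path, value):
    # Producer: UI callbacks only enqueue the edit; no file I/O here.
    modified.put({"name": path, "value": value})

def flush_loop(write_config, interval=1.0):
    # Consumer: periodically drain the queue, keeping only the latest
    # value per path, then persist the whole batch in one write.
    while True:
        batch = {}
        while True:
            try:
                item = modified.get_nowait()
            except queue.Empty:
                break
            batch[item["name"]] = item["value"]
        if batch:
            write_config(batch)
        time.sleep(interval)

threading.Thread(target=flush_loop,
                 args=(lambda b: print("write", b),), daemon=True).start()
on_change("Alas.Emulator.Serial", "127.0.0.1:5555")  # hypothetical path/value
time.sleep(1.5)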
task_handler = TaskHandler() class AlasGUI(Frame): ALAS_MENU: Dict[str, Dict[str, List[str]]] ALAS_ARGS: Dict[str, Dict[str, Dict[str, Dict[str, str]]]] ALAS_STORED: Dict[str, Dict[str, Dict[str, str]]] theme = "default" def initial(self) -> None: self.ALAS_MENU = read_file(filepath_args("menu", self.alas_mod)) self.ALAS_ARGS = read_file(filepath_args("args", self.alas_mod)) self.ALAS_STORED = read_file(filepath_args("stored", self.alas_mod)) self._init_alas_config_watcher() def __init__(self) -> None: super().__init__() # modified keys, return values of pin_wait_change() self.modified_config_queue = queue.Queue() # alas config name self.alas_name = "" self.alas_mod = "alas" self.alas_config = AzurLaneConfig("template") self.initial() @use_scope("aside", clear=True) def set_aside(self) -> None: # TODO: update put_icon_buttons() put_icon_buttons( Icon.DEVELOP, buttons=[ {"label": t("Gui.Aside.Home"), "value": "Home", "color": "aside"} ], onclick=[self.ui_develop], ), for name in alas_instance(): put_icon_buttons( Icon.RUN, buttons=[{"label": name, "value": name, "color": "aside"}], onclick=self.ui_alas, ) put_icon_buttons( Icon.ADD, buttons=[ {"label": t("Gui.Aside.AddAlas"), "value": "AddAlas", "color": "aside"} ], onclick=[self.ui_add_alas], ), @use_scope("header_status") def set_status(self, state: int) -> None: """ Args: state (int): 1 (running) 2 (not running) 3 (warning, stop unexpectedly) 4 (stop for update) 0 (hide) -1 (*state not changed) """ if state == -1: return clear() if state == 1: put_loading_text(t("Gui.Status.Running"), color="success") elif state == 2: put_loading_text(t("Gui.Status.Inactive"), color="secondary", fill=True) elif state == 3: put_loading_text(t("Gui.Status.Warning"), shape="grow", color="warning") elif state == 4: put_loading_text(t("Gui.Status.Updating"), shape="grow", color="success") @classmethod def set_theme(cls, theme="default") -> None: cls.theme = theme State.deploy_config.Theme = theme State.theme = theme webconfig(theme=theme) @use_scope("menu", clear=True) def alas_set_menu(self) -> None: """ Set menu """ put_buttons( [{ "label": t("Gui.MenuAlas.Overview"), "value": "Overview", "color": "menu", }], onclick=[self.alas_overview], ).style(f"--menu-Overview--") for menu, task_data in self.ALAS_MENU.items(): if task_data.get("page") == "tool": _onclick = self.alas_daemon_overview else: _onclick = self.alas_set_group if task_data.get("menu") == "collapse": task_btn_list = [ put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--") for task in task_data.get("tasks", []) ] put_collapse(title=t(f"Menu.{menu}.name"), content=task_btn_list) else: title = t(f"Menu.{menu}.name") put_html('<div class="hr-task-group-box">' '<span class="hr-task-group-line"></span>' f'<span class="hr-task-group-text">{title}</span>' '<span class="hr-task-group-line"></span>' '</div>' ) for task in task_data.get("tasks", []): put_buttons( [{ "label": t(f"Task.{task}.name"), "value": task, "color": "menu", }], onclick=_onclick, ).style(f"--menu-{task}--").style(f"padding-left: 0.75rem") self.alas_overview() @use_scope("content", clear=True) def alas_set_group(self, task: str) -> None: """ Set arg groups from dict """ self.init_menu(name=task) self.set_title(t(f"Task.{task}.name")) put_scope("_groups", [put_none(), put_scope("groups"), put_scope("navigator")]) task_help: str = t(f"Task.{task}.help") if task_help: put_scope( "group__info", scope="groups", content=[put_text(task_help).style("font-size: 1rem")], 
) config = self.alas_config.read_file(self.alas_name) for group, arg_dict in deep_iter(self.ALAS_ARGS[task], depth=1): if self.set_group(group, arg_dict, config, task): self.set_navigator(group) @use_scope("groups") def set_group(self, group, arg_dict, config, task): group_name = group[0] output_list: List[Output] = [] for arg, arg_dict in deep_iter(arg_dict, depth=1): output_kwargs: T_Output_Kwargs = arg_dict.copy() # Skip hide display: Optional[str] = output_kwargs.pop("display", None) if display == "hide": continue # Disable elif display == "disabled": output_kwargs["disabled"] = True # Output type output_kwargs["widget_type"] = output_kwargs.pop("type") arg_name = arg[0] # [arg_name,] # Internal pin widget name output_kwargs["name"] = f"{task}_{group_name}_{arg_name}" # Display title output_kwargs["title"] = t(f"{group_name}.{arg_name}.name") # Get value from config value = deep_get( config, [task, group_name, arg_name], output_kwargs["value"] ) # idk value = str(value) if isinstance(value, datetime) else value # Default value output_kwargs["value"] = value # Options output_kwargs["options"] = options = output_kwargs.pop("option", []) # Options label options_label = [] for opt in options: options_label.append(t(f"{group_name}.{arg_name}.{opt}")) output_kwargs["options_label"] = options_label # Help arg_help = t(f"{group_name}.{arg_name}.help") if arg_help == "" or not arg_help: arg_help = None output_kwargs["help"] = arg_help # Invalid feedback output_kwargs["invalid_feedback"] = t("Gui.Text.InvalidFeedBack", value) o = put_output(output_kwargs) if o is not None: # output will inherit current scope when created, override here o.spec["scope"] = f"#pywebio-scope-group_{group_name}" output_list.append(o) if not output_list: return 0 with use_scope(f"group_{group_name}"): put_text(t(f"{group_name}._info.name")) group_help = t(f"{group_name}._info.help") if group_help != "": put_text(group_help) put_html('<hr class="hr-group">') for output in output_list: output.show() return len(output_list) @use_scope("navigator") def set_navigator(self, group): js = f""" $("#pywebio-scope-groups").scrollTop( $("#pywebio-scope-group_{group[0]}").position().top + $("#pywebio-scope-groups").scrollTop() - 59 ) """ put_button( label=t(f"{group[0]}._info.name"), onclick=lambda: run_js(js), color="navigator", ) def set_dashboard(self, arg, arg_dict, config): i18n = arg_dict.get('i18n') if i18n: name = t(i18n) else: name = arg color = arg_dict.get("color", "#777777") nodata = t("Gui.Dashboard.NoData") def set_value(dic): if "total" in dic.get("attrs", []) and config.get("total") is not None: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), put_text(f' / {config.get("total", "")}').style("--dashboard-time--"), ] else: return [ put_text(config.get("value", nodata)).style("--dashboard-value--"), ] with use_scope(f"dashboard-row-{arg}", clear=True): put_html(f'<div><div class="dashboard-icon" style="background-color:{color}"></div>'), put_scope(f"dashboard-content-{arg}", [ put_scope(f"dashboard-value-{arg}", set_value(arg_dict)), put_scope(f"dashboard-time-{arg}", [ put_text(f"{name} - {lang.readable_time(config.get('time', ''))}").style("--dashboard-time--"), ]) ]) @use_scope("content", clear=True) def alas_overview(self) -> None: self.init_menu(name="Overview") self.set_title(t(f"Gui.MenuAlas.Overview")) put_scope("overview", [put_scope("schedulers"), put_scope("logs")]) with use_scope("schedulers"): put_scope( "scheduler-bar", [ put_text(t("Gui.Overview.Scheduler")).style( "font-size: 
1.25rem; margin: auto .5rem auto;" ), put_scope("scheduler_btn"), ], ) put_scope( "running", [ put_text(t("Gui.Overview.Running")), put_html('<hr class="hr-group">'), put_scope("running_tasks"), ], ) put_scope( "pending", [ put_text(t("Gui.Overview.Pending")), put_html('<hr class="hr-group">'), put_scope("pending_tasks"), ], ) put_scope( "waiting", [ put_text(t("Gui.Overview.Waiting")), put_html('<hr class="hr-group">'), put_scope("waiting_tasks"), ], ) switch_scheduler = BinarySwitchButton( label_on=t("Gui.Button.Stop"), label_off=t("Gui.Button.Start"), onclick_on=lambda: self.alas.stop(), onclick_off=lambda: self.alas.start(None, updater.event), get_state=lambda: self.alas.alive, color_on="off", color_off="on", scope="scheduler_btn", ) log = RichLog("log") with use_scope("logs"): put_scope("log-bar", [ put_scope("log-title", [ put_text(t("Gui.Overview.Log")).style("font-size: 1.25rem; margin: auto .5rem auto;"), put_scope("log-title-btns", [ put_scope("log_scroll_btn"), ]), ]), put_html('<hr class="hr-group">'), put_scope("dashboard", [ # Empty dashboard, values will be updated in alas_update_overview_task() put_scope(f"dashboard-row-{arg}", []) for arg in self.ALAS_STORED.keys() if deep_get(self.ALAS_STORED, keys=[arg, "order"], default=0) # Empty content to left-align last row ] + [put_html("<i></i>")] * min(len(self.ALAS_STORED), 4)) ]) put_scope("log", [put_html("")]) log.console.width = log.get_width() switch_log_scroll = BinarySwitchButton( label_on=t("Gui.Button.ScrollON"), label_off=t("Gui.Button.ScrollOFF"), onclick_on=lambda: log.set_scroll(False), onclick_off=lambda: log.set_scroll(True), get_state=lambda: log.keep_bottom, color_on="on", color_off="off", scope="log_scroll_btn", ) self.task_handler.add(switch_scheduler.g(), 1, True) self.task_handler.add(switch_log_scroll.g(), 1, True) self.task_handler.add(self.alas_update_overview_task, 10, True) self.task_handler.add(log.put_log(self.alas), 0.25, True) def _init_alas_config_watcher(self) -> None: def put_queue(path, value): self.modified_config_queue.put({"name": path, "value": value})
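switch_scheduler and switch_log_scroll in the overview code are instances of the BinarySwitchButton pattern from the context list: a generator registered with the task handler polls get_state once per tick and redraws the button only when the state flips. A reduced sketch of that polling logic, assuming Switch.g() yields once per poll (print stands in for the real update_button call; names are illustrative):

def binary_switch(get_state, on_flip):
    # Poll `get_state` once per scheduler tick; fire `on_flip`
    # only when the observed state actually changes.
    last = None
    while True:
        state = bool(get_state())
        if state != last:
            on_flip("Stop" if state else "Start")
            last = state
        yield

running = {"alive": False}
g = binary_switch(lambda: running["alive"],
                  lambda label: print("button ->", label))
next(g)                  # button -> Start
running["alive"] = True
next(g)                  # button -> Stop
next(g)                  # state unchanged: no redraw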
for path in get_alas_config_listen_path(self.ALAS_ARGS):
29
2023-11-01 07:09:45+00:00
24k
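That closes one record: the context snippets, the import block, cropped_code, the matching all_code, the gold next_line, and the token_num / gold_snippet_index / created_at / level metadata. One plausible way a consumer scores next-line prediction against such a row — the prompt assembly and field semantics here are assumptions, not a documented protocol:

def evaluate_row(row, complete_fn):
    # complete_fn: any model mapping a code prefix to a completion.
    # Assumed prompt: the file's imports followed by the cropped body.
    prefix = row["import_statement"] + "\n" + row["cropped_code"]
    lines = complete_fn(prefix).splitlines()
    prediction = lines[0].strip() if lines else ""
    return prediction == row["next_line"].strip()

# gold_snippet_index presumably selects the context entry whose snippet
# grounds the expected continuation:
# gold = row["context"][row["gold_snippet_index"]]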
radekd91/inferno
inferno/models/DECA.py
[ { "identifier": "EmoNetLoss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "class EmoNetLoss(EmoLossBase):\n# class EmoNetLoss(object):\n\n def __init__(self, device, emonet=None, trainable=False, normalize_features=False, emo_feat_loss=None, au_loss=None):\n if emonet is None:\n emonet = get_emonet(device).eval()\n\n last_feature_size = 256 # TODO: fix this hardcoded number, get it from EmoNet class instead\n if isinstance(emo_feat_loss, dict ) and \"barlow_twins\" in emo_feat_loss[\"type\"]:\n # if barlow twins, we need to know the feature size\n emo_feat_loss[\"feature_size\"] = last_feature_size\n\n super().__init__(trainable, normalize_features=normalize_features, emo_feat_loss=emo_feat_loss, au_loss=au_loss,\n last_feature_size=last_feature_size)\n self.emonet = emonet\n\n # elif isinstance(emonet, str):\n # path = Path(emonet)\n # if path.is_dir():\n # print(f\"Loading trained EmoNet from: '{path}'\")\n # def load_configs(run_path):\n # from omegaconf import OmegaConf\n # with open(Path(run_path) / \"cfg.yaml\", \"r\") as f:\n # conf = OmegaConf.load(f)\n # return conf\n #\n # cfg = load_configs(path)\n # checkpoint_mode = 'best'\n # stages_prefixes = \"\"\n #\n # checkpoint, checkpoint_kwargs = get_checkpoint_with_kwargs(cfg, stages_prefixes,\n # checkpoint_mode=checkpoint_mode,\n # # relative_to=relative_to_path,\n # # replace_root=replace_root_path\n # )\n # checkpoint_kwargs = checkpoint_kwargs or {}\n # emonet_module = EmoNetModule.load_from_checkpoint(checkpoint_path=checkpoint, strict=False, **checkpoint_kwargs)\n # self.emonet = emonet_module.backbone\n # else:\n # raise ValueError(\"Please specify the directory which contains the config of the trained Emonet.\")\n\n # else:\n # self.emonet = emonet\n\n if not trainable:\n self.emonet.eval()\n self.emonet.requires_grad_(False)\n else:\n self.emonet.train()\n self.emonet.emo_parameters_requires_grad(True)\n\n # self.emonet.eval()\n # self.emonet = self.emonet.requires_grad_(False)\n # self.transforms = Resize((256, 256))\n self.size = (256, 256)\n # self.emo_feat_loss = F.l1_loss\n # self.valence_loss = F.l1_loss\n # self.arousal_loss = F.l1_loss\n # # self.expression_loss = F.kl_div\n # self.expression_loss = F.l1_loss\n # self.input_emotion = None\n # self.output_emotion = None\n\n @property\n def network(self):\n return self.emonet\n\n def to(self, *args, **kwargs):\n self.emonet = self.emonet.to(*args, **kwargs)\n # self.emonet = self.emonet.requires_grad_(False)\n # for p in self.emonet.parameters():\n # p.requires_grad = False\n\n def eval(self):\n self.emonet = self.emonet.eval()\n # self.emonet = self.emonet.requires_grad_(False)\n # for p in self.emonet.parameters():\n # p.requires_grad = False\n\n def train(self, mode: bool = True):\n super().train(mode)\n if hasattr(self, 'emonet'):\n self.emonet = self.emonet.eval() # evaluation mode no matter what, it's just a loss function\n # self.emonet = self.emonet.requires_grad_(False)\n # for p in self.emonet.parameters():\n # p.requires_grad = False\n\n def forward(self, predicted, target, *args, **kwargs):\n res = self.compute_loss(target, predicted, *args, **kwargs)\n feat_2_loss = res[1]\n return feat_2_loss\n\n def emonet_out(self, images):\n images = F.interpolate(images, self.size, mode='bilinear')\n # images = self.transform(images)\n return self.emonet(images, intermediate_features=True)\n\n\n def _get_trainable_params(self):\n if self.trainable:\n return self.emonet.emo_parameters\n return []" }, { "identifier": "create_emo_loss", "path": 
"inferno/layers/losses/EmoNetLoss.py", "snippet": "def create_emo_loss(device, emoloss = None, trainable=False, dual=False, normalize_features=False, emo_feat_loss=None):\n if emoloss is None:\n return EmoNetLoss(device, emonet=emoloss)\n if isinstance(emoloss, str):\n path = Path(emoloss)\n if not path.is_absolute():\n path = Path(get_path_to_assets()) / path\n if path.is_dir():\n from inferno.layers.losses.emotion_loss_loader import emo_network_from_path\n emo_loss = emo_network_from_path(path)\n\n if isinstance(emo_loss, EmoNetModule):\n emonet = emo_loss.emonet\n print(\"Creating EmoNetLoss\")\n return EmoNetLoss(device, emonet=emonet, trainable=trainable,\n normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)\n else:\n if not dual:\n print(f\"Creating EmoBackboneLoss, trainable={trainable}\")\n return EmoBackboneLoss(device, emo_loss, trainable=trainable,\n normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)\n else:\n print(f\"Creating EmoBackboneDualLoss\")\n return EmoBackboneDualLoss(device, emo_loss, trainable=trainable, clone_is_trainable=True,\n normalize_features=normalize_features, emo_feat_loss=emo_feat_loss)\n else:\n raise ValueError(\"Please specify the directory which contains the config of the trained Emonet.\")\n else: \n raise TypeError(f\"Wrong type of emoloss: {type(emoloss)}\")" }, { "identifier": "create_au_loss", "path": "inferno/layers/losses/EmoNetLoss.py", "snippet": "def create_au_loss(device, au_loss):\n if au_loss is None:\n raise NotImplementedError(\"Pass an au_loss config.\")\n # return EmoNetLoss(device, emonet=au_loss)\n if isinstance(au_loss, (dict, omegaconf.DictConfig)):\n path = Path(au_loss.path)\n if path.is_dir():\n au_loss_net = emo_network_from_path(path)\n\n if isinstance(au_loss_net, EmoNetModule):\n emonet = au_loss_net.emonet\n print(\"Creating EmoNetLoss\")\n return EmoNetLoss(device,\n emonet=emonet,\n trainable=au_loss.trainable,\n normalize_features=au_loss.normalize_features,\n emo_feat_loss=au_loss.feat_loss,\n au_loss=au_loss.au_loss)\n else:\n if not au_loss.dual:\n print(f\"Creating EmoBackboneLoss, trainable={au_loss.trainable}\")\n return EmoBackboneLoss(device, au_loss_net,\n trainable=au_loss.trainable,\n normalize_features=au_loss.normalize_features,\n emo_feat_loss=au_loss.feat_loss, \n au_loss=au_loss.au_loss\n )\n else:\n print(f\"Creating EmoBackboneDualLoss\")\n return EmoBackboneDualLoss(device, au_loss_net,\n trainable=au_loss.trainable,\n clone_is_trainable=True,\n normalize_features=au_loss.normalize_features,\n emo_feat_loss=au_loss.feat_loss,\n au_loss=au_loss.au_loss)\n else:\n raise ValueError(\"Please specify the config to instantiate AU loss\")" }, { "identifier": "SRenderY", "path": "inferno/models/Renderer.py", "snippet": "class SRenderY(nn.Module):\n def __init__(self, image_size, obj_filename, uv_size=256):\n super(SRenderY, self).__init__()\n self.image_size = image_size\n self.uv_size = uv_size\n\n verts, faces, aux = load_obj(obj_filename)\n uvcoords = aux.verts_uvs[None, ...] # (N, V, 2)\n uvfaces = faces.textures_idx[None, ...] 
# (N, F, 3)\n faces = faces.verts_idx[None, ...]\n self.rasterizer = Pytorch3dRasterizer(image_size)\n self.uv_rasterizer = Pytorch3dRasterizer(uv_size)\n\n # faces\n dense_triangles = util.generate_triangles(uv_size, uv_size)\n self.register_buffer('dense_faces', torch.from_numpy(dense_triangles).long()[None, :, :])\n self.register_buffer('faces', faces)\n self.register_buffer('raw_uvcoords', uvcoords)\n\n # uv coords\n uvcoords = torch.cat([uvcoords, uvcoords[:, :, 0:1] * 0. + 1.], -1) # [bz, ntv, 3]\n uvcoords = uvcoords * 2 - 1;\n uvcoords[..., 1] = -uvcoords[..., 1]\n face_uvcoords = util.face_vertices(uvcoords, uvfaces)\n self.register_buffer('uvcoords', uvcoords)\n self.register_buffer('uvfaces', uvfaces)\n self.register_buffer('face_uvcoords', face_uvcoords)\n\n # shape colors, for rendering shape overlay\n colors = torch.tensor([180, 180, 180])[None, None, :].repeat(1, faces.max() + 1, 1).float() / 255.\n face_colors = util.face_vertices(colors, faces)\n self.register_buffer('face_colors', face_colors)\n\n ## SH factors for lighting\n pi = np.pi\n constant_factor = torch.tensor(\n [1 / np.sqrt(4 * pi), ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), \\\n ((2 * pi) / 3) * (np.sqrt(3 / (4 * pi))), (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))),\n (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))), \\\n (pi / 4) * (3) * (np.sqrt(5 / (12 * pi))), (pi / 4) * (3 / 2) * (np.sqrt(5 / (12 * pi))),\n (pi / 4) * (1 / 2) * (np.sqrt(5 / (4 * pi)))]).float()\n self.register_buffer('constant_factor', constant_factor)\n\n def forward(self, vertices, transformed_vertices, albedos, lights=None, light_type='point'):\n '''\n -- Texture Rendering\n vertices: [batch_size, V, 3], vertices in world space, for calculating normals, then shading\n transformed_vertices: [batch_size, V, 3], rnage:[-1,1], projected vertices, in image space, for rasterization\n albedos: [batch_size, 3, h, w], uv map\n lights:\n spherical homarnic: [N, 9(shcoeff), 3(rgb)]\n points/directional lighting: [N, n_lights, 6(xyzrgb)]\n light_type:\n point or directional\n '''\n batch_size = vertices.shape[0]\n ## rasterizer near 0 far 100. 
move mesh so minz larger than 0\n transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10\n\n # attributes\n face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1))\n face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))\n transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1))\n transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))\n\n attributes = torch.cat([self.face_uvcoords.expand(batch_size, -1, -1, -1),\n transformed_face_normals.detach(),\n face_vertices.detach(),\n face_normals],\n -1)\n\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n # vis mask\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n\n # albedo\n uvcoords_images = rendering[:, :3, :, :]\n grid = (uvcoords_images).permute(0, 2, 3, 1)[:, :, :, :2]\n albedo_images = F.grid_sample(albedos, grid, align_corners=False)\n\n # visible mask for pixels with positive normal direction\n transformed_normal_map = rendering[:, 3:6, :, :].detach()\n pos_mask = (transformed_normal_map[:, 2:, :, :] < -0.05).float()\n\n # shading\n normal_images = rendering[:, 9:12, :, :]\n if lights is not None:\n if lights.shape[1] == 9:\n shading_images = self.add_SHlight(normal_images, lights)\n else:\n if light_type == 'point':\n vertice_images = rendering[:, 6:9, :, :].detach()\n shading = self.add_pointlight(vertice_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),\n normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),\n lights)\n shading_images = shading.reshape(\n [batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 3, 1, 2)\n else:\n shading = self.add_directionlight(normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]),\n lights)\n shading_images = shading.reshape(\n [batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 3, 1, 2)\n images = albedo_images * shading_images\n else:\n images = albedo_images\n shading_images = images.detach() * 0.\n # import ipdb; ipdb.set_trace()\n # print('albedo: ', albedo_images.min(), albedo_images.max())\n # print('normal: ', normal_images.min(), normal_images.max())\n # print('lights: ', lights.min(), lights.max())\n # print('shading: ', shading_images.min(), shading_images.max())\n # print('images: ', images.min(), images.max())\n # exit()\n outputs = {\n 'images': images * alpha_images,\n 'albedo_images': albedo_images,\n 'alpha_images': alpha_images,\n 'pos_mask': pos_mask,\n 'shading_images': shading_images,\n 'grid': grid,\n 'normals': normals,\n 'normal_images': normal_images,\n 'transformed_normals': transformed_normals,\n }\n\n return outputs\n\n def add_SHlight(self, normal_images, sh_coeff):\n '''\n sh_coeff: [bz, 9, 3]\n '''\n N = normal_images\n sh = torch.stack([\n N[:, 0] * 0. 
+ 1., N[:, 0], N[:, 1], \\\n N[:, 2], N[:, 0] * N[:, 1], N[:, 0] * N[:, 2],\n N[:, 1] * N[:, 2], N[:, 0] ** 2 - N[:, 1] ** 2, 3 * (N[:, 2] ** 2) - 1\n ],\n 1) # [bz, 9, h, w]\n sh = sh * self.constant_factor[None, :, None, None]\n shading = torch.sum(sh_coeff[:, :, :, None, None] * sh[:, :, None, :, :], 1) # [bz, 9, 3, h, w]\n return shading\n\n def add_pointlight(self, vertices, normals, lights):\n '''\n vertices: [bz, nv, 3]\n lights: [bz, nlight, 6]\n returns:\n shading: [bz, nv, 3]\n '''\n light_positions = lights[:, :, :3];\n light_intensities = lights[:, :, 3:]\n directions_to_lights = F.normalize(light_positions[:, :, None, :] - vertices[:, None, :, :], dim=3)\n # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)\n normals_dot_lights = (normals[:, None, :, :] * directions_to_lights).sum(dim=3)\n shading = normals_dot_lights[:, :, :, None] * light_intensities[:, :, None, :]\n return shading.mean(1)\n\n def add_directionlight(self, normals, lights):\n '''\n normals: [bz, nv, 3]\n lights: [bz, nlight, 6]\n returns:\n shading: [bz, nv, 3]\n '''\n light_direction = lights[:, :, :3];\n light_intensities = lights[:, :, 3:]\n directions_to_lights = F.normalize(light_direction[:, :, None, :].expand(-1, -1, normals.shape[1], -1), dim=3)\n # normals_dot_lights = torch.clamp((normals[:,None,:,:]*directions_to_lights).sum(dim=3), 0., 1.)\n # normals_dot_lights = (normals[:,None,:,:]*directions_to_lights).sum(dim=3)\n normals_dot_lights = torch.clamp((normals[:, None, :, :] * directions_to_lights).sum(dim=3), 0., 1.)\n shading = normals_dot_lights[:, :, :, None] * light_intensities[:, :, None, :]\n return shading.mean(1)\n\n def render_shape(self, vertices, transformed_vertices, images=None, detail_normal_images=None, lights=None):\n '''\n -- rendering shape with detail normal map\n '''\n batch_size = vertices.shape[0]\n if lights is None:\n light_positions = torch.tensor(\n [\n [-1, 1, 1],\n [1, 1, 1],\n [-1, -1, 1],\n [1, -1, 1],\n [0, 0, 1]\n ]\n )[None, :, :].expand(batch_size, -1, -1).float()\n light_intensities = torch.ones_like(light_positions).float() * 1.7\n lights = torch.cat((light_positions, light_intensities), 2).to(vertices.device)\n transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] + 10\n\n # Attributes\n face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n normals = util.vertex_normals(vertices, self.faces.expand(batch_size, -1, -1));\n face_normals = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))\n transformed_normals = util.vertex_normals(transformed_vertices, self.faces.expand(batch_size, -1, -1));\n transformed_face_normals = util.face_vertices(transformed_normals, self.faces.expand(batch_size, -1, -1))\n attributes = torch.cat([self.face_colors.expand(batch_size, -1, -1, -1),\n transformed_face_normals.detach(),\n face_vertices.detach(),\n face_normals],\n -1)\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n\n # albedo\n albedo_images = rendering[:, :3, :, :]\n # mask\n transformed_normal_map = rendering[:, 3:6, :, :].detach()\n pos_mask = (transformed_normal_map[:, 2:, :, :] < 0).float()\n\n # shading\n normal_images = rendering[:, 9:12, :, :].detach()\n vertice_images = rendering[:, 6:9, :, :].detach()\n if detail_normal_images is not None:\n normal_images = detail_normal_images\n\n shading = 
self.add_directionlight(normal_images.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]), lights)\n shading_images = shading.reshape([batch_size, albedo_images.shape[2], albedo_images.shape[3], 3]).permute(0, 3,\n 1,\n 2).contiguous()\n shaded_images = albedo_images * shading_images\n\n if images is None:\n shape_images = shaded_images * alpha_images + torch.zeros_like(shaded_images).to(vertices.device) * (\n 1 - alpha_images)\n else:\n shape_images = shaded_images * alpha_images + images * (1 - alpha_images)\n return shape_images\n\n def render_depth(self, transformed_vertices):\n '''\n -- rendering depth\n '''\n batch_size = transformed_vertices.shape[0]\n\n transformed_vertices[:, :, 2] = transformed_vertices[:, :, 2] - transformed_vertices[:, :, 2].min()\n z = -transformed_vertices[:, :, 2:].repeat(1, 1, 3)\n z = z - z.min()\n z = z / z.max()\n # Attributes\n attributes = util.face_vertices(z, self.faces.expand(batch_size, -1, -1))\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n depth_images = rendering[:, :1, :, :]\n return depth_images\n\n def render_normal(self, transformed_vertices, normals):\n '''\n -- rendering normal\n '''\n batch_size = normals.shape[0]\n\n # Attributes\n attributes = util.face_vertices(normals, self.faces.expand(batch_size, -1, -1))\n # rasterize\n rendering = self.rasterizer(transformed_vertices, self.faces.expand(batch_size, -1, -1), attributes)\n\n ####\n alpha_images = rendering[:, -1, :, :][:, None, :, :].detach()\n normal_images = rendering[:, :3, :, :]\n return normal_images\n\n def world2uv(self, vertices):\n '''\n project vertices from world space to uv space\n vertices: [bz, V, 3]\n uv_vertices: [bz, 3, h, w]\n '''\n batch_size = vertices.shape[0]\n face_vertices = util.face_vertices(vertices, self.faces.expand(batch_size, -1, -1))\n uv_vertices = self.uv_rasterizer(self.uvcoords.expand(batch_size, -1, -1),\n self.uvfaces.expand(batch_size, -1, -1), face_vertices)[:, :3]\n return uv_vertices" }, { "identifier": "ResnetEncoder", "path": "inferno/models/DecaEncoder.py", "snippet": "class ResnetEncoder(BaseEncoder):\n def __init__(self, outsize, last_op=None):\n super(ResnetEncoder, self).__init__(outsize, last_op)\n # feature_size = 2048\n # self.encoder = resnet.load_ResNet50Model() # out: 2048\n # ### regressor\n # self.layers = nn.Sequential(\n # nn.Linear(feature_size, 1024),\n # nn.ReLU(),\n # nn.Linear(1024, outsize)\n # )\n # self.last_op = last_op\n\n def _create_encoder(self):\n self.encoder = resnet.load_ResNet50Model() # out: 2048" }, { "identifier": "SecondHeadResnet", "path": "inferno/models/DecaEncoder.py", "snippet": "class SecondHeadResnet(nn.Module):\n\n def __init__(self, enc : BaseEncoder, outsize, last_op=None):\n super().__init__()\n self.resnet = enc # yes, self.resnet is no longer accurate but the name is kept for legacy reasons (to be able to load old models)\n self.layers = nn.Sequential(\n nn.Linear(self.resnet.feature_size, 1024),\n nn.ReLU(),\n nn.Linear(1024, outsize)\n )\n if last_op == 'same':\n self.last_op = self.resnet.last_op\n else:\n self.last_op = last_op\n\n def forward_features(self, inputs):\n out1, features = self.resnet(inputs, output_features=True)\n return out1, features\n\n def forward_features_to_output(self, features):\n parameters = self.layers(features)\n if self.last_op:\n parameters = self.last_op(parameters)\n return parameters\n\n\n def forward(self, 
inputs):\n out1, features = self.forward_features()\n out2 = self.forward_features_to_output(features)\n return out1, out2\n\n\n def train(self, mode: bool = True):\n #here we NEVER modify the eval/train status of the resnet backbone, only the FC layers of the second head\n self.layers.train(mode)\n return self\n\n def reset_last_layer(self):\n # initialize the last layer to zero to help the network \n # predict the initial pose a bit more stable\n torch.nn.init.constant_(self.layers[-1].weight, 0)\n torch.nn.init.constant_(self.layers[-1].bias, 0)\n\n def get_feature_size(self): \n return self.resnet.feature_size" }, { "identifier": "SwinEncoder", "path": "inferno/models/DecaEncoder.py", "snippet": "class SwinEncoder(BaseEncoder):\n\n def __init__(self, swin_type, img_size, outsize, last_op=None):\n self.swin_type = swin_type\n self.img_size = img_size\n super().__init__(outsize, last_op)\n\n def _create_encoder(self):\n swin_cfg = swin_cfg_from_name(self.swin_type)\n self.encoder = create_swin_backbone(\n swin_cfg, self.feature_size, self.img_size, load_pretrained_swin=True, pretrained_model=self.swin_type)\n\n\n def forward_features(self, inputs):\n pooled_feature, patches = self.encoder(inputs, include_features=True, include_patches=False)\n return pooled_feature, patches" }, { "identifier": "Generator", "path": "inferno/models/DecaDecoder.py", "snippet": "class Generator(nn.Module):\n def __init__(self, latent_dim=100, out_channels=1, out_scale=1, sample_mode='bilinear'):\n super(Generator, self).__init__()\n self.out_scale = out_scale\n\n self.init_size = 32 // 4 # Initial size before upsampling\n self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n self.conv_blocks = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2, mode=sample_mode), # 16\n nn.Conv2d(128, 128, 3, stride=1, padding=1),\n nn.BatchNorm2d(128, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2, mode=sample_mode), # 32\n nn.Conv2d(128, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2, mode=sample_mode), # 64\n nn.Conv2d(64, 64, 3, stride=1, padding=1),\n nn.BatchNorm2d(64, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2, mode=sample_mode), # 128\n nn.Conv2d(64, 32, 3, stride=1, padding=1),\n nn.BatchNorm2d(32, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Upsample(scale_factor=2, mode=sample_mode), # 256\n nn.Conv2d(32, 16, 3, stride=1, padding=1),\n nn.BatchNorm2d(16, 0.8),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(16, out_channels, 3, stride=1, padding=1),\n nn.Tanh(),\n )\n\n def forward(self, z):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n img = self.conv_blocks(out)\n return img * self.out_scale" }, { "identifier": "GeneratorAdaIn", "path": "inferno/models/DecaDecoder.py", "snippet": "class GeneratorAdaIn(nn.Module):\n def __init__(self, latent_dim, condition_dim, out_channels=1, out_scale=1, sample_mode='bilinear'):\n super().__init__()\n self.out_scale = out_scale\n\n self.init_size = 32 // 4 # Initial size before upsampling\n self.l1 = nn.Sequential(nn.Linear(latent_dim, 128 * self.init_size ** 2))\n # self.conv_blocks = nn.Sequential(\n # # nn.BatchNorm2d(128),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 16\n # # nn.Conv2d(128, 128, 3, stride=1, padding=1),\n # AdaInUpConvBlock(128,128, condition_dim),\n # # nn.BatchNorm2d(128, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # 
nn.Upsample(scale_factor=2, mode=sample_mode), # 32\n # # nn.Conv2d(128, 64, 3, stride=1, padding=1),\n # AdaInUpConvBlock(128, 64, condition_dim),\n # # nn.BatchNorm2d(64, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 64\n # # nn.Conv2d(64, 64, 3, stride=1, padding=1),\n # AdaInUpConvBlock(64, 64, condition_dim),\n # # nn.BatchNorm2d(64, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 128\n # # nn.Conv2d(64, 32, 3, stride=1, padding=1),\n # AdaInUpConvBlock(64, 32, condition_dim),\n # # nn.BatchNorm2d(32, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Upsample(scale_factor=2, mode=sample_mode), # 256\n # # nn.Conv2d(32, 16, 3, stride=1, padding=1),\n # AdaInUpConvBlock(32, 16, condition_dim),\n # # nn.BatchNorm2d(16, 0.8),\n # # nn.LeakyReLU(0.2, inplace=True),\n # # nn.Conv2d(16, out_channels, 3, stride=1, padding=1),\n # AdaInUpConvBlock(16, out_channels, condition_dim, scale_factor=0)\n # nn.Tanh(),\n # )\n self.conv_block1 = AdaInUpConvBlock(128,128, condition_dim, sample_mode=sample_mode) # 16\n self.conv_block2 = AdaInUpConvBlock(128, 64, condition_dim, sample_mode=sample_mode) # 32\n self.conv_block3 = AdaInUpConvBlock(64, 64, condition_dim, sample_mode=sample_mode) # 64\n self.conv_block4 = AdaInUpConvBlock(64, 32, condition_dim, sample_mode=sample_mode) # 128\n self.conv_block5 = AdaInUpConvBlock(32, 16, condition_dim, sample_mode=sample_mode) # 256\n self.conv_block6 = AdaInUpConvBlock(16, out_channels, condition_dim, scale_factor=0) # 256\n self.conv_blocks = [self.conv_block1, self.conv_block2, self.conv_block3, self.conv_block4,\n self.conv_block5, self.conv_block6]\n self.out_actv = nn.Tanh()\n\n\n def forward(self, z, cond):\n out = self.l1(z)\n out = out.view(out.shape[0], 128, self.init_size, self.init_size)\n for i, block in enumerate(self.conv_blocks):\n out = block(out, cond)\n img = self.out_actv(out)\n return img * self.out_scale" }, { "identifier": "FLAME", "path": "inferno/models/DecaFLAME.py", "snippet": "class FLAME(nn.Module):\n \"\"\"\n Given flame parameters this class generates a differentiable FLAME function\n which outputs the a mesh and 2D/3D facial landmarks\n \"\"\"\n\n def __init__(self, config):\n super(FLAME, self).__init__()\n print(\"creating the FLAME Decoder\")\n with open(config.flame_model_path, 'rb') as f:\n # flame_model = Struct(**pickle.load(f, encoding='latin1'))\n ss = pickle.load(f, encoding='latin1')\n flame_model = Struct(**ss)\n\n self.cfg = config\n self.dtype = torch.float32\n self.register_buffer('faces_tensor', to_tensor(to_np(flame_model.f, dtype=np.int64), dtype=torch.long))\n # The vertices of the template model\n self.register_buffer('v_template', to_tensor(to_np(flame_model.v_template), dtype=self.dtype))\n # The shape components and expression\n shapedirs = to_tensor(to_np(flame_model.shapedirs), dtype=self.dtype)\n shapedirs = torch.cat([shapedirs[:, :, :config.n_shape], shapedirs[:, :, 300:300 + config.n_exp]], 2)\n self.register_buffer('shapedirs', shapedirs)\n # The pose components\n num_pose_basis = flame_model.posedirs.shape[-1]\n posedirs = np.reshape(flame_model.posedirs, [-1, num_pose_basis]).T\n self.register_buffer('posedirs', to_tensor(to_np(posedirs), dtype=self.dtype))\n #\n self.register_buffer('J_regressor', to_tensor(to_np(flame_model.J_regressor), dtype=self.dtype))\n parents = to_tensor(to_np(flame_model.kintree_table[0])).long();\n parents[0] = -1\n self.register_buffer('parents', parents)\n 
self.register_buffer('lbs_weights', to_tensor(to_np(flame_model.weights), dtype=self.dtype))\n\n # Fixing Eyeball and neck rotation\n default_eyball_pose = torch.zeros([1, 6], dtype=self.dtype, requires_grad=False)\n self.register_parameter('eye_pose', nn.Parameter(default_eyball_pose,\n requires_grad=False))\n default_neck_pose = torch.zeros([1, 3], dtype=self.dtype, requires_grad=False)\n self.register_parameter('neck_pose', nn.Parameter(default_neck_pose,\n requires_grad=False))\n\n # Static and Dynamic Landmark embeddings for FLAME\n lmk_embeddings = np.load(config.flame_lmk_embedding_path, allow_pickle=True, encoding='latin1')\n lmk_embeddings = lmk_embeddings[()]\n self.register_buffer('lmk_faces_idx', torch.tensor(lmk_embeddings['static_lmk_faces_idx'], dtype=torch.long))\n self.register_buffer('lmk_bary_coords',\n torch.tensor(lmk_embeddings['static_lmk_bary_coords'], dtype=self.dtype))\n self.register_buffer('dynamic_lmk_faces_idx',\n torch.tensor(lmk_embeddings['dynamic_lmk_faces_idx'], dtype=torch.long))\n self.register_buffer('dynamic_lmk_bary_coords',\n torch.tensor(lmk_embeddings['dynamic_lmk_bary_coords'], dtype=self.dtype))\n self.register_buffer('full_lmk_faces_idx', torch.tensor(lmk_embeddings['full_lmk_faces_idx'], dtype=torch.long))\n self.register_buffer('full_lmk_bary_coords',\n torch.tensor(lmk_embeddings['full_lmk_bary_coords'], dtype=self.dtype))\n\n neck_kin_chain = [];\n NECK_IDX = 1\n curr_idx = torch.tensor(NECK_IDX, dtype=torch.long)\n while curr_idx != -1:\n neck_kin_chain.append(curr_idx)\n curr_idx = self.parents[curr_idx]\n self.register_buffer('neck_kin_chain', torch.stack(neck_kin_chain))\n\n def _find_dynamic_lmk_idx_and_bcoords(self, pose, dynamic_lmk_faces_idx,\n dynamic_lmk_b_coords,\n neck_kin_chain, dtype=torch.float32):\n \"\"\"\n Selects the face contour depending on the reletive position of the head\n Input:\n vertices: N X num_of_vertices X 3\n pose: N X full pose\n dynamic_lmk_faces_idx: The list of contour face indexes\n dynamic_lmk_b_coords: The list of contour barycentric weights\n neck_kin_chain: The tree to consider for the relative rotation\n dtype: Data type\n return:\n The contour face indexes and the corresponding barycentric weights\n \"\"\"\n\n batch_size = pose.shape[0]\n\n aa_pose = torch.index_select(pose.view(batch_size, -1, 3), 1,\n neck_kin_chain)\n rot_mats = batch_rodrigues(\n aa_pose.view(-1, 3), dtype=dtype).view(batch_size, -1, 3, 3)\n\n rel_rot_mat = torch.eye(3, device=pose.device,\n dtype=dtype).unsqueeze_(dim=0).expand(batch_size, -1, -1)\n for idx in range(len(neck_kin_chain)):\n rel_rot_mat = torch.bmm(rot_mats[:, idx], rel_rot_mat)\n\n y_rot_angle = torch.round(\n torch.clamp(rot_mat_to_euler(rel_rot_mat) * 180.0 / np.pi,\n max=39)).to(dtype=torch.long)\n\n neg_mask = y_rot_angle.lt(0).to(dtype=torch.long)\n mask = y_rot_angle.lt(-39).to(dtype=torch.long)\n neg_vals = mask * 78 + (1 - mask) * (39 - y_rot_angle)\n y_rot_angle = (neg_mask * neg_vals +\n (1 - neg_mask) * y_rot_angle)\n\n dyn_lmk_faces_idx = torch.index_select(dynamic_lmk_faces_idx,\n 0, y_rot_angle)\n dyn_lmk_b_coords = torch.index_select(dynamic_lmk_b_coords,\n 0, y_rot_angle)\n return dyn_lmk_faces_idx, dyn_lmk_b_coords\n\n def _vertices2landmarks(self, vertices, faces, lmk_faces_idx, lmk_bary_coords):\n \"\"\"\n Calculates landmarks by barycentric interpolation\n Input:\n vertices: torch.tensor NxVx3, dtype = torch.float32\n The tensor of input vertices\n faces: torch.tensor (N*F)x3, dtype = torch.long\n The faces of the mesh\n lmk_faces_idx: 
torch.tensor N X L, dtype = torch.long\n The tensor with the indices of the faces used to calculate the\n landmarks.\n lmk_bary_coords: torch.tensor N X L X 3, dtype = torch.float32\n The tensor of barycentric coordinates that are used to interpolate\n the landmarks\n\n Returns:\n landmarks: torch.tensor NxLx3, dtype = torch.float32\n The coordinates of the landmarks for each mesh in the batch\n \"\"\"\n # Extract the indices of the vertices for each face\n # NxLx3\n batch_size, num_verts = vertices.shape[:2]\n lmk_faces = torch.index_select(faces, 0, lmk_faces_idx.view(-1)).view(\n 1, -1, 3).view(batch_size, lmk_faces_idx.shape[1], -1)\n\n lmk_faces += torch.arange(batch_size, dtype=torch.long).view(-1, 1, 1).to(\n device=vertices.device) * num_verts\n\n lmk_vertices = vertices.view(-1, 3)[lmk_faces]\n landmarks = torch.einsum('blfi,blf->bli', [lmk_vertices, lmk_bary_coords])\n return landmarks\n\n def _vertices2landmarks2d(self, vertices, full_pose):\n \"\"\"\n Calculates landmarks by barycentric interpolation\n Input:\n vertices: torch.tensor NxVx3, dtype = torch.float32\n The tensor of input vertices\n full_pose: torch.tensor N X 12, dtype = torch.float32\n The tensor with global pose, neck pose, jaw pose and eye pose (respectively) in axis angle format\n\n Returns:\n landmarks: torch.tensor NxLx3, dtype = torch.float32\n The coordinates of the landmarks for each mesh in the batch\n \"\"\"\n # Extract the indices of the vertices for each face\n # NxLx3\n batch_size = vertices.shape[0]\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)\n\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(\n full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain, dtype=self.dtype)\n lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)\n\n landmarks2d = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n return landmarks2d\n\n\n def seletec_3d68(self, vertices):\n landmarks3d = vertices2landmarks(vertices, self.faces_tensor,\n self.full_lmk_faces_idx.repeat(vertices.shape[0], 1),\n self.full_lmk_bary_coords.repeat(vertices.shape[0], 1, 1))\n return landmarks3d\n\n def forward(self, shape_params=None, expression_params=None, pose_params=None, eye_pose_params=None):\n \"\"\"\n Input:\n shape_params: N X number of shape parameters\n expression_params: N X number of expression parameters\n pose_params: N X number of pose parameters (6)\n return:d\n vertices: N X V X 3\n landmarks: N X number of landmarks X 3\n \"\"\"\n batch_size = shape_params.shape[0]\n if pose_params is None:\n pose_params = self.eye_pose.expand(batch_size, -1) # TODO: is this correct?\n if eye_pose_params is None:\n eye_pose_params = self.eye_pose.expand(batch_size, -1)\n if expression_params is None:\n expression_params = torch.zeros(batch_size, self.cfg.n_exp).to(shape_params.device)\n\n betas = torch.cat([shape_params, expression_params], dim=1)\n full_pose = torch.cat(\n [pose_params[:, :3], self.neck_pose.expand(batch_size, -1), pose_params[:, 3:], eye_pose_params], dim=1)\n template_vertices = self.v_template.unsqueeze(0).expand(batch_size, -1, -1)\n\n vertices, _ = lbs(betas, full_pose, template_vertices,\n self.shapedirs, self.posedirs,\n self.J_regressor, self.parents,\n self.lbs_weights, dtype=self.dtype, \n 
detach_pose_correctives=False)\n\n lmk_faces_idx = self.lmk_faces_idx.unsqueeze(dim=0).expand(batch_size, -1)\n lmk_bary_coords = self.lmk_bary_coords.unsqueeze(dim=0).expand(batch_size, -1, -1)\n\n dyn_lmk_faces_idx, dyn_lmk_bary_coords = self._find_dynamic_lmk_idx_and_bcoords(\n full_pose, self.dynamic_lmk_faces_idx,\n self.dynamic_lmk_bary_coords,\n self.neck_kin_chain, dtype=self.dtype)\n lmk_faces_idx = torch.cat([dyn_lmk_faces_idx, lmk_faces_idx], 1)\n lmk_bary_coords = torch.cat([dyn_lmk_bary_coords, lmk_bary_coords], 1)\n\n landmarks2d = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx,\n lmk_bary_coords)\n bz = vertices.shape[0]\n landmarks3d = vertices2landmarks(vertices, self.faces_tensor,\n self.full_lmk_faces_idx.repeat(bz, 1),\n self.full_lmk_bary_coords.repeat(bz, 1, 1))\n\n return vertices, landmarks2d, landmarks3d" }, { "identifier": "FLAMETex", "path": "inferno/models/DecaFLAME.py", "snippet": "class FLAMETex(nn.Module):\n \"\"\"\n current FLAME texture:\n https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64\n tex_path: '/ps/scratch/yfeng/Data/FLAME/texture/albedoModel2020_FLAME_albedoPart.npz'\n ## adapted from BFM\n tex_path: '/ps/scratch/yfeng/Data/FLAME/texture/FLAME_albedo_from_BFM.npz'\n \"\"\"\n\n def __init__(self, config):\n super(FLAMETex, self).__init__()\n if config.tex_type == 'BFM':\n mu_key = 'MU'\n pc_key = 'PC'\n n_pc = 199\n tex_path = config.tex_path\n try:\n tex_space = np.load(tex_path)\n texture_mean = tex_space[mu_key].reshape(1, -1)\n texture_basis = tex_space[pc_key].reshape(-1, n_pc)\n except FileNotFoundError as e: \n im_size = 512 \n texture_mean = np.ones((1, im_size*im_size*3)) * 0.5\n texture_basis = np.eye(im_size*im_size*3, n_pc) * 0.5\n print(\"[WARNING] texture file not found. 
Setting texture space with dummy values.\")\n\n elif config.tex_type == 'FLAME':\n mu_key = 'mean'\n pc_key = 'tex_dir'\n n_pc = 200\n tex_path = config.tex_path\n tex_space = np.load(tex_path)\n texture_mean = tex_space[mu_key].reshape(1, -1) / 255.\n texture_basis = tex_space[pc_key].reshape(-1, n_pc) / 255.\n\n else:\n print('texture type \"', config.tex_type, '\" does not exist!')\n raise NotImplementedError('texture type \"', config.tex_type, '\" does not exist!')\n\n n_tex = config.n_tex\n num_components = texture_basis.shape[1]\n texture_mean = torch.from_numpy(texture_mean).float()[None, ...]\n texture_basis = torch.from_numpy(texture_basis[:, :n_tex]).float()[None, ...]\n self.register_buffer('texture_mean', texture_mean)\n self.register_buffer('texture_basis', texture_basis)\n\n def forward(self, texcode):\n texture = self.texture_mean + (self.texture_basis * texcode[:, None, :]).sum(-1)\n texture = texture.reshape(texcode.shape[0], 512, 512, 3).permute(0, 3, 1, 2)\n texture = F.interpolate(texture, [256, 256])\n texture = texture[:, [2, 1, 0], :, :]\n return texture" }, { "identifier": "FLAME_mediapipe", "path": "inferno/models/DecaFLAME.py", "snippet": "class FLAME_mediapipe(FLAME): \n\n def __init__(self, config):\n super().__init__(config)\n # static MEDIAPIPE landmark embeddings for FLAME\n lmk_embeddings_mediapipe = np.load(config.flame_mediapipe_lmk_embedding_path, \n allow_pickle=True, encoding='latin1')\n # indices = lmk_embeddings_mediapipe['landmark_indices']\n self.register_buffer('lmk_faces_idx_mediapipe', \n torch.tensor(lmk_embeddings_mediapipe['lmk_face_idx'].astype(np.int64), dtype=torch.long))\n self.register_buffer('lmk_bary_coords_mediapipe',\n torch.tensor(lmk_embeddings_mediapipe['lmk_b_coords'], dtype=self.dtype))\n \n def forward(self, shape_params=None, expression_params=None, pose_params=None, eye_pose_params=None):\n vertices, landmarks2d, landmarks3d = super().forward(shape_params, expression_params, pose_params, eye_pose_params)\n batch_size = shape_params.shape[0]\n lmk_faces_idx_mediapipe = self.lmk_faces_idx_mediapipe.unsqueeze(dim=0).expand(batch_size, -1).contiguous()\n lmk_bary_coords_mediapipe = self.lmk_bary_coords_mediapipe.unsqueeze(dim=0).expand(batch_size, -1, -1).contiguous()\n landmarks2d_mediapipe = vertices2landmarks(vertices, self.faces_tensor,\n lmk_faces_idx_mediapipe,\n lmk_bary_coords_mediapipe )\n # landmarks3d_mediapipe = vertices2landmarks(vertices, self.faces_tensor,\n # self.full_lmk_faces_idx_mediapipe.repeat(bz, 1),\n # self.full_lmk_bary_coords_mediapipe.repeat(bz, 1, 1))\n\n return vertices, landmarks2d, landmarks3d, landmarks2d_mediapipe#, landmarks3d_mediapipe" }, { "identifier": "EmotionMLP", "path": "inferno/models/EmotionMLP.py", "snippet": "class EmotionMLP(torch.nn.Module):\n\n def __init__(self, config, deca_cfg):\n super().__init__()\n self.config = config\n in_size = 0\n if self.config.use_identity:\n in_size += deca_cfg.n_shape\n if self.config.use_expression:\n in_size += deca_cfg.n_exp\n if self.config.use_global_pose:\n in_size += 3\n if self.config.use_jaw_pose:\n in_size += 3\n if self.config.use_detail_code:\n self.n_detail = deca_cfg.n_detail\n in_size += deca_cfg.n_detail\n else:\n self.n_detail = None\n if 'use_detail_emo_code' in self.config.keys() and self.config.use_detail_emo_code:\n self.n_detail_emo = deca_cfg.n_detail_emo\n in_size += deca_cfg.n_detail_emo\n else:\n self.n_detail_emo = None\n\n hidden_layer_sizes = config.num_mlp_layers * [in_size]\n\n out_size = 0\n if 
self.config.predict_expression:\n self.num_classes = self.config.data.n_expression if 'n_expression' in self.config.data.keys() else 9\n out_size += self.num_classes\n if self.config.predict_valence:\n out_size += 1\n if self.config.predict_arousal:\n out_size += 1\n\n # if \"use_mlp\" not in self.config.keys() or self.config.use_mlp:\n if 'mlp_norm_layer' in self.config.keys():\n batch_norm = class_from_str(self.config.mlp_norm_layer, sys.modules[__name__])\n else:\n batch_norm = None\n self.mlp = MLP(in_size, out_size, hidden_layer_sizes, batch_norm=batch_norm)\n # else:\n # self.mlp = None\n\n if 'v_activation' in config.keys():\n self.v_activation = class_from_str(self.config.v_activation, sys.modules[__name__])\n else:\n self.v_activation = None\n\n if 'a_activation' in config.keys():\n self.a_activation = class_from_str(self.config.a_activation, sys.modules[__name__])\n else:\n self.a_activation = None\n\n if 'exp_activation' in config.keys():\n self.exp_activation = class_from_str(self.config.exp_activation, sys.modules[__name__])\n else:\n self.exp_activation = F.log_softmax\n\n self.va_loss = loss_from_cfg(config, 'va_loss')\n self.v_loss = loss_from_cfg(config, 'v_loss')\n self.a_loss = loss_from_cfg(config, 'a_loss')\n self.exp_loss = loss_from_cfg(config, 'exp_loss')\n\n # config backwards compatibility\n self.config = add_cfg_if_missing(self.config, 'detach_shape', False)\n self.config = add_cfg_if_missing(self.config, 'detach_expression', False)\n self.config = add_cfg_if_missing(self.config, 'detach_detailcode', False)\n self.config = add_cfg_if_missing(self.config, 'detach_jaw', False)\n self.config = add_cfg_if_missing(self.config, 'detach_global_pose', False)\n\n\n def forward(self, values, result_prefix=\"\"):\n shapecode = values['shapecode']\n\n if self.config.detach_shape:\n shapecode = shapecode.detach()\n\n # texcode = values['texcode']\n expcode = values['expcode']\n\n if self.config.detach_expression:\n expcode = expcode.detach()\n\n posecode = values['posecode']\n if self.config.use_detail_code:\n if 'detailcode' in values.keys() and values['detailcode'] is not None:\n detailcode = values['detailcode']\n if self.config.detach_detailcode:\n detailcode = detailcode.detach()\n else:\n detailcode = torch.zeros((posecode.shape[0], self.n_detail), dtype=posecode.dtype, device=posecode.device )\n else:\n detailcode = None\n\n if 'use_detailemo_code' in self.config.keys() and self.config.use_detailemo_code:\n if 'detailemocode' in values.keys() and values['detailemocode'] is not None:\n detailemocode = values['detailemocode']\n if 'detach_detailemocode' in self.config.keys() and self.config.detach_detailemocode:\n detailemocode = detailemocode.detach()\n else:\n detailemocode = torch.zeros((posecode.shape[0], self.n_detail_emo), dtype=posecode.dtype, device=posecode.device )\n else:\n detailemocode = None\n\n\n global_pose = posecode[:, :3]\n if self.config.detach_global_pose:\n global_pose = global_pose.detach()\n\n jaw_pose = posecode[:, 3:]\n if self.config.detach_jaw:\n jaw_pose = jaw_pose.detach()\n\n input_list = []\n\n if self.config.use_identity:\n input_list += [shapecode]\n\n if self.config.use_expression:\n input_list += [expcode]\n\n if self.config.use_global_pose:\n input_list += [global_pose]\n\n if self.config.use_jaw_pose:\n input_list += [jaw_pose]\n\n if self.config.use_detail_code:\n input_list += [detailcode]\n\n if 'use_detail_emo_code' in self.config.keys() and self.config.use_detail_emo_code:\n input_list += [detailemocode]\n\n input = 
torch.cat(input_list, dim=1)\n output = self.mlp(input)\n\n out_idx = 0\n if self.config.predict_expression:\n expr_classification = output[:, out_idx:(out_idx + self.num_classes)]\n if self.exp_activation is not None:\n expr_classification = self.exp_activation(output[:, out_idx:(out_idx + self.num_classes)], dim=1)\n out_idx += self.num_classes\n else:\n expr_classification = None\n\n if self.config.predict_valence:\n valence = output[:, out_idx:(out_idx+1)]\n if self.v_activation is not None:\n valence = self.v_activation(valence)\n out_idx += 1\n else:\n valence = None\n\n if self.config.predict_arousal:\n arousal = output[:, out_idx:(out_idx+1)]\n if self.a_activation is not None:\n arousal = self.a_activation(output[:, out_idx:(out_idx + 1)])\n out_idx += 1\n else:\n arousal = None\n\n values[result_prefix + \"valence\"] = valence\n values[result_prefix + \"arousal\"] = arousal\n values[result_prefix + \"expr_classification\"] = expr_classification\n\n return values\n\n\n def compute_loss(self, pred, batch, training, pred_prefix=\"\"):\n valence_gt = pred[\"va\"][:, 0:1]\n arousal_gt = pred[\"va\"][:, 1:2]\n expr_classification_gt = pred[\"affectnetexp\"]\n if \"expression_weight\" in pred.keys():\n class_weight = pred[\"expression_weight\"][0]\n else:\n class_weight = None\n\n gt = {}\n gt[\"valence\"] = valence_gt\n gt[\"arousal\"] = arousal_gt\n gt[\"expr_classification\"] = expr_classification_gt\n\n # TODO: this is not ugly enough\n scheme = None if 'va_loss_scheme' not in self.config.keys() else self.config.va_loss_scheme\n loss_term_weights = _get_step_loss_weights(self.v_loss, self.a_loss, self.va_loss, scheme, training)\n\n valence_sample_weight = batch[\"valence_sample_weight\"] if \"valence_sample_weight\" in batch.keys() else None\n arousal_sample_weight = batch[\"arousal_sample_weight\"] if \"arousal_sample_weight\" in batch.keys() else None\n va_sample_weight = batch[\"va_sample_weight\"] if \"va_sample_weight\" in batch.keys() else None\n expression_sample_weight = batch[\"expression_sample_weight\"] if \"expression_sample_weight\" in batch.keys() else None\n\n if 'continuous_va_balancing' in self.config.keys():\n if self.config.continuous_va_balancing == '1d':\n v_weight = valence_sample_weight\n a_weight = arousal_sample_weight\n elif self.config.continuous_va_balancing == '2d':\n v_weight = va_sample_weight\n a_weight = va_sample_weight\n elif self.config.continuous_va_balancing == 'expr':\n v_weight = expression_sample_weight\n a_weight = expression_sample_weight\n else:\n raise RuntimeError(f\"Invalid continuous affect balancing\"\n f\" '{self.config.continuous_va_balancing}'\")\n if len(v_weight.shape) > 1:\n v_weight = v_weight.view(-1)\n if len(a_weight.shape) > 1:\n a_weight = a_weight.view(-1)\n else:\n v_weight = None\n a_weight = None\n\n losses, metrics = {}, {}\n # print(metrics.keys())\n losses, metrics = v_or_a_loss(self.v_loss, pred, gt, loss_term_weights, metrics, losses, \"valence\",\n pred_prefix=pred_prefix, permit_dropping_corr=not training,\n sample_weights=v_weight)\n losses, metrics = v_or_a_loss(self.a_loss, pred, gt, loss_term_weights, metrics, losses, \"arousal\",\n pred_prefix=pred_prefix, permit_dropping_corr=not training,\n sample_weights=a_weight)\n losses, metrics = va_loss(self.va_loss, pred, gt, loss_term_weights, metrics, losses,\n pred_prefix=pred_prefix, permit_dropping_corr=not training)\n losses, metrics = exp_loss(self.exp_loss, pred, gt, class_weight, metrics, losses,\n self.config.expression_balancing, self.num_classes, 
pred_prefix=pred_prefix, )\n\n return losses, metrics" }, { "identifier": "Expression7", "path": "inferno/datasets/AffWild2Dataset.py", "snippet": "class Expression7(Enum):\n Neutral = 0\n Anger = 1\n Disgust = 2\n Fear = 3\n Happiness = 4\n Sadness = 5\n Surprise = 6\n None_ = 7" }, { "identifier": "AffectNetExpressions", "path": "inferno/datasets/AffectNetDataModule.py", "snippet": "class AffectNetExpressions(Enum):\n Neutral = 0\n Happy = 1\n Sad = 2\n Surprise = 3\n Fear = 4\n Disgust = 5\n Anger = 6\n Contempt = 7\n None_ = 8\n Uncertain = 9\n Occluded = 10\n xxx = 11\n\n\n @staticmethod\n def from_str(string : str):\n string = string[0].upper() + string[1:]\n return AffectNetExpressions[string]\n\n # _expressions = {0: 'neutral', 1:'happy', 2:'sad', 3:'surprise', 4:'fear', 5:'disgust', 6:'anger', 7:'contempt', 8:'none'}" }, { "identifier": "_log_array_image", "path": "inferno/utils/lightning_logging.py", "snippet": "def _log_array_image(path, image, caption=None):\n image = _fix_image(image)\n if path is not None:\n imsave(path, image)\n return image" }, { "identifier": "_log_wandb_image", "path": "inferno/utils/lightning_logging.py", "snippet": "def _log_wandb_image(path, image, caption=None):\n path.parent.mkdir(parents=True, exist_ok=True)\n image = _fix_image(image)\n imsave(path, image)\n if caption is not None:\n caption_file = Path(path).parent / (Path(path).stem + \".txt\")\n with open(caption_file, \"w\") as f:\n f.write(caption)\n wandb_image = Image(str(path), caption=caption)\n return wandb_image" }, { "identifier": "_torch_image2np", "path": "inferno/utils/lightning_logging.py", "snippet": "def _torch_image2np(torch_image):\n image = torch_image.detach().cpu().numpy()\n if len(image.shape) == 4:\n image = image.transpose([0, 2, 3, 1])\n elif len(image.shape) == 3:\n image = image.transpose([1, 2, 0])\n return image" }, { "identifier": "class_from_str", "path": "inferno/utils/other.py", "snippet": "def class_from_str(str, module=None, none_on_fail = False) -> type:\n if module is None:\n module = sys.modules[__name__]\n if hasattr(module, str):\n cl = getattr(module, str)\n return cl\n elif str.lower() == 'none' or none_on_fail:\n return None\n raise RuntimeError(f\"Class '{str}' not found.\")" }, { "identifier": "get_path_to_assets", "path": "inferno/utils/other.py", "snippet": "def get_path_to_assets() -> Path:\n import inferno\n return Path(inferno.__file__).parents[1] / \"assets\"" }, { "identifier": "VGG19Loss", "path": "inferno/layers/losses/VGGLoss.py", "snippet": "class VGG19Loss(nn.Module):\n\n def __init__(self, layer_activation_indices_weights, diff=torch.nn.functional.l1_loss, batch_norm=False):\n super().__init__()\n self.batch_norm = batch_norm\n self.vgg19 = VGG19(sorted(layer_activation_indices_weights.keys()), batch_norm=batch_norm)\n self.layer_activation_indices_weights = layer_activation_indices_weights\n self.diff = diff\n\n def forward(self, x, y):\n feat_x = self.vgg19(x)\n feat_y = self.vgg19(y)\n\n out = {}\n loss = 0\n for idx, weight in self.layer_activation_indices_weights.items():\n d = self.diff(feat_x[idx], feat_y[idx])\n out[idx] = d\n loss += d*weight\n return loss, out" }, { "identifier": "EmoNetRegressor", "path": "inferno/models/EmoNetRegressor.py", "snippet": "class EmoNetRegressor(torch.nn.Module):\n\n def __init__(self, outsize, last_op=None):\n super().__init__()\n self.emonet = get_emonet().eval()\n # self.emonet.eval()\n # self.emonet = self.emonet.requires_grad_(False)\n # self.transforms = Resize((256, 256))\n 
self.input_image_size = (256, 256) # for now, emonet is pretrained for this particular image size (the official impl)\n\n self.feature_to_use = 'emo_feat_2'\n\n if self.feature_to_use == 'emo_feat_2':\n self.emonet_feature_size = 256\n self.fc_size = 256\n else:\n raise NotImplementedError(f\"Not yet implemented for feature '{self.feature_to_use}'\")\n\n self.layers = torch.nn.Sequential(\n torch.nn.Linear(self.emonet_feature_size, self.fc_size),\n torch.nn.ReLU(),\n torch.nn.Linear(self.fc_size, outsize)\n )\n self.last_op = last_op\n\n def forward(self, images):\n images = F.interpolate(images, self.input_image_size, mode='bilinear')\n out = self.emonet(images, intermediate_features=True)\n # out has the following keys: 'heatmap', 'expression', 'valence', 'arousal', 'emo_feat', 'emo_feat_2'\n out = self.layers(out[self.feature_to_use])\n return out\n\n def reset_last_layer(self):\n # initialize the last layer to zero to help the network \n # predict the initial pose a bit more stably\n torch.nn.init.constant_(self.layers[-1].weight, 0)\n torch.nn.init.constant_(self.layers[-1].bias, 0)" }, { "identifier": "EmonetRegressorStatic", "path": "inferno/models/EmoNetRegressor.py", "snippet": "class EmonetRegressorStatic(EmoNetRegressor):\n\n def __init__(self, outsize, last_op=None):\n super().__init__(outsize, last_op)\n self.emonet.requires_grad_(False)\n self.emonet.eval()\n\n def train(self, mode=True):\n # this one only trains the FC layers\n self.emonet.eval()\n self.layers.train(mode)\n return self\n\n\n def reset_last_layer(self):\n # initialize the last layer to zero to help the network \n # predict the initial pose a bit more stably\n torch.nn.init.constant_(self.layers[-1].weight, 0)\n torch.nn.init.constant_(self.layers[-1].bias, 0)" } ]
import os, sys import torch import torchvision import torch.nn.functional as F import torchvision.transforms.functional as F_v import numpy as np import cv2 import inferno.layers.losses.DecaLosses as lossfunc import inferno.layers.losses.MediaPipeLandmarkLosses as lossfunc_mp import inferno.utils.DecaUtils as util import pytorch_lightning.plugins.environments.lightning_environment as le import psutil import adabound import copy from pytorch_lightning import LightningModule from pytorch_lightning.loggers import WandbLogger from inferno.layers.losses.EmoNetLoss import EmoNetLoss, create_emo_loss, create_au_loss from skimage.io import imread from skimage.transform import resize from pathlib import Path from inferno.models.Renderer import SRenderY from inferno.models.DecaEncoder import ResnetEncoder, SecondHeadResnet, SwinEncoder from inferno.models.DecaDecoder import Generator, GeneratorAdaIn from inferno.models.DecaFLAME import FLAME, FLAMETex, FLAME_mediapipe from inferno.models.EmotionMLP import EmotionMLP from inferno.datasets.AffWild2Dataset import Expression7 from inferno.datasets.AffectNetDataModule import AffectNetExpressions from inferno.utils.lightning_logging import _log_array_image, _log_wandb_image, _torch_image2np from enum import Enum from inferno.utils.other import class_from_str, get_path_to_assets from inferno.layers.losses.VGGLoss import VGG19Loss from omegaconf import OmegaConf, open_dict from inferno.models.temporal.external.LipReadingLoss import LipReadingLoss from .StarGAN import StarGANWrapper from inferno.models.EmoNetRegressor import EmoNetRegressor, EmonetRegressorStatic from .mica.config import get_cfg_defaults from .mica.mica import MICA from .mica.MicaInputProcessing import MicaInputProcessor from inferno.utils.other import get_path_to_assets from inferno.models.IO import locate_checkpoint
19,546
uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode ='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}'. Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors = dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different subclass but retain the EMOCA functionality. See EMICA_v2 for an example. 
""" def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated) self.E_expression = EmoNetRegressor(self.n_exp_param) elif self.config.expression_backbone == 'emonet_static': # Frozen EmoNet with a trainable head instead of Resnet (deprecated)
""" Author: Radek Danecek Copyright (c) 2022, Radek Danecek All rights reserved. # Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is # holder of all proprietary rights on this computer program. # Using this computer program means that you agree to the terms # in the LICENSE file included with this software distribution. # Any use not explicitly granted by the LICENSE is prohibited. # # Copyright©2022 Max-Planck-Gesellschaft zur Förderung # der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute # for Intelligent Systems. All rights reserved. # # For comments or questions, please email us at [email protected] # For commercial licensing contact, please contact [email protected] Parts of the code were adapted from the original DECA release: https://github.com/YadiraF/DECA/ """ # from time import time torch.backends.cudnn.benchmark = True class DecaMode(Enum): COARSE = 1 # when switched on, only coarse part of DECA-based networks is used DETAIL = 2 # when switched on, only coarse and detail part of DECA-based networks is used class DecaModule(LightningModule): """ DecaModule is a PL module that implements DECA-inspired face reconstruction networks. """ def __init__(self, model_params, learning_params, inout_params, stage_name = ""): """ :param model_params: a DictConfig of parameters about the model itself :param learning_params: a DictConfig of parameters corresponding to the learning process (such as optimizer, lr and others) :param inout_params: a DictConfig of parameters about input and output (where checkpoints and visualizations are saved) """ super().__init__() self.learning_params = learning_params self.inout_params = inout_params # detail conditioning - what is given as the conditioning input to the detail generator in detail stage training if 'detail_conditioning' not in model_params.keys(): # jaw, expression and detail code by default self.detail_conditioning = ['jawpose', 'expression', 'detail'] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detail_conditioning = self.detail_conditioning else: self.detail_conditioning = model_params.detail_conditioning # deprecated and is not used if 'detailemo_conditioning' not in model_params.keys(): self.detailemo_conditioning = [] OmegaConf.set_struct(model_params, True) with open_dict(model_params): model_params.detailemo_conditioning = self.detailemo_conditioning else: self.detailemo_conditioning = model_params.detailemo_conditioning supported_conditioning_keys = ['identity', 'jawpose', 'expression', 'detail', 'detailemo'] for c in self.detail_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") for c in self.detailemo_conditioning: if c not in supported_conditioning_keys: raise ValueError(f"Conditioning on '{c}' is not supported. Supported conditionings: {supported_conditioning_keys}") # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. 
Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) self.mode = DecaMode[str(model_params.mode).upper()] self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" # initialize the emotion perceptual loss (used for EMOCA supervision) self.emonet_loss = None self._init_emotion_loss() # initialize the au perceptual loss (not currently used in EMOCA) self.au_loss = None self._init_au_loss() # initialize the lip reading perceptual loss (not currently used in original EMOCA) self.lipread_loss = None self._init_lipread_loss() # MPL regressor from the encoded space to emotion labels (not used in EMOCA but could be used for direct emotion supervision) if 'mlp_emotion_predictor' in self.deca.config.keys(): # self._build_emotion_mlp(self.deca.config.mlp_emotion_predictor) self.emotion_mlp = EmotionMLP(self.deca.config.mlp_emotion_predictor, model_params) else: self.emotion_mlp = None def get_input_image_size(self): return (self.deca.config.image_size, self.deca.config.image_size) def _instantiate_deca(self, model_params): """ Instantiate the DECA network. """ # which type of DECA network is used if 'deca_class' not in model_params.keys() or model_params.deca_class is None: print(f"Deca class is not specified. Defaulting to {str(DECA.__class__.__name__)}") # vanilla DECA by default (not EMOCA) deca_class = DECA else: # other type of DECA-inspired networks possible (such as ExpDECA, which is what EMOCA) deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) # instantiate the network self.deca = deca_class(config=model_params) def _init_emotion_loss(self): """ Initialize the emotion perceptual loss (used for EMOCA supervision) """ if 'emonet_weight' in self.deca.config.keys() and bool(self.deca.config.get('emonet_model_path', False)): if self.emonet_loss is not None: emoloss_force_override = True if 'emoloss_force_override' in self.deca.config.keys() and self.deca.config.emoloss_force_override else False if self.emonet_loss.is_trainable(): if not emoloss_force_override: print("The old emonet loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old emonet loss is trainable but override is set so it will be replaced.") else: print("The old emonet loss is not trainable. 
It will be replaced.") if 'emonet_model_path' in self.deca.config.keys(): emonet_model_path = self.deca.config.emonet_model_path else: emonet_model_path=None # self.emonet_loss = EmoNetLoss(self.device, emonet=emonet_model_path) emoloss_trainable = True if 'emoloss_trainable' in self.deca.config.keys() and self.deca.config.emoloss_trainable else False emoloss_dual = True if 'emoloss_dual' in self.deca.config.keys() and self.deca.config.emoloss_dual else False normalize_features = self.deca.config.normalize_features if 'normalize_features' in self.deca.config.keys() else None emo_feat_loss = self.deca.config.emo_feat_loss if 'emo_feat_loss' in self.deca.config.keys() else None old_emonet_loss = self.emonet_loss self.emonet_loss = create_emo_loss(self.device, emoloss=emonet_model_path, trainable=emoloss_trainable, dual=emoloss_dual, normalize_features=normalize_features, emo_feat_loss=emo_feat_loss) if old_emonet_loss is not None and type(old_emonet_loss) != self.emonet_loss: print(f"The old emonet loss {old_emonet_loss.__class__.__name__} is replaced during reconfiguration by " f"new emotion loss {self.emonet_loss.__class__.__name__}") else: self.emonet_loss = None def _init_au_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'au_loss' in self.deca.config.keys(): if self.au_loss is not None: force_override = True if 'force_override' in self.deca.config.au_loss.keys() \ and self.deca.config.au_loss.force_override else False if self.au_loss.is_trainable(): if not force_override: print("The old AU loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old AU loss is trainable but override is set so it will be replaced.") else: print("The old AU loss is not trainable. It will be replaced.") old_au_loss = self.emonet_loss self.au_loss = create_au_loss(self.device, self.deca.config.au_loss) else: self.au_loss = None def _init_lipread_loss(self): """ Initialize the au perceptual loss (not currently used in EMOCA) """ if 'lipread_loss' in self.deca.config.keys() and self.deca.config.lipread_loss.get('load', True): if self.lipread_loss is not None: force_override = True if 'force_override' in self.deca.config.lipread_loss.keys() \ and self.deca.config.lipread_loss.force_override else False assert self.lipread_loss.is_trainable(), "Trainable lip reading loss is not supported yet." if self.lipread_loss.is_trainable(): if not force_override: print("The old lip reading loss is trainable and will not be overrided or replaced.") return # raise NotImplementedError("The old emonet loss was trainable. Changing a trainable loss is probably now " # "what you want implicitly. If you need this, use the '`'emoloss_force_override' config.") else: print("The old lip reading loss is trainable but override is set so it will be replaced.") else: print("The old lip reading loss is not trainable. It will be replaced.") # old_lipread_loss = self.emonet_loss self.lipread_loss = LipReadingLoss(self.device, self.deca.config.lipread_loss.lipread_loss) self.lipread_loss.eval() self.lipread_loss.requires_grad_(False) else: self.lipread_loss = None def reconfigure(self, model_params, inout_params, learning_params, stage_name="", downgrade_ok=False, train=True): """ Reconfigure the model. 
Usually used to switch between detail and coarse stages (which have separate configs) """ if (self.mode == DecaMode.DETAIL and model_params.mode != DecaMode.DETAIL) and not downgrade_ok: raise RuntimeError("You're switching the EMOCA mode from DETAIL to COARSE. Is this really what you want?!") self.inout_params = inout_params self.learning_params = learning_params if self.deca.__class__.__name__ != model_params.deca_class: old_deca_class = self.deca.__class__.__name__ state_dict = self.deca.state_dict() if 'deca_class' in model_params.keys(): deca_class = class_from_str(model_params.deca_class, sys.modules[__name__]) else: deca_class = DECA self.deca = deca_class(config=model_params) diff = set(state_dict.keys()).difference(set(self.deca.state_dict().keys())) if len(diff) > 0: raise RuntimeError(f"Some values from old state dict will not be used. This is probably not what you " f"want because it most likely means that the pretrained model's weights won't be used. " f"Maybe you messed up backbone compatibility (e.g. SWIN vs ResNet)? {diff}") ret = self.deca.load_state_dict(state_dict, strict=False) if len(ret.unexpected_keys) > 0: raise RuntimeError(f"Unexpected keys: {ret.unexpected_keys}") missing_modules = set([s.split(".")[0] for s in ret.missing_keys]) print(f"Missing modules when upgrading from {old_deca_class} to {model_params.deca_class}:") print(missing_modules) else: self.deca._reconfigure(model_params) self._init_emotion_loss() self._init_au_loss() self.stage_name = stage_name if self.stage_name is None: self.stage_name = "" if len(self.stage_name) > 0: self.stage_name += "_" self.mode = DecaMode[str(model_params.mode).upper()] self.train(mode=train) print(f"EMOCA MODE RECONFIGURED TO: {self.mode}") if 'shape_constrain_type' in self.deca.config.keys() and str(self.deca.config.shape_constrain_type).lower() != 'none': shape_constraint = self.deca.config.shape_constrain_type else: shape_constraint = None if 'expression_constrain_type' in self.deca.config.keys() and str(self.deca.config.expression_constrain_type).lower() != 'none': expression_constraint = self.deca.config.expression_constrain_type else: expression_constraint = None if shape_constraint is not None and expression_constraint is not None: raise ValueError("Both shape constraint and expression constraint are active. 
This is probably not what we want.") def uses_texture(self): """ Check if the model uses texture """ return self.deca.uses_texture() def visualize(self, visdict, savepath, catdim=1): return self.deca.visualize(visdict, savepath, catdim) def train(self, mode: bool = True): # super().train(mode) # not necessary self.deca.train(mode) if self.emotion_mlp is not None: self.emotion_mlp.train(mode) if self.emonet_loss is not None: self.emonet_loss.eval() if self.deca.perceptual_loss is not None: self.deca.perceptual_loss.eval() if self.deca.id_loss is not None: self.deca.id_loss.eval() return self def to(self, *args, **kwargs): super().to(*args, **kwargs) return self def cuda(self, device=None): super().cuda(device) return self def cpu(self): super().cpu() return self def forward(self, batch): values = self.encode(batch, training=False) values = self.decode(values, training=False) return values def _unwrap_list(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return shapecode, texcode, expcode, posecode, cam, lightcode def _unwrap_list_to_dict(self, codelist): shapecode, texcode, expcode, posecode, cam, lightcode = codelist return {'shape': shapecode, 'tex': texcode, 'exp': expcode, 'pose': posecode, 'cam': cam, 'light': lightcode} # return shapecode, texcode, expcode, posecode, cam, lightcode def _encode_flame(self, images, **kwargs): if self.mode == DecaMode.COARSE or \ (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # forward pass with gradients (for coarse stage (used), or detail stage with coarse training (not used)) parameters = self.deca._encode_flame(images, **kwargs) elif self.mode == DecaMode.DETAIL: # in detail stage, the coarse forward pass does not need gradients with torch.no_grad(): parameters = self.deca._encode_flame(images, **kwargs) else: raise ValueError(f"Invalid EMOCA Mode {self.mode}") code_list, original_code = self.deca.decompose_code(parameters) # shapecode, texcode, expcode, posecode, cam, lightcode = code_list # return shapecode, texcode, expcode, posecode, cam, lightcode, original_code return code_list, original_code def _expression_ring_exchange(self, original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode=None, detailemocode=None, exprw=None, lmk_mp=None, mica_images=None): """ Deprecated. Expression ring exchange is not used in EMOCA (nor DECA). 
""" new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() expcode_new = expcode[new_order] ## append new shape code data expcode = torch.cat([expcode, expcode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) shapecode = torch.cat([shapecode, shapecode], dim=0) globpose = posecode[..., :3] jawpose = posecode[..., 3:] if self.deca.config.expression_constrain_use_jaw_pose: jawpose_new = jawpose[new_order] jawpose = torch.cat([jawpose, jawpose_new], dim=0) else: jawpose = torch.cat([jawpose, jawpose], dim=0) if self.deca.config.expression_constrain_use_global_pose: globpose_new = globpose[new_order] globpose = torch.cat([globpose, globpose_new], dim=0) else: globpose = torch.cat([globpose, globpose], dim=0) if self.deca.config.expression_constrain_use_jaw_pose or self.deca.config.expression_constrain_use_global_pose: posecode = torch.cat([globpose, jawpose], dim=-1) # posecode_new = torch.cat([globpose, jawpose], dim=-1) else: # posecode_new = posecode # posecode_new = posecode posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) # NOTE: # Here we could think about what makes sense to exchange # 1) Do we exchange all emotion GT (VA and expression) within the ring? # 2) Do we exchange only the GT on which the ring is constructed (AffectNet ring based on binned VA or expression or Emonet feature?) # note: if we use EmoMLP that goes from (expression, jawpose, detailcode) -> (v,a,expr) and we exchange # ALL of these, the EmoMLP prediction will of course be the same. The output image still changes, # so EmoNet loss (if used) would be different. Same for the photometric/landmark losses. # TODO: # For now I decided to exchange everything but this should probably be experimented with # I would argue though, that exchanging the GT is the right thing to do if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) if affectnetexp is not None: affectnetexp = torch.cat([affectnetexp, affectnetexp[new_order]], dim=0) if exprw is not None: exprw = torch.cat([exprw, exprw[new_order]], dim=0) if detailcode is not None: #TODO: to exchange or not to exchange, that is the question, the answer is probably NO detailcode = torch.cat([detailcode, detailcode], dim=0) # detailcode = torch.cat([detailcode, detailcode[new_order]], dim=0) if detailemocode is not None: # TODO: to exchange or not to exchange, that is the question, the answer is probably YES detailemocode = torch.cat([detailemocode, detailemocode[new_order]], dim=0) return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, \ detailcode, detailemocode, exprw, lmk_mp, mica_images # return expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7 def encode(self, batch, training=True) -> dict: """ Forward encoding pass of the model. Takes a batch of images and returns the corresponding latent codes for each image. 
:param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. For a training forward pass, additional corresponding data are necessary, such as 'landmarks' and 'masks'. For a testing pass, the images suffice. :param training: Whether the forward pass is for training or testing. """ codedict = {} original_batch_size = batch['image'].shape[0] images = batch['image'] if 'mica_images' in batch.keys(): mica_images = batch['mica_images'] else: mica_images = None if len(images.shape) == 5: K = images.shape[1] elif len(images.shape) == 4: K = 1 else: raise RuntimeError("Invalid image batch dimensions.") # [B, K, 3, size, size] ==> [BxK, 3, size, size] images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = mica_images.view(-1, mica_images.shape[-3], mica_images.shape[-2], mica_images.shape[-1]) if 'landmark' in batch.keys(): lmk = batch['landmark'] lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if 'landmark_mediapipe' in batch.keys(): lmk_mp = batch['landmark_mediapipe'] lmk_mp = lmk_mp.view(-1, lmk_mp.shape[-2], lmk_mp.shape[-1]) else: lmk_mp = None if 'mask' in batch.keys(): masks = batch['mask'] masks = masks.view(-1, images.shape[-2], images.shape[-1]) # valence / arousal - not necessary unless we want to use VA for supervision (not done in EMOCA) if 'va' in batch: va = batch['va'] va = va.view(-1, va.shape[-1]) else: va = None # 7 basic expressions - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'expr7' in batch: expr7 = batch['expr7'] expr7 = expr7.view(-1, expr7.shape[-1]) else: expr7 = None # affectnet basic expression - not necessary unless we want to use expression for supervision (not done in EMOCA or DECA) if 'affectnetexp' in batch: affectnetexp = batch['affectnetexp'] affectnetexp = affectnetexp.view(-1, affectnetexp.shape[-1]) else: affectnetexp = None # expression weights if supervising by expression is used (to balance the classification loss) - not done in EMOCA or DECA if 'expression_weight' in batch: exprw = batch['expression_weight'] exprw = exprw.view(-1, exprw.shape[-1]) else: exprw = None # 1) COARSE STAGE # forward pass of the coarse encoder # shapecode, texcode, expcode, posecode, cam, lightcode = self._encode_flame(images) code, original_code = self._encode_flame(images, mica_image=mica_images) shapecode, texcode, expcode, posecode, cam, lightcode = self._unwrap_list(code) if original_code is not None: original_code = self._unwrap_list_to_dict(original_code) if training: # If training, we employ the disentanglement strategy if self.mode == DecaMode.COARSE: if self.deca.config.shape_constrain_type == 'same': ## Enforce that all identity shape codes within the ring are the same ## by replacing them with their ring-wise mean. 
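# worked shape example (hypothetical sizes, assuming B=2 identities with K=3 views each and n_shape=100): # shapecode [6, 100] -> view -> [2, 3, 100] -> mean over the ring dim -> [2, 100] # -> repeat -> [2, 3, 100] -> view -> [6, 100]; all views of an identity now share the same averaged shape code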
# reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) shapecode_idK = shapecode.view(original_batch_size, K, -1) # get mean id shapecode_mean = torch.mean(shapecode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) shapecode_new = shapecode_mean[:, None, :].repeat(1, K, 1) shapecode = shapecode_new.view(-1, self.deca._get_num_shape_params()) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_idK = shapecode_orig.view(original_batch_size, K, -1) shapecode_orig_mean = torch.mean(shapecode_orig_idK, dim=[1]) shapecode_orig_new = shapecode_orig_mean[:, None, :].repeat(1, K, 1) original_code['shape'] = shapecode_orig_new.view(-1, self.deca._get_num_shape_params()) elif self.deca.config.shape_constrain_type == 'exchange': ## Shuffle identity shape codes within the ring (they should correspond to the same identity) ''' A plain penalty ||s0 - s1|| only encourages the shape codes s0 and s1 to be close in L2 space, but does not really ensure that the reconstructed shapes are close; exchanging the codes within the ring enforces the consistency more directly. ''' # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(self.deca.config.batch_size_train)]) # new_order = np.array([np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) new_order = np.array([np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_expression': assert original_code is not None ## DEPRECATED, NOT USED IN EMOCA OR DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression 
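# (the while-loop above rejection-samples permutations until no element maps to itself, # i.e. every sample is guaranteed to receive a different donor's expression)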
expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) if lmk_mp is not None: lmk_mp = torch.cat([lmk_mp, lmk_mp], dim=0) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) # do the same for the original code dict original_code['shape'] = torch.cat([original_code['shape'], original_code['shape']], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp'][new_order]], dim=0) original_global_pose = original_code['pose'][:, :3] original_jaw_pose = original_code['pose'][:, 3:] original_jaw_pose = torch.cat([original_jaw_pose, original_jaw_pose[new_order]], dim=0) original_global_pose = torch.cat([original_global_pose, original_global_pose], dim=0) original_code['pose'] = torch.cat([original_global_pose, original_jaw_pose], dim=1) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) elif self.deca.config.shape_constrain_type == 'shuffle_shape': ## The shape codes are shuffled across the whole batch; the batch is then doubled (originals first, shuffled second) new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) 
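# identity references: the duplicated half follows the shape-code donor (new_order), # while the expression references (assigned next) stay with the original images for both halves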
ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) # do the same for the original code dict shapecode_orig = original_code['shape'] shapecode_orig_new = shapecode_orig[new_order] original_code['shape'] = torch.cat([shapecode_orig, shapecode_orig_new], dim=0) original_code['tex'] = torch.cat([original_code['tex'], original_code['tex']], dim=0) original_code['exp'] = torch.cat([original_code['exp'], original_code['exp']], dim=0) original_code['pose'] = torch.cat([original_code['pose'], original_code['pose']], dim=0) original_code['cam'] = torch.cat([original_code['cam'], original_code['cam']], dim=0) original_code['light'] = torch.cat([original_code['light'], original_code['light']], dim=0) original_code['ref_images_identity_idxs'] = ref_images_identity_idxs original_code['ref_images_expression_idxs'] = ref_images_expression_idxs elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'same': ## NOT USED IN EMOCA OR DECA, deprecated # reshape shapecode => [B, K, n_shape] # shapecode_idK = shapecode.view(self.batch_size, self.deca.K, -1) expcode_idK = expcode.view(original_batch_size, K, -1) # get mean id expcode_mean = torch.mean(expcode_idK, dim=[1]) # shapecode_new = shapecode_mean[:, None, :].repeat(1, self.deca.K, 1) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) expcode = expcode_new.view(-1, self.deca._get_num_shape_params()) # do the same thing for the original code dict expcode_idK = original_code['exp'].view(original_batch_size, K, -1) expcode_mean = torch.mean(expcode_idK, dim=[1]) expcode_new = expcode_mean[:, None, :].repeat(1, K, 1) original_code['exp'] = expcode_new.view(-1, self.deca._get_num_shape_params()) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': ## NOT USED IN EMOCA OR DECA, deprecated expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, \ masks, va, expr7, affectnetexp, _, _, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, None, None, exprw, lmk_mp, mica_images) # (self, original_batch_size, K, # expcode, posecode, shapecode, lightcode, texcode, # images, cam, lmk, masks, va, expr7, affectnetexp, # detailcode=None, detailemocode=None, exprw=None): # 2) DETAIL STAGE if self.mode == DecaMode.DETAIL: all_detailcode = self.deca.E_detail(images) # identity-based detail code detailcode = all_detailcode[:, :self.deca.n_detail] # detail emotion code is deprecated and will be empty detailemocode = all_detailcode[:, self.deca.n_detail:(self.deca.n_detail + self.deca.n_detail_emo)] if training: # If training, we employ the disentanglement strategy if self.deca.config.detail_constrain_type == 'exchange': # Identity within the same ring should be the same, so they should have the same code. # This can be enforced by shuffling. The batch is duplicated and the duplicated part's code shuffled ''' A plain penalty ||s0 - s1|| only encourages the codes s0 and s1 to be close in L2 space, but does not really ensure that the reconstructed shapes are close; exchanging the codes within the ring enforces the consistency more directly. ''' # this creates a per-ring random permutation. 
The detail exchange happens ONLY between the same # identities (within the ring) but not outside (no cross-identity detail exchange) new_order = np.array( # [np.random.permutation(self.deca.config.train_K) + i * self.deca.config.train_K for i in range(original_batch_size)]) [np.random.permutation(K) + i * K for i in range(original_batch_size)]) new_order = new_order.flatten() detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) ## duplicate the remaining codes shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_expression': ## Deprecated and not used in EMOCA or DECA new_order = np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order # exchange expression expcode_new = expcode[new_order] expcode = torch.cat([expcode, expcode_new], dim=0) # exchange emotion code, but not (identity-based) detailcode detailemocode_new = detailemocode[new_order] detailemocode = torch.cat([detailemocode, detailemocode_new], dim=0) detailcode = torch.cat([detailcode, detailcode], dim=0) # exchange jaw pose (but not global pose) global_pose = posecode[:, :3] jaw_pose = posecode[:, 3:] jaw_pose_new = jaw_pose[new_order] jaw_pose = torch.cat([jaw_pose, jaw_pose_new], dim=0) global_pose = torch.cat([global_pose, global_pose], dim=0) posecode = torch.cat([global_pose, jaw_pose], dim=1) ## duplicate the rest shapecode = torch.cat([shapecode, shapecode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## duplicate gt if any images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) print(f"TRAINING: {training}") if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, old_order]) ref_images_expression_idxs = np.concatenate([old_order, new_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va[new_order]], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7[new_order]], dim=0) elif self.deca.config.detail_constrain_type == 'shuffle_shape': ## Shuffles the shape codes across the whole batch; the batch is then doubled (originals first, shuffled second) new_order = 
np.random.permutation(K*original_batch_size) old_order = np.arange(K*original_batch_size) while (new_order == old_order).any(): # ugly hacky way of assuring that every element is permuted new_order = np.random.permutation(K * original_batch_size) codedict['new_order'] = new_order shapecode_new = shapecode[new_order] ## append new shape code data shapecode = torch.cat([shapecode, shapecode_new], dim=0) # exchange (identity-based) detailcode, but not emotion code detailcode_new = detailcode[new_order] detailcode = torch.cat([detailcode, detailcode_new], dim=0) detailemocode = torch.cat([detailemocode, detailemocode], dim=0) texcode = torch.cat([texcode, texcode], dim=0) expcode = torch.cat([expcode, expcode], dim=0) posecode = torch.cat([posecode, posecode], dim=0) cam = torch.cat([cam, cam], dim=0) lightcode = torch.cat([lightcode, lightcode], dim=0) ## append gt images = torch.cat([images, images], dim=0) # images = images.view(-1, images.shape[-3], images.shape[-2], images.shape[-1]) if mica_images is not None: mica_images = torch.cat([mica_images, mica_images], dim=0) if lmk is not None: lmk = torch.cat([lmk, lmk], dim=0) # lmk = lmk.view(-1, lmk.shape[-2], lmk.shape[-1]) masks = torch.cat([masks, masks], dim=0) ref_images_identity_idxs = np.concatenate([old_order, new_order]) ref_images_expression_idxs = np.concatenate([old_order, old_order]) codedict["ref_images_identity_idxs"] = ref_images_identity_idxs codedict["ref_images_expression_idxs"] = ref_images_expression_idxs if va is not None: va = torch.cat([va, va], dim=0) if expr7 is not None: expr7 = torch.cat([expr7, expr7], dim=0) elif 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type == 'exchange': expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images = \ self._expression_ring_exchange(original_batch_size, K, expcode, posecode, shapecode, lightcode, texcode, images, cam, lmk, masks, va, expr7, affectnetexp, detailcode, detailemocode, exprw, lmk_mp, mica_images) codedict['shapecode'] = shapecode codedict['texcode'] = texcode codedict['expcode'] = expcode codedict['posecode'] = posecode codedict['cam'] = cam codedict['lightcode'] = lightcode if self.mode == DecaMode.DETAIL: codedict['detailcode'] = detailcode codedict['detailemocode'] = detailemocode codedict['images'] = images if mica_images is not None: codedict['mica_images'] = mica_images if 'mask' in batch.keys(): codedict['masks'] = masks if 'landmark' in batch.keys(): codedict['lmk'] = lmk if lmk_mp is not None: codedict['lmk_mp'] = lmk_mp if 'va' in batch.keys(): codedict['va'] = va if 'expr7' in batch.keys(): codedict['expr7'] = expr7 if 'affectnetexp' in batch.keys(): codedict['affectnetexp'] = affectnetexp if 'expression_weight' in batch.keys(): codedict['expression_weight'] = exprw if original_code is not None: codedict['original_code'] = original_code return codedict def _create_conditioning_lists(self, codedict, condition_list): detail_conditioning_list = [] if 'globalpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, :3]] if 'jawpose' in condition_list: detail_conditioning_list += [codedict["posecode"][:, 3:]] if 'identity' in condition_list: detail_conditioning_list += [codedict["shapecode"]] if 'expression' in condition_list: detail_conditioning_list += [codedict["expcode"]] if isinstance(self.deca.D_detail, Generator): # the detail codes might be excluded from conditioning based on the 
Generator architecture (for instance # for AdaIn Generator) if 'detail' in condition_list: detail_conditioning_list += [codedict["detailcode"]] if 'detailemo' in condition_list: detail_conditioning_list += [codedict["detailemocode"]] return detail_conditioning_list def decode(self, codedict, training=True, render=True, **kwargs) -> dict: """ Forward decoding pass of the model. Takes the latent code predicted by the encoding stage and reconstructs and renders the shape. :param codedict: Batch dict of the predicted latent codes :param training: Whether the forward pass is for training or testing. """ shapecode = codedict['shapecode'] expcode = codedict['expcode'] posecode = codedict['posecode'] texcode = codedict['texcode'] cam = codedict['cam'] lightcode = codedict['lightcode'] images = codedict['images'] if 'masks' in codedict.keys(): masks = codedict['masks'] else: masks = None effective_batch_size = images.shape[0] # this is the current batch size after all training augmentations modifications # 1) Reconstruct the face mesh # FLAME - world space if not isinstance(self.deca.flame, FLAME_mediapipe): verts, landmarks2d, landmarks3d = self.deca.flame(shape_params=shapecode, expression_params=expcode, pose_params=posecode) landmarks2d_mediapipe = None else: verts, landmarks2d, landmarks3d, landmarks2d_mediapipe = self.deca.flame(shapecode, expcode, posecode) # world to camera trans_verts = util.batch_orth_proj(verts, cam) predicted_landmarks = util.batch_orth_proj(landmarks2d, cam)[:, :, :2] # camera to image space trans_verts[:, :, 1:] = -trans_verts[:, :, 1:] predicted_landmarks[:, :, 1:] = - predicted_landmarks[:, :, 1:] if landmarks2d_mediapipe is not None: predicted_landmarks_mediapipe = util.batch_orth_proj(landmarks2d_mediapipe, cam)[:, :, :2] predicted_landmarks_mediapipe[:, :, 1:] = - predicted_landmarks_mediapipe[:, :, 1:] if self.uses_texture(): albedo = self.deca.flametex(texcode) else: # if not using texture, default to gray albedo = torch.ones([effective_batch_size, 3, self.deca.config.uv_size, self.deca.config.uv_size], device=images.device) * 0.5 # 2) Render the coarse image if render: ops = self.deca.render(verts, trans_verts, albedo, lightcode) # mask mask_face_eye = F.grid_sample(self.deca.uv_face_eye_mask.expand(effective_batch_size, -1, -1, -1), ops['grid'].detach(), align_corners=False) # images predicted_images = ops['images'] # predicted_images = ops['images'] * mask_face_eye * ops['alpha_images'] # predicted_images_no_mask = ops['images'] #* mask_face_eye * ops['alpha_images'] segmentation_type = None if isinstance(self.deca.config.useSeg, bool): if self.deca.config.useSeg: segmentation_type = 'gt' else: segmentation_type = 'rend' elif isinstance(self.deca.config.useSeg, str): segmentation_type = self.deca.config.useSeg else: raise RuntimeError(f"Invalid 'useSeg' type: '{type(self.deca.config.useSeg)}'") if segmentation_type not in ["gt", "rend", "intersection", "union"]: raise ValueError(f"Invalid segmentation type for masking '{segmentation_type}'") if masks is None: # if mask not provided, the only mask available is the rendered one segmentation_type = 'rend' elif masks.shape[-1] != predicted_images.shape[-1] or masks.shape[-2] != predicted_images.shape[-2]: # resize masks if need be (this is only done if configuration was changed at some point after training) dims = masks.ndim == 3 if dims: masks = masks[:, None, :, :] masks = F.interpolate(masks, size=predicted_images.shape[-2:], mode='bilinear') if dims: masks = masks[:, 0, ...] 
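# note: bilinear interpolation of a hard 0/1 mask produces soft values along its boundary; # these simply act as blending weights in the background compositing below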
            # resize images if need be (this is only done if configuration was changed at some point after training)
            if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]:
                ## special case only for inference time if the rendering image sizes have been changed
                images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear')
            else:
                images_resized = images

            # what type of segmentation we use
            if segmentation_type == "gt":  # GT stands for external segmentation predicted by face parsing or similar
                masks = masks[:, None, :, :]
            elif segmentation_type == "rend":  # mask rendered as a silhouette of the face mesh
                masks = mask_face_eye * ops['alpha_images']
            elif segmentation_type == "intersection":  # intersection of the two above
                masks = masks[:, None, :, :] * mask_face_eye * ops['alpha_images']
            elif segmentation_type == "union":  # union of the first two options
                masks = torch.max(masks[:, None, :, :], mask_face_eye * ops['alpha_images'])
            else:
                raise RuntimeError(f"Invalid segmentation type for masking '{segmentation_type}'")

            if self.deca.config.background_from_input in [True, "input"]:
                if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]:
                    ## special case only for inference time if the rendering image sizes have been changed
                    predicted_images = (1. - masks) * images_resized + masks * predicted_images
                else:
                    predicted_images = (1. - masks) * images + masks * predicted_images
            elif self.deca.config.background_from_input in [False, "black"]:
                predicted_images = masks * predicted_images
            elif self.deca.config.background_from_input in ["none"]:
                predicted_images = predicted_images
            else:
                raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}")

        # 3) Render the detail image
        if self.mode == DecaMode.DETAIL:
            detailcode = codedict['detailcode']
            detailemocode = codedict['detailemocode']

            # a) Create the detail conditioning lists
            detail_conditioning_list = self._create_conditioning_lists(codedict, self.detail_conditioning)
            detailemo_conditioning_list = self._create_conditioning_lists(codedict, self.detailemo_conditioning)
            final_detail_conditioning_list = detail_conditioning_list + detailemo_conditioning_list

            # b) Pass the detail code and the conditions through the detail generator to get displacement UV map
            if isinstance(self.deca.D_detail, Generator):
                uv_z = self.deca.D_detail(torch.cat(final_detail_conditioning_list, dim=1))
            elif isinstance(self.deca.D_detail, GeneratorAdaIn):
                uv_z = self.deca.D_detail(z=torch.cat([detailcode, detailemocode], dim=1),
                                          cond=torch.cat(final_detail_conditioning_list, dim=1))
            else:
                raise ValueError(f"This class of generator is not supported: '{self.deca.D_detail.__class__.__name__}'")

            # if there is a displacement mask, apply it (DEPRECATED and not USED in DECA or EMOCA)
            if hasattr(self.deca, 'displacement_mask') and self.deca.displacement_mask is not None:
                if 'apply_displacement_masks' in self.deca.config.keys() and self.deca.config.apply_displacement_masks:
                    uv_z = uv_z * self.deca.displacement_mask
            # uv_z = self.deca.D_detail(torch.cat([posecode[:, 3:], expcode, detailcode], dim=1))

            # render detail
            if render:
                detach_from_coarse_geometry = not self.deca.config.train_coarse
                uv_detail_normals, uv_coarse_vertices = self.deca.displacement2normal(uv_z, verts, ops['normals'],
                                                                                      detach=detach_from_coarse_geometry)
                uv_shading = self.deca.render.add_SHlight(uv_detail_normals, lightcode.detach())
                uv_texture = albedo.detach() * uv_shading  # batch size X
image_rows X image_cols X 2 # you can query the grid for UV values of the face mesh at pixel locations grid = ops['grid'] if detach_from_coarse_geometry: # if the grid is detached, the gradient of the positions of UV-values in image space won't flow back to the geometry grid = grid.detach() predicted_detailed_image = F.grid_sample(uv_texture, grid, align_corners=False) if self.deca.config.background_from_input in [True, "input"]: if images.shape[-1] != predicted_images.shape[-1] or images.shape[-2] != predicted_images.shape[-2]: ## special case only for inference time if the rendering image sizes have been changed # images_resized = F.interpolate(images, size=predicted_images.shape[-2:], mode='bilinear') ## before bugfix # predicted_images = (1. - masks) * images_resized + masks * predicted_images ## after bugfix predicted_detailed_image = (1. - masks) * images_resized + masks * predicted_detailed_image else: predicted_detailed_image = (1. - masks) * images + masks * predicted_detailed_image elif self.deca.config.background_from_input in [False, "black"]: predicted_detailed_image = masks * predicted_detailed_image elif self.deca.config.background_from_input in ["none"]: predicted_detailed_image = predicted_detailed_image else: raise ValueError(f"Invalid type of background modification {self.deca.config.background_from_input}") # --- extract texture uv_pverts = self.deca.render.world2uv(trans_verts).detach() uv_gt = F.grid_sample(torch.cat([images_resized, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') uv_texture_gt = uv_gt[:, :3, :, :].detach() uv_mask_gt = uv_gt[:, 3:, :, :].detach() # self-occlusion normals = util.vertex_normals(trans_verts, self.deca.render.faces.expand(effective_batch_size, -1, -1)) uv_pnorm = self.deca.render.world2uv(normals) uv_mask = (uv_pnorm[:, -1, :, :] < -0.05).float().detach() uv_mask = uv_mask[:, None, :, :] ## combine masks uv_vis_mask = uv_mask_gt * uv_mask * self.deca.uv_face_eye_mask else: uv_detail_normals = None predicted_detailed_image = None ## 4) (Optional) NEURAL RENDERING - not used in neither DECA nor EMOCA # If neural rendering is enabled, the differentiable rendered synthetic images are translated using an image translation net (such as StarGan) predicted_translated_image = None predicted_detailed_translated_image = None translated_uv_texture = None if render: if self.deca._has_neural_rendering(): predicted_translated_image = self.deca.image_translator( { "input_image" : predicted_images, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_images.shape[0], dtype=torch.int64, device=predicted_images.device) } ) if self.mode == DecaMode.DETAIL: predicted_detailed_translated_image = self.deca.image_translator( { "input_image" : predicted_detailed_image, "ref_image" : images, "target_domain" : torch.tensor([0]*predicted_detailed_image.shape[0], dtype=torch.int64, device=predicted_detailed_image.device) } ) translated_uv = F.grid_sample(torch.cat([predicted_detailed_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], mode='bilinear') translated_uv_texture = translated_uv[:, :3, :, :].detach() else: predicted_detailed_translated_image = None translated_uv_texture = None # no need in coarse mode # translated_uv = F.grid_sample(torch.cat([predicted_translated_image, masks], dim=1), uv_pverts.permute(0, 2, 3, 1)[:, :, :, :2], # mode='bilinear') # translated_uv_texture = translated_uv_gt[:, :3, :, :].detach() if self.emotion_mlp is not None: codedict = self.emotion_mlp(codedict, 
"emo_mlp_") # populate the value dict for metric computation/visualization if render: codedict['predicted_images'] = predicted_images codedict['predicted_detailed_image'] = predicted_detailed_image codedict['predicted_translated_image'] = predicted_translated_image codedict['ops'] = ops codedict['normals'] = ops['normals'] codedict['mask_face_eye'] = mask_face_eye codedict['verts'] = verts codedict['albedo'] = albedo codedict['landmarks2d'] = landmarks2d codedict['landmarks3d'] = landmarks3d codedict['predicted_landmarks'] = predicted_landmarks if landmarks2d_mediapipe is not None: codedict['predicted_landmarks_mediapipe'] = predicted_landmarks_mediapipe codedict['trans_verts'] = trans_verts codedict['masks'] = masks if self.mode == DecaMode.DETAIL: if render: codedict['predicted_detailed_translated_image'] = predicted_detailed_translated_image codedict['translated_uv_texture'] = translated_uv_texture codedict['uv_texture_gt'] = uv_texture_gt codedict['uv_texture'] = uv_texture codedict['uv_detail_normals'] = uv_detail_normals codedict['uv_shading'] = uv_shading codedict['uv_vis_mask'] = uv_vis_mask codedict['uv_mask'] = uv_mask codedict['uv_z'] = uv_z codedict['displacement_map'] = uv_z + self.deca.fixed_uv_dis[None, None, :, :] return codedict def _compute_emotion_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, va=None, expr7=None, with_grad=True, batch_size=None, ring_size=None): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) else: d = metric_dict with torch.no_grad(): emo_feat_loss_1, emo_feat_loss_2, valence_loss, arousal_loss, expression_loss, au_loss = \ self.emonet_loss.compute_loss(images, predicted_images, batch_size=batch_size, ring_size=ring_size) # EmoNet self-consistency loss terms if emo_feat_loss_1 is not None: loss_or_metric(prefix + '_emonet_feat_1_L1', emo_feat_loss_1 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_1 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_feat_2_L1', emo_feat_loss_2 * self.deca.config.emonet_weight, self.deca.config.use_emonet_feat_2 and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_valence_L1', valence_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_valence and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_arousal_L1', arousal_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_arousal and self.deca.config.use_emonet_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.emonet_weight) # KL seems to be causing NaN's loss_or_metric(prefix + '_emonet_expression_L1',expression_loss * self.deca.config.emonet_weight, self.deca.config.use_emonet_expression and self.deca.config.use_emonet_loss) loss_or_metric(prefix + '_emonet_combined', ((emo_feat_loss_1 if emo_feat_loss_1 is not None else 0) + emo_feat_loss_2 + valence_loss + arousal_loss + expression_loss) * self.deca.config.emonet_weight, self.deca.config.use_emonet_combined and self.deca.config.use_emonet_loss) # Log also the VA metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() metric_dict[prefix + "_valence_output"] = 
self.emonet_loss.output_emotion['valence'].mean().detach() metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach() metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach() input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() input_ex = np.argmax(input_ex, axis=1).mean() output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy() output_ex = np.argmax(output_ex, axis=1).mean() metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device) metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device) # # GT emotion loss terms # if self.deca.config.use_gt_emotion_loss: # d = loss_dict # else: # d = metric_dict # TODO: uncomment this after you handle the case when certain entries are NaN (GT missing, not a bug) # if va is not None: # d[prefix + 'emo_sup_val_L1'] = F.l1_loss(self.emonet_loss.output_emotion['valence'], va[:, 0]) \ # * self.deca.config.gt_emotion_reg # d[prefix + 'emo_sup_ar_L1'] = F.l1_loss(self.emonet_loss.output_emotion['arousal'], va[:, 1]) \ # * self.deca.config.gt_emotion_reg # # metric_dict[prefix + "_valence_gt"] = va[:, 0].mean().detach() # metric_dict[prefix + "_arousal_gt"] = va[:, 1].mean().detach() # # if expr7 is not None: # affectnet_gt = [expr7_to_affect_net(int(expr7[i])).value for i in range(len(expr7))] # affectnet_gt = torch.tensor(np.array(affectnet_gt), device=self.device, dtype=torch.long) # d[prefix + '_emo_sup_expr_CE'] = F.cross_entropy(self.emonet_loss.output_emotion['expression'], affectnet_gt) * self.deca.config.gt_emotion_reg # metric_dict[prefix + "_expr_gt"] = affectnet_gt.mean().detach() def _compute_au_loss(self, images, predicted_images, loss_dict, metric_dict, prefix, au=None, with_grad=True): def loss_or_metric(name, loss, is_loss): if not is_loss: metric_dict[name] = loss else: loss_dict[name] = loss # if self.deca.config.use_emonet_loss: if with_grad: d = loss_dict au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) else: d = metric_dict with torch.no_grad(): au_feat_loss_1, au_feat_loss_2, _, _, _, au_loss = \ self.au_loss.compute_loss(images, predicted_images) # EmoNet self-consistency loss terms if au_feat_loss_1 is not None: loss_or_metric(prefix + '_au_feat_1_L1', au_feat_loss_1 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_1 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_feat_2_L1', au_feat_loss_2 * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_feat_2 and self.deca.config.au_loss.use_as_loss) loss_or_metric(prefix + '_au_loss', au_loss * self.deca.config.au_loss.au_weight, self.deca.config.au_loss.use_aus and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + '_au_losses_L1', arousal_loss * self.deca.config.au_loss.au_weight, # self.deca.config.au_loss.use_emonet_arousal and self.deca.config.au_loss.use_as_loss) # loss_or_metric(prefix + 'emonet_expression_KL', expression_loss * self.deca.config.au_loss.au_weight) # KL seems to be causing NaN's # # Log also the VA # metric_dict[prefix + "_valence_input"] = self.emonet_loss.input_emotion['valence'].mean().detach() # metric_dict[prefix + "_valence_output"] = 
        #     self.emonet_loss.output_emotion['valence'].mean().detach()
        # metric_dict[prefix + "_arousal_input"] = self.emonet_loss.input_emotion['arousal'].mean().detach()
        # metric_dict[prefix + "_arousal_output"] = self.emonet_loss.output_emotion['arousal'].mean().detach()
        # input_ex = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy()
        # input_ex = np.argmax(input_ex, axis=1).mean()
        # output_ex = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'].detach().cpu().numpy()
        # output_ex = np.argmax(output_ex, axis=1).mean()
        # metric_dict[prefix + "_expression_input"] = torch.tensor(input_ex, device=self.device)
        # metric_dict[prefix + "_expression_output"] = torch.tensor(output_ex, device=self.device)

        # # GT emotion loss terms
        # if self.deca.config.use_gt_emotion_loss:
        #     d = loss_dict
        # else:
        #     d = metric_dict

    def _cut_mouth_vectorized(self, images, landmarks, convert_grayscale=True):
        # mouth_window_margin = 12
        mouth_window_margin = 1  # not temporal
        mouth_crop_height = 96
        mouth_crop_width = 96
        mouth_landmark_start_idx = 48
        mouth_landmark_stop_idx = 68
        B, T = images.shape[:2]

        landmarks = landmarks.to(torch.float32)

        with torch.no_grad():
            image_size = images.shape[-1] / 2
            landmarks = landmarks * image_size + image_size

            # #1) smooth the landmarks with temporal convolution
            # landmarks are of shape (T, 68, 2)
            # reshape to (T, 136)
            landmarks_t = landmarks.reshape(*landmarks.shape[:2], -1)
            # make temporal dimension last
            landmarks_t = landmarks_t.permute(0, 2, 1)  # change shape to (N, 136, T)
            # landmarks_t = landmarks_t.unsqueeze(0)

            # smooth with temporal convolution
            temporal_filter = torch.ones(mouth_window_margin, device=images.device) / mouth_window_margin
            # pad the landmarks
            landmarks_t_padded = F.pad(landmarks_t, (mouth_window_margin // 2, mouth_window_margin // 2), mode='replicate')
            # convolve each channel separately with the temporal filter
            num_channels = landmarks_t.shape[1]
            if temporal_filter.numel() > 1:
                smooth_landmarks_t = F.conv1d(landmarks_t_padded,
                                              temporal_filter.unsqueeze(0).unsqueeze(0).expand(num_channels, 1, temporal_filter.numel()),
                                              groups=num_channels, padding='valid')
                smooth_landmarks_t = smooth_landmarks_t[..., 0:landmarks_t.shape[-1]]
            else:
                smooth_landmarks_t = landmarks_t

            # reshape back to the original shape
            smooth_landmarks_t = smooth_landmarks_t.permute(0, 2, 1).view(landmarks.shape)
            smooth_landmarks_t = smooth_landmarks_t + landmarks.mean(dim=2, keepdims=True) - smooth_landmarks_t.mean(dim=2, keepdims=True)

            # #2) get the mouth landmarks
            mouth_landmarks_t = smooth_landmarks_t[..., mouth_landmark_start_idx:mouth_landmark_stop_idx, :]

            # #3) get the mean of the mouth landmarks
            mouth_landmarks_mean_t = mouth_landmarks_t.mean(dim=-2, keepdims=True)

            # #4) get the center of the mouth
            center_x_t = mouth_landmarks_mean_t[..., 0]
            center_y_t = mouth_landmarks_mean_t[..., 1]

            # #5) use grid_sample to crop the mouth in every image
            # create the grid
            height = mouth_crop_height // 2
            width = mouth_crop_width // 2

            torch.arange(0, mouth_crop_width, device=images.device)  # no-op; the result is unused

            grid = torch.stack(torch.meshgrid(torch.linspace(-height, height, mouth_crop_height).to(images.device) / (images.shape[-2] / 2),
                                              torch.linspace(-width, width, mouth_crop_width).to(images.device) / (images.shape[-1] / 2)),
                               dim=-1)
            grid = grid[..., [1, 0]]
            grid = grid.unsqueeze(0).unsqueeze(0).repeat(*images.shape[:2], 1, 1, 1)

            center_x_t -= images.shape[-1] / 2
            center_y_t -= images.shape[-2] / 2

            center_x_t /= images.shape[-1] / 2
            center_y_t /= images.shape[-2] / 2

            grid = grid + torch.cat([center_x_t, center_y_t], dim=-1).unsqueeze(-2).unsqueeze(-2)

        images = images.view(B * T, *images.shape[2:])
        grid = grid.view(B * T, *grid.shape[2:])

        if convert_grayscale:
            images = F_v.rgb_to_grayscale(images)

        image_crops = F.grid_sample(
            images,
            grid,
            align_corners=True,
            padding_mode='zeros',
            mode='bicubic'
        )
        image_crops = image_crops.view(B, T, *image_crops.shape[1:])

        if convert_grayscale:
            image_crops = image_crops  # .squeeze(1)

        # import matplotlib.pyplot as plt
        # plt.figure()
        # plt.imshow(image_crops[0, 0].permute(1,2,0).cpu().numpy())
        # plt.show()
        # plt.figure()
        # plt.imshow(image_crops[0, 10].permute(1,2,0).cpu().numpy())
        # plt.show()
        # plt.figure()
        # plt.imshow(image_crops[0, 20].permute(1,2,0).cpu().numpy())
        # plt.show()
        # plt.figure()
        # plt.imshow(image_crops[1, 0].permute(1,2,0).cpu().numpy())
        # plt.show()
        # plt.figure()
        # plt.imshow(image_crops[1, 10].permute(1,2,0).cpu().numpy())
        # plt.show()
        # plt.figure()
        # plt.imshow(image_crops[1, 20].permute(1,2,0).cpu().numpy())
        # plt.show()

        return image_crops

    def _compute_lipread_loss(self, images, predicted_images, landmarks, predicted_landmarks, loss_dict, metric_dict, prefix, with_grad=True):

        def loss_or_metric(name, loss, is_loss):
            if not is_loss:
                metric_dict[name] = loss
            else:
                loss_dict[name] = loss

        # shape of images is: (B, R, C, H, W)
        # convert to (B * R, 1, H, W, C)
        images = images.unsqueeze(1)
        predicted_images = predicted_images.unsqueeze(1)
        landmarks = landmarks.unsqueeze(1)
        predicted_landmarks = predicted_landmarks.unsqueeze(1)

        # cut out the mouth region
        images_mouth = self._cut_mouth_vectorized(images, landmarks)
        predicted_images_mouth = self._cut_mouth_vectorized(predicted_images, predicted_landmarks)

        # make sure that the lip reading net interprets things with depth=1,
        # if self.deca.config.use_emonet_loss:
        if with_grad:
            d = loss_dict
            loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth)
        else:
            d = metric_dict
            with torch.no_grad():
                loss = self.lipread_loss.compute_loss(images_mouth, predicted_images_mouth)

        d[prefix + '_lipread'] = loss * self.deca.config.lipread_loss.weight

    def _metric_or_loss(self, loss_dict, metric_dict, is_loss):
        if is_loss:
            d = loss_dict
        else:
            d = metric_dict
        return d

    def _compute_id_loss(self, codedict, batch, training, testing, losses, batch_size, ring_size):
        # if self.deca.config.idw > 1e-3:
        if self.deca.id_loss is not None:
            images = codedict["images"]
            ops = codedict["ops"]
            mask_face_eye = codedict["mask_face_eye"]
            shading_images = self.deca.render.add_SHlight(ops['normal_images'], codedict["lightcode"].detach())
            albedo_images = F.grid_sample(codedict["albedo"].detach(), ops['grid'], align_corners=False)
            # TODO: get to the bottom of this weird overlay thing - why is it there?
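            # NOTE (editorial): a minimal sketch of the composite computed just below,
            # assuming hypothetical tensors with matching spatial shapes. The rendered face
            # is alpha-blended over the input image, so the identity network sees the face
            # against a realistic background:
            #
            #     import torch
            #     fg = torch.rand(2, 3, 224, 224)       # rendered face (albedo * SH shading)
            #     bg = torch.rand(2, 3, 224, 224)       # original input image
            #     alpha = torch.rand(2, 1, 224, 224)    # face/eye mask in [0, 1]
            #     overlay = fg * alpha + bg * (1 - alpha)   # broadcasts over the channel dim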
# answer: This renders the face and takes background from the image overlay = albedo_images * shading_images * mask_face_eye + images * (1 - mask_face_eye) if self.global_step >= self.deca.id_loss_start_step: if 'id_metric' in self.deca.config.keys() and 'barlow_twins' in self.deca.config.id_metric: assert ring_size == 1 or ring_size == 2 effective_bs = images.shape[0] # losses['identity'] = self.deca.id_loss(overlay, images, batch_size=batch_size, # ring_size=ring_size) * self.deca.config.idw if "ref_images_identity_idxs" in codedict.keys(): # in case there was shuffling, this ensures that the proper images are used for identity loss images_ = images[codedict["ref_images_identity_idxs"]] else: images_ = images losses['identity'] = self.deca.id_loss(overlay, images_, batch_size=effective_bs, ring_size=1) * self.deca.config.idw if 'id_contrastive' in self.deca.config.keys() and bool(self.deca.config.id_contrastive): if ring_size == 2: assert effective_bs % 2 == 0 assert self.deca.id_loss.trainable has_been_shuffled = 'new_order' in codedict.keys() idxs_a = torch.arange(0, images.shape[0], 2) # indices of first images within the ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second images within the ring # WARNING - this assumes the ring is identity-based if self.deca.config.id_contrastive in [True, "real", "both"]: # we are taking this from the original batch dict because we do not care about the # shuffled, duplicated samples (they don't have to be doubled) images_0 = batch["image"][:, 0, ...] images_1 = batch["image"][:, 1, ...] losses['identity_contrastive_real'] = self.deca.id_loss( images_0, # first images within the ring images_1, # second images within the ring batch_size=images_0.shape[0], ring_size=1) * self.deca.config.idw * 2 if self.deca.config.id_contrastive in [True, "synth", "both"]: if self.deca.config.shape_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings overlay_0 = overlay[idxs_a] overlay_1 = overlay[idxs_b] else: #if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: overlay_0 = overlay[0:batch_size * ring_size:2] overlay_1 = overlay[1:batch_size * ring_size:2] losses['identity_contrastive_synthetic'] = self.deca.id_loss( overlay_0, # first images within the ring overlay_1, # second images within the ring batch_size=overlay_0.shape[0], ring_size=1) * self.deca.config.idw if has_been_shuffled: new_order = codedict['new_order'] # TODO: compare the idxs to these: # codedict["ref_images_identity_idxs"] if self.deca.config.shape_constrain_type == 'shuffle_expression': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch else: raise NotImplementedError("Unexpected shape consistency value ") # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["shapecode"][idxs_a_synth].allclose(codedict["shapecode"][idxs_b_synth]) losses['identity_contrastive_synthetic_shuffled'] = self.deca.id_loss( overlay[idxs_a_synth], # synthetic images of identities with reconstructed expressions overlay[idxs_b_synth], # synthetic images of identities with shuffled expressions 
                                    batch_size=idxs_a_synth.size, ring_size=1) * self.deca.config.idw

                                losses['identity_contrastive_synthetic2real_shuffled'] = self.deca.id_loss(
                                    images[idxs_a_synth],   # synthetic images of identities with reconstructed expressions
                                    overlay[idxs_b_synth],  # synthetic images of identities with shuffled expressions
                                    batch_size=idxs_a_synth.size, ring_size=1) * self.deca.config.idw
                    elif ring_size > 2:
                        raise NotImplementedError("Contrastive loss does not support ring sizes > 2.")
        return losses

    def _compute_emonet_loss_wrapper(self, codedict, batch, training, testing, losses, metrics, prefix, image_key,
                                     with_grad, batch_size, ring_size):

        if self.emonet_loss is not None:

            if 'va' in codedict:
                va = codedict['va']
                va = va.view(-1, va.shape[-1])
            else:
                va = None

            if 'expr7' in codedict:
                expr7 = codedict['expr7']
                expr7 = expr7.view(-1, expr7.shape[-1])
            else:
                expr7 = None

            # with torch.no_grad():
            # TODO: if expression shuffled, this needs to be changed, the input images no longer correspond
            images = codedict["images"]
            predicted_images = codedict[image_key]
            effective_bs = images.shape[0]

            if "ref_images_expression_idxs" in codedict.keys():
                # in case there was shuffling, this ensures that the proper images are used for emotion loss
                images_ = images[codedict["ref_images_expression_idxs"]]
            else:
                images_ = images
            effective_bs = images.shape[0]

            self._compute_emotion_loss(images_, predicted_images, losses, metrics, f"{prefix}",
                                       va, expr7, with_grad=with_grad,
                                       batch_size=effective_bs, ring_size=1)

            codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence']
            codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal']
            codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']
            codedict[f"{prefix}_valence_output"] = self.emonet_loss.output_emotion['valence']
            codedict[f"{prefix}_arousal_output"] = self.emonet_loss.output_emotion['arousal']
            codedict[f"{prefix}_expression_output"] = self.emonet_loss.output_emotion[
                'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']

            if 'emo_contrastive' in self.deca.config.keys() and self.deca.config.emo_contrastive:

                assert ring_size == 2 or ring_size == 1

                assert self.emonet_loss.trainable or (
                        hasattr(self.emonet_loss, 'clone_is_trainable') and self.emonet_loss.clone_is_trainable)

                has_been_shuffled = 'new_order' in codedict.keys()

                # if self.deca.config.shape_constrain_type == 'shuffle_expression' and has_been_shuffled:
                #     new_order = codedict['new_order']
                #
                if self.deca.config.emo_contrastive in [True, "real", "both"]:

                    if ring_size == 2:

                        assert effective_bs % 2 == 0

                        if not isinstance(self.deca, ExpDECA):
                            raise NotImplementedError("Cross-ring emotion contrast means the ring has to be "
                                                      "expression based, not identity based. This is not guaranteed "
                                                      "for vanilla EMOCA (or its datasets).")

                        # we are taking this from the original batch dict because we do not care about the
                        # shuffled, duplicated samples (they don't have to be doubled)
                        images_0 = batch["image"][:, 0, ...]
                        images_1 = batch["image"][:, 1, ...]
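                        # NOTE (editorial): with ring_size == 2 the batch carries two images per
                        # ring, selected here via the second axis of batch["image"]; flattened
                        # batches are assumed to be laid out as [r0_a, r0_b, r1_a, r1_b, ...],
                        # which is why strided indexing recovers the two ring members elsewhere.
                        # Illustrative sketch (hypothetical values, not part of the original code):
                        #
                        #     import torch
                        #     idxs_a = torch.arange(0, 8, 2)   # tensor([0, 2, 4, 6]) -> first of each ring
                        #     idxs_b = torch.arange(1, 8, 2)   # tensor([1, 3, 5, 7]) -> second of each ring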
self._compute_emotion_loss(images_0, # real images of first expressions in the ring images_1, # real images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_real", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=images_0.shape[0], ring_size=1) else: print("[WARNING] Cannot compute real contrastive emotion loss because there is no ring!") if self.deca.config.emo_contrastive in [True, "synth", "both"]: if ring_size == 2: assert effective_bs % 2 == 0 idxs_a = torch.arange(0, images.shape[0], 2) # indices of first expressions within a ring idxs_b = torch.arange(1, images.shape[0], 2) # indices of second expressions within a ring if 'expression_constrain_type' in self.deca.config.keys() and \ self.deca.config.expression_constrain_type in ['exchange', 'same']: # we can take all when identity has been exchange within rings predicted_images_0 = predicted_images[idxs_a] predicted_images_1 = predicted_images[idxs_b] raise RuntimeError("This should work but it was never tested or intended. Make sure this works.") else: # if the batch was double otherwise (global shuffling) we only take the first half # if batch_size * ring_size < effective_bs: predicted_images_0 = predicted_images[0:batch_size * ring_size:2] predicted_images_1 = predicted_images[1:batch_size * ring_size:2] if not isinstance(self.deca, ExpDECA): raise NotImplementedError("Cross-ring emotion contrast means the ring has to be " "expression based, not identity based. This is not guaranteed " "for vanilla EMOCA.") self._compute_emotion_loss(predicted_images_0, # rec images of first expressions in the ring predicted_images_1, # rec images of second expressions in the ring losses, metrics, f"{prefix}_contrastive_synth", va, expr7, with_grad=self.deca.config.use_emonet_loss, batch_size=predicted_images_1.shape[0], ring_size=1) else: print("[WARNING] Cannot compute synthetic contrastive emotion loss because there is no ring!") if has_been_shuffled: new_order = codedict['new_order'] if self.deca.config.shape_constrain_type == 'shuffle_expression': # this gets tricky, in this case the images are not duplicates -> we need all, but the second # half's order is shuffled, so we need to be careful here idxs_a_synth = new_order # shuffled first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch elif self.deca.config.shape_constrain_type == 'shuffle_shape': idxs_a_synth = np.arange(new_order.shape[0]) # first half of the batch idxs_b_synth = np.arange(new_order.shape[0], 2 * new_order.shape[0]) # second half of the batch # if this doesn't go through, something went wrong with the shuffling indexations assert codedict["expcode"][idxs_a_synth].allclose(codedict["expcode"][idxs_b_synth]) # the expressions at corresponding index positions of idxs_a_synth and idxs_b_synth should match now self._compute_emotion_loss(predicted_images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, metrics, f"{prefix}_contrastive_synth_shuffled", va, expr7, with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=idxs_a_synth.size, ring_size=1) self._compute_emotion_loss(images[idxs_a_synth], # synthetic images of reconstructed expressions and corresponding identities predicted_images[idxs_b_synth], # synthetic images of reconstructed expressions and shuffled identities losses, 
                                               metrics, f"{prefix}_contrastive_synth2real_shuffled", va,
                                               expr7,
                                               with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(),
                                               batch_size=idxs_a_synth.size, ring_size=1)

            if va is not None:
                codedict[f"{prefix}_valence_gt"] = va[:, 0]
                codedict[f"{prefix}_arousal_gt"] = va[:, 1]
            if expr7 is not None:
                codedict[f"{prefix}_expression_gt"] = expr7

            if self.deca._has_neural_rendering():
                assert 'emo_contrastive' not in self.deca.config.keys() or self.deca.config.emo_contrastive is False
                # TODO possible to make this more GPU efficient by not recomputing emotion for input image
                # NOTE (editorial): the flattened original referenced undefined names here
                # (`predicted_translated_image` and `bs`); the codedict entry and `effective_bs`
                # are the presumed intent.
                self._compute_emotion_loss(images, codedict["predicted_translated_image"],
                                           losses, metrics, f"{prefix}_translated",
                                           va, expr7,
                                           with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(),
                                           batch_size=effective_bs, ring_size=1)

                # codedict[f"{prefix}_valence_input"] = self.emonet_loss.input_emotion['valence']
                # codedict[f"{prefix}_arousal_input"] = self.emonet_loss.input_emotion['arousal']
                # codedict[f"{prefix}_expression_input"] = self.emonet_loss.input_emotion['expression']

                codedict[f"{prefix}_translated_valence_output"] = self.emonet_loss.output_emotion['valence']
                codedict[f"{prefix}_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal']
                codedict[f"{prefix}_translated_expression_output"] = self.emonet_loss.output_emotion[
                    'expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification']

        return losses, metrics, codedict

    def _compute_loss(self, codedict, batch, training=True, testing=False):
        #### ----------------------- Losses
        losses = {}
        metrics = {}

        predicted_landmarks = codedict["predicted_landmarks"]
        predicted_landmarks_mediapipe = codedict.get("predicted_landmarks_mediapipe", None)
        if "lmk" in codedict.keys():
            lmk = codedict["lmk"]
        else:
            lmk = None
        if "lmk_mp" in codedict.keys():
            lmk_mp = codedict["lmk_mp"]
        else:
            lmk_mp = None

        if "masks" in codedict.keys():
            masks = codedict["masks"]
        else:
            masks = None

        batch_size = codedict["predicted_images"].shape[0]

        use_geom_losses = 'use_geometric_losses_expression_exchange' in self.deca.config.keys() and \
                          self.deca.config.use_geometric_losses_expression_exchange

        if training and ('expression_constrain_type' in self.deca.config.keys()
                         and ('expression_constrain_type' in self.deca.config.keys()
                              and self.deca.config.expression_constrain_type == 'exchange')
                         or ('shape_constrain_type' in self.deca.config.keys()
                             and self.deca.config.shape_constrain_type in ['shuffle_expression', 'shuffle_shape'])) \
                and (self.deca.mode == DecaMode.COARSE or self.deca.config.train_coarse) \
                and (not use_geom_losses):
            if batch_size % 2 != 0:
                raise RuntimeError("The batch size should be even because it should have "
                                   f"got doubled in expression ring exchange.
Instead it was odd: {batch_size}") # THIS IS DONE BECAUSE LANDMARK AND PHOTOMETRIC LOSSES MAKE NO SENSE FOR EXPRESSION EXCHANGE geom_losses_idxs = batch_size // 2 else: geom_losses_idxs = batch_size predicted_images = codedict["predicted_images"] images = codedict["images"] lightcode = codedict["lightcode"] albedo = codedict["albedo"] mask_face_eye = codedict["mask_face_eye"] shapecode = codedict["shapecode"] expcode = codedict["expcode"] texcode = codedict["texcode"] ops = codedict["ops"] if self.mode == DecaMode.DETAIL: uv_texture = codedict["uv_texture"] uv_texture_gt = codedict["uv_texture_gt"] # this determines the configured batch size that is currently used (training, validation or testing) # the reason why this is important is because of potential multi-gpu training and loss functions (such as Barlow Twins) # that might need the full size of the batch (not just the chunk of the current GPU). if training: bs = self.learning_params.batch_size_train rs = self.learning_params.train_K else: if not testing: bs = self.learning_params.batch_size_val rs = self.learning_params.val_K else: bs = self.learning_params.batch_size_test rs = self.learning_params.test_K ## COARSE loss only if self.mode == DecaMode.COARSE or (self.mode == DecaMode.DETAIL and self.deca.config.train_coarse): # landmark losses (only useful if coarse model is being trained # if training or lmk is not None: if lmk is not None: # if self.deca.config.use_landmarks: # d = losses # else: # d = metrics d = self._metric_or_loss(losses, metrics, self.deca.config.use_landmarks) if self.deca.config.useWlmk: d['landmark'] = \ lossfunc.weighted_landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight else: d['landmark'] = \ lossfunc.landmark_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight d = self._metric_or_loss(losses, metrics, 'use_eye_distance' not in self.deca.config.keys() or self.deca.config.use_eye_distance) # losses['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks, lmk) * self.deca.config.lmk_weight * 2 d['eye_distance'] = lossfunc.eyed_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.eyed d = self._metric_or_loss(losses, metrics, 'use_lip_distance' not in self.deca.config.keys() or self.deca.config.use_lip_distance) d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd d = self._metric_or_loss(losses, metrics, 'use_mouth_corner_distance' in self.deca.config.keys() and self.deca.config.use_mouth_corner_distance) d['mouth_corner_distance'] = lossfunc.mouth_corner_loss(predicted_landmarks[:geom_losses_idxs, ...], lmk[:geom_losses_idxs, ...]) * self.deca.config.lipd if predicted_landmarks_mediapipe is not None and lmk_mp is not None: use_mediapipe_landmarks = self.deca.config.get('use_mediapipe_landmarks', False) d = self._metric_or_loss(losses, metrics, use_mediapipe_landmarks) d['landmark_mediapipe'] =lossfunc_mp.landmark_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lmk_weight_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_eye_distance_mediapipe', False) ) d['eye_distance_mediapipe'] = lossfunc_mp.eyed_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.eyed_mp d = self._metric_or_loss(losses, metrics, 
self.deca.config.get('use_lip_distance_mediapipe', False) ) d['lip_distance_mediapipe'] = lossfunc_mp.lipd_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp d = self._metric_or_loss(losses, metrics, self.deca.config.get('use_mouth_corner_distance_mediapipe', False)) d['mouth_corner_distance_mediapipe'] = lossfunc_mp.mouth_corner_loss(predicted_landmarks_mediapipe[:geom_losses_idxs, ...], lmk_mp[:geom_losses_idxs, ...]) * self.deca.config.lipd_mp #TODO: fix this on the next iteration lipd_loss # d['lip_distance'] = lossfunc.lipd_loss(predicted_landmarks, lmk) * self.deca.config.lipd # photometric loss # if training or masks is not None: if masks is not None: # if self.deca.config.use_photometric: # d = losses # else: # d = metrics # d['photometric_texture'] = (masks * (predicted_images - images).abs()).mean() * self.deca.config.photow photometric = masks[:geom_losses_idxs, ...] * ((predicted_images[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()) if 'photometric_normalization' not in self.deca.config.keys() or self.deca.config.photometric_normalization == 'mean': photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'rel_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'neg_rel_mask_value': mu = 1. - masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'inv_rel_mask_value': mu = 1./ masks[:geom_losses_idxs, ...].mean(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric * mu photometric = photometric.mean() elif self.deca.config.photometric_normalization == 'abs_mask_value': photometric = photometric * masks[:geom_losses_idxs, ...].sum(dim=tuple(range(1,masks.ndim)), keepdim=True) photometric = photometric.mean() else: raise ValueError(f"Invalid photometric loss normalization: '{self.deca.config.photometric_normalization}'") self._metric_or_loss(losses, metrics, self.deca.config.use_photometric)['photometric_texture'] = \ photometric * self.deca.config.photow if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_images[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_translated_image = codedict["predicted_translated_image"] photometric_translated = (masks[:geom_losses_idxs, ...] * ( predicted_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_photometric: losses['photometric_translated_texture'] = photometric_translated else: metrics['photometric_translated_texture'] = photometric_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] 
* predicted_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_translated'] = vggl * self.deca.config.vggw else: raise ValueError("Is this line ever reached?") losses = self._compute_id_loss(codedict, batch, training, testing, losses, batch_size=bs, ring_size=rs) losses['shape_reg'] = (torch.sum(shapecode ** 2) / 2) * self.deca.config.shape_reg losses['expression_reg'] = (torch.sum(expcode ** 2) / 2) * self.deca.config.exp_reg losses['tex_reg'] = (torch.sum(texcode ** 2) / 2) * self.deca.config.tex_reg losses['light_reg'] = ((torch.mean(lightcode, dim=2)[:, :, None] - lightcode) ** 2).mean() * self.deca.config.light_reg if 'original_code' in codedict.keys(): # original jaw pose regularization if self.deca.config.get('exp_deca_jaw_pose', False) and \ 'deca_jaw_reg' in self.deca.config.keys() and self.deca.config.deca_jaw_reg > 0: jaw_pose_orig = codedict['original_code']['pose'][:, 3:] jaw_pose = codedict['posecode'][..., 3:] deca_jaw_pose_reg = (torch.sum((jaw_pose - jaw_pose_orig) ** 2) / 2) * self.deca.config.deca_jaw_reg losses['deca_jaw_pose_reg'] = deca_jaw_pose_reg if self.deca.config.get('exp_deca_global_pose', False) and \ 'deca_global_reg' in self.deca.config.keys() and self.deca.config.deca_global_reg > 0: global_pose_orig = codedict['original_code']['pose'][:, :3] global_pose = codedict['posecode'][..., :3] global_pose_reg = (torch.sum((global_pose - global_pose_orig) ** 2) / 2) * self.deca.config.deca_global_reg losses['deca_global_pose_reg'] = global_pose_reg # original expression regularization if 'deca_expression_reg' in self.deca.config.keys() and self.deca.config.deca_expression_reg > 0: expression_orig = codedict['original_code']['exp'] expression = codedict['expcode'] deca_expression_reg = (torch.sum((expression - expression_orig) ** 2) / 2) * self.deca.config.deca_expression_reg losses['deca_expression_reg'] = deca_expression_reg losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse", image_key="predicted_images", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="coarse_translated", image_key="predicted_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs ) if self.au_loss is not None: # with torch.no_grad(): self._compute_au_loss(images, predicted_images, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_translated_image, losses, metrics, "coarse", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) if self.lipread_loss is not None: # with torch.no_grad(): self._compute_lipread_loss(images, predicted_images, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_lipread_loss(images, predicted_translated_image, lmk, predicted_landmarks, losses, metrics, "coarse", with_grad=self.deca.config.lipread_loss.use_as_loss and self.deca._has_neural_rendering()) ## DETAIL loss only if 
self.mode == DecaMode.DETAIL: predicted_detailed_image = codedict["predicted_detailed_image"] uv_z = codedict["uv_z"] # UV displacement map uv_shading = codedict["uv_shading"] uv_vis_mask = codedict["uv_vis_mask"] # uv_mask of what is visible photometric_detailed = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_detailed_texture'] = photometric_detailed else: metrics['photometric_detailed_texture'] = photometric_detailed if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)['vgg_detailed'] = vggl * self.deca.config.vggw if self.deca._has_neural_rendering(): predicted_detailed_translated_image = codedict["predicted_detailed_translated_image"] photometric_detailed_translated = (masks[:geom_losses_idxs, ...] * ( predicted_detailed_translated_image[:geom_losses_idxs, ...] - images[:geom_losses_idxs, ...]).abs()).mean() * self.deca.config.photow if self.deca.config.use_detailed_photo: losses['photometric_translated_detailed_texture'] = photometric_detailed_translated else: metrics['photometric_translated_detailed_texture'] = photometric_detailed_translated if self.deca.vgg_loss is not None: vggl, _ = self.deca.vgg_loss( masks[:geom_losses_idxs, ...] * images[:geom_losses_idxs, ...], # masked input image masks[:geom_losses_idxs, ...] * predicted_detailed_translated_image[:geom_losses_idxs, ...], # masked output image ) self._metric_or_loss(losses, metrics, self.deca.config.use_vgg)[ 'vgg_detailed_translated'] = vggl * self.deca.config.vggw losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail", image_key = "predicted_detailed_image", with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) if self.deca._has_neural_rendering(): losses, metrics, codedict = self._compute_emonet_loss_wrapper(codedict, batch, training, testing, losses, metrics, prefix="detail_translated", image_key="predicted_detailed_translated_image", with_grad=self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), batch_size=bs, ring_size=rs) # if self.emonet_loss is not None: # self._compute_emotion_loss(images, predicted_detailed_image, losses, metrics, "detail", # with_grad=self.deca.config.use_emonet_loss and not self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # codedict["detail_valence_input"] = self.emonet_loss.input_emotion['valence'] # codedict["detail_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # codedict["detail_expression_input"] = self.emonet_loss.input_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # codedict["detail_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] # # if va is not None: # codedict["detail_valence_gt"] = va[:,0] # codedict["detail_arousal_gt"] = va[:,1] # if expr7 is not 
None: # codedict["detail_expression_gt"] = expr7 # if self.deca._has_neural_rendering(): # #TODO possible to make this more GPU efficient by not recomputing emotion for input image # self._compute_emotion_loss(images, predicted_detailed_translated_image, # losses, metrics, "detail_translated", # va, expr7, # with_grad= self.deca.config.use_emonet_loss and self.deca._has_neural_rendering(), # batch_size=bs, ring_size=rs) # # # codedict["coarse_valence_input"] = self.emonet_loss.input_emotion['valence'] # # codedict["coarse_arousal_input"] = self.emonet_loss.input_emotion['arousal'] # # codedict["coarse_expression_input"] = self.emonet_loss.input_emotion['expression'] # codedict["detail_translated_valence_output"] = self.emonet_loss.output_emotion['valence'] # codedict["detail_translated_arousal_output"] = self.emonet_loss.output_emotion['arousal'] # codedict["detail_translated_expression_output"] = self.emonet_loss.output_emotion['expression' if 'expression' in self.emonet_loss.input_emotion.keys() else 'expr_classification'] if self.au_loss is not None: self._compute_au_loss(images, predicted_images, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and not self.deca._has_neural_rendering()) if self.deca._has_neural_rendering(): self._compute_au_loss(images, predicted_detailed_translated_image, losses, metrics, "detail", au=None, with_grad=self.deca.config.au_loss.use_as_loss and self.deca._has_neural_rendering()) for pi in range(3): # self.deca.face_attr_mask.shape[0]): if self.deca.config.sfsw[pi] != 0: # if pi==0: new_size = 256 # else: # new_size = 128 # if self.deca.config.uv_size != 256: # new_size = 128 uv_texture_patch = F.interpolate( uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_texture_gt_patch = F.interpolate( uv_texture_gt[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') uv_vis_mask_patch = F.interpolate( uv_vis_mask[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') detail_l1 = (uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1 and not self.deca._has_neural_rendering(): losses['detail_l1_{}'.format(pi)] = detail_l1 else: metrics['detail_l1_{}'.format(pi)] = detail_l1 if self.deca.config.use_detail_mrf and not self.deca._has_neural_rendering(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_mrf_{}'.format(pi)] = mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_mrf_{}'.format(pi)] = mrf if self.deca._has_neural_rendering(): # raise NotImplementedError("Gotta implement the texture extraction first.") translated_uv_texture = codedict["translated_uv_texture"] translated_uv_texture_patch = F.interpolate( translated_uv_texture[:geom_losses_idxs, :, self.deca.face_attr_mask[pi][2]:self.deca.face_attr_mask[pi][3], 
self.deca.face_attr_mask[pi][0]:self.deca.face_attr_mask[pi][1]], [new_size, new_size], mode='bilinear') translated_detail_l1 = (translated_uv_texture_patch * uv_vis_mask_patch - uv_texture_gt_patch * uv_vis_mask_patch).abs().mean() * \ self.deca.config.sfsw[pi] if self.deca.config.use_detail_l1: losses['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 else: metrics['detail_translated_l1_{}'.format(pi)] = translated_detail_l1 if self.deca.config.use_detail_mrf: translated_mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr losses['detail_translated_mrf_{}'.format(pi)] = translated_mrf else: with torch.no_grad(): mrf = self.deca.perceptual_loss(translated_uv_texture_patch * uv_vis_mask_patch, uv_texture_gt_patch * uv_vis_mask_patch) * \ self.deca.config.sfsw[pi] * self.deca.config.mrfwr metrics['detail_translated_mrf_{}'.format(pi)] = mrf # Old piece of debug code. Good to delete. # if pi == 2: # uv_texture_gt_patch_ = uv_texture_gt_patch # uv_texture_patch_ = uv_texture_patch # uv_vis_mask_patch_ = uv_vis_mask_patch losses['z_reg'] = torch.mean(uv_z.abs()) * self.deca.config.zregw losses['z_diff'] = lossfunc.shading_smooth_loss(uv_shading) * self.deca.config.zdiffw nonvis_mask = (1 - util.binary_erosion(uv_vis_mask)) losses['z_sym'] = (nonvis_mask * (uv_z - torch.flip(uv_z, [-1]).detach()).abs()).sum() * self.deca.config.zsymw if self.emotion_mlp is not None:# and not testing: mlp_losses, mlp_metrics = self.emotion_mlp.compute_loss( codedict, batch, training=training, pred_prefix="emo_mlp_") for key in mlp_losses.keys(): if key in losses.keys(): raise RuntimeError(f"Duplicate loss label {key}") losses[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_losses[key] for key in mlp_metrics.keys(): if key in metrics.keys(): raise RuntimeError(f"Duplicate metric label {key}") # let's report the metrics (which are a superset of losses when it comes to EmoMLP) without the weight, # it's hard to plot the metrics otherwise metrics[key] = mlp_metrics[key] # metrics[key] = self.deca.config.mlp_emotion_predictor_weight * mlp_metrics[key] # else: # uv_texture_gt_patch_ = None # uv_texture_patch_ = None # uv_vis_mask_patch_ = None return losses, metrics def compute_loss(self, values, batch, training=True, testing=False) -> dict: """ The function used to compute the loss on a training batch. : training should be set to true when calling from training_step only """ losses, metrics = self._compute_loss(values, batch, training=training, testing=testing) all_loss = 0. losses_key = losses.keys() for key in losses_key: all_loss = all_loss + losses[key] # losses['all_loss'] = all_loss losses = {'loss_' + key: value for key, value in losses.items()} # add prefix loss for better logging losses['loss'] = all_loss # add metrics that do not effect the loss function (if any) for key in metrics.keys(): losses['metric_' + key] = metrics[key] return losses def _val_to_be_logged(self, d): if not hasattr(self, 'val_dict_list'): self.val_dict_list = [] self.val_dict_list += [d] def _train_to_be_logged(self, d): if not hasattr(self, 'train_dict_list'): self.train_dict_list = [] self.train_dict_list += [d] def validation_step(self, batch, batch_idx, dataloader_idx=None): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. 
        batch['image'] [batch_size, ring_size, 3, image_size, image_size].
        For a training forward pass, additional corresponding data are necessary such as 'landmarks' and 'masks'.
        :batch_idx batch index
        """
        with torch.no_grad():
            training = False
            values = self.encode(batch, training=training)
            values = self.decode(values, training=training)
            losses_and_metrics = self.compute_loss(values, batch, training=training)
        #### self.log_dict(losses_and_metrics, on_step=False, on_epoch=True)
        # prefix = str(self.mode.name).lower()
        prefix = self._get_logging_prefix()

        # if dataloader_idx is not None:
        #     dataloader_str = str(dataloader_idx) + "_"
        # else:
        dataloader_str = ''

        stage_str = dataloader_str + 'val_'
        # losses_and_metrics_to_log = {prefix + dataloader_str + '_val_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()}
        # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach() for key, value in losses_and_metrics.items()}
        losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()}
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch
        # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device)
        # log val_loss also without any prefix for a model checkpoint to track it
        losses_and_metrics_to_log[stage_str + 'loss'] = losses_and_metrics_to_log[prefix + '_' + stage_str + 'loss']

        losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step
        losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx
        losses_and_metrics_to_log[stage_str + 'step'] = self.global_step
        losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx

        losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss
        losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss
        # self._val_to_be_logged(losses_and_metrics_to_log)

        if self.logger is not None:
            self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True)  # log per epoch # recommended

        if self.trainer.is_global_zero:
            if self.deca.config.val_vis_frequency > 0:
                if batch_idx % self.deca.config.val_vis_frequency == 0:
                    uv_detail_normals = None
                    if 'uv_detail_normals' in values.keys():
                        uv_detail_normals = values['uv_detail_normals']
                    visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'],
                                                                                values['ops'], uv_detail_normals,
                                                                                values, batch_idx, stage_str[:-1], prefix)
                    vis_dict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx,
                                                                  indices=0, dataloader_idx=dataloader_idx)
                    # image = Image(grid_image, caption="full visualization")
                    # vis_dict[prefix + '_val_' + "visualization"] = image
                    if isinstance(self.logger, WandbLogger):
                        self.logger.log_metrics(vis_dict)
        return None

    def _get_logging_prefix(self):
        prefix = self.stage_name + str(self.mode.name).lower()
        return prefix

    def test_step(self, batch, batch_idx, dataloader_idx=None):
        """
        Testing step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss
        and logs the losses/visualizations without gradient.
        :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size].
        For a training forward pass, additional corresponding data are necessary such as 'landmarks' and 'masks'.
:param batch_idx: batch index """ prefix = self._get_logging_prefix() losses_and_metrics_to_log = {} # if dataloader_idx is not None: # dataloader_str = str(dataloader_idx) + "_" # else: dataloader_str = '' stage_str = dataloader_str + 'test_' with torch.no_grad(): training = False testing = True values = self.encode(batch, training=training) values = self.decode(values, training=training) if 'mask' in batch.keys(): losses_and_metrics = self.compute_loss(values, batch, training=False, testing=testing) # losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_' + stage_str + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} else: losses_and_metrics = None # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch # losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) # losses_and_metrics_to_log[stage_str + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) # losses_and_metrics_to_log[stage_str + 'step'] = torch.tensor(self.global_step, device=self.device) # losses_and_metrics_to_log[stage_str + 'batch_idx'] = torch.tensor(batch_idx, device=self.device) losses_and_metrics_to_log[prefix + '_' + stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_' + stage_str + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_' + stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + stage_str + 'mem_usage'] = self.process.memory_info().rss losses_and_metrics_to_log[stage_str + 'epoch'] = self.current_epoch losses_and_metrics_to_log[stage_str + 'step'] = self.global_step losses_and_metrics_to_log[stage_str + 'batch_idx'] = batch_idx losses_and_metrics_to_log[stage_str + 'mem_usage'] = self.process.memory_info().rss if self.logger is not None: # self.logger.log_metrics(losses_and_metrics_to_log) self.log_dict(losses_and_metrics_to_log, sync_dist=True, on_step=False, on_epoch=True) # if self.global_step % 200 == 0: uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] if self.deca.config.test_vis_frequency > 0: # Log visualizations every once in a while if batch_idx % self.deca.config.test_vis_frequency == 0: # if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, self.global_step, stage_str[:-1], prefix) visdict = self._create_visualizations_to_log(stage_str[:-1], visualizations, values, batch_idx, indices=0, dataloader_idx=dataloader_idx) self.logger.log_metrics(visdict) return None @property def process(self): if not hasattr(self, "process_"): self.process_ = psutil.Process(os.getpid()) return self.process_ def training_step(self, batch, batch_idx, *args, **kwargs): #, debug=True): """ Training step override of pytorch lightning module. It makes the encoding, decoding passes, computes the loss and logs the losses/visualizations. :param batch: Batch of images to encode. batch['image'] [batch_size, ring_size, 3, image_size, image_size]. 
For a training forward pass, additional corresponding data are necessary such as 'landmarks' and 'masks'. :param batch_idx: batch index """ values = self.encode(batch, training=True) values = self.decode(values, training=True) losses_and_metrics = self.compute_loss(values, batch, training=True) uv_detail_normals = None if 'uv_detail_normals' in values.keys(): uv_detail_normals = values['uv_detail_normals'] # prefix = str(self.mode.name).lower() prefix = self._get_logging_prefix() # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach() for key, value in losses_and_metrics.items()} losses_and_metrics_to_log = {prefix + '_train_' + key: value.detach().cpu().item() for key, value in losses_and_metrics.items()} # losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log[prefix + '_train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log[prefix + '_train_' + 'step'] = self.global_step losses_and_metrics_to_log[prefix + '_train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log[prefix + '_' + "train_" + 'mem_usage'] = self.process.memory_info().rss # losses_and_metrics_to_log['train_' + 'epoch'] = torch.tensor(self.current_epoch, device=self.device) losses_and_metrics_to_log['train_' + 'epoch'] = self.current_epoch losses_and_metrics_to_log['train_' + 'step'] = self.global_step losses_and_metrics_to_log['train_' + 'batch_idx'] = batch_idx losses_and_metrics_to_log["train_" + 'mem_usage'] = self.process.memory_info().rss # log loss also without any prefix for a model checkpoint to track it losses_and_metrics_to_log['loss'] = losses_and_metrics_to_log[prefix + '_train_loss'] if self.logger is not None: self.log_dict(losses_and_metrics_to_log, on_step=False, on_epoch=True, sync_dist=True) # log per epoch, # recommended if self.deca.config.train_vis_frequency > 0: if self.global_step % self.deca.config.train_vis_frequency == 0: if self.trainer.is_global_zero: visualizations, grid_image = self._visualization_checkpoint(values['verts'], values['trans_verts'], values['ops'], uv_detail_normals, values, batch_idx, "train", prefix) visdict = self._create_visualizations_to_log('train', visualizations, values, batch_idx, indices=0) if isinstance(self.logger, WandbLogger): self.logger.log_metrics(visdict)#, step=self.global_step) # self.log_dict(visdict, sync_dist=True) # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=False) # log per step # self.log_dict(losses_and_metrics_to_log, on_step=True, on_epoch=True) # log per both # return losses_and_metrics return losses_and_metrics['loss'] ### STEP ENDS ARE PROBABLY NOT NECESSARY BUT KEEP AN EYE ON THEM IF MULTI-GPU TRAINING DOESN'T WORK # def training_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def validation_step_end(self, batch_parts): # return self._step_end(batch_parts) # # def _step_end(self, batch_parts): # # gpu_0_prediction = batch_parts.pred[0]['pred'] # # gpu_1_prediction = batch_parts.pred[1]['pred'] # N = len(batch_parts) # loss_dict = {} # for key in batch_parts[0]: # for i in range(N): # if key not in loss_dict.keys(): # loss_dict[key] = batch_parts[i] # else: # loss_dict[key] = batch_parts[i] # loss_dict[key] = loss_dict[key] / N # return loss_dict def vae_2_str(self, valence=None, arousal=None, affnet_expr=None, expr7=None, prefix=""): caption = "" if len(prefix) > 0: prefix += "_" if 
valence is not None and not np.isnan(valence).any(): caption += prefix + "valence= %.03f\n" % valence if arousal is not None and not np.isnan(arousal).any(): caption += prefix + "arousal= %.03f\n" % arousal if affnet_expr is not None and not np.isnan(affnet_expr).any(): caption += prefix + "expression= %s \n" % AffectNetExpressions(affnet_expr).name if expr7 is not None and not np.isnan(expr7).any(): caption += prefix +"expression= %s \n" % Expression7(expr7).name return caption def _create_visualizations_to_log(self, stage, visdict, values, step, indices=None, dataloader_idx=None, output_dir=None): mode_ = str(self.mode.name).lower() prefix = self._get_logging_prefix() output_dir = output_dir or self.inout_params.full_run_dir log_dict = {} for key in visdict.keys(): images = _torch_image2np(visdict[key]) if images.dtype == np.float32 or images.dtype == np.float64 or images.dtype == np.float16: images = np.clip(images, 0, 1) if indices is None: indices = np.arange(images.shape[0]) if isinstance(indices, int): indices = [indices,] if isinstance(indices, str) and indices == 'all': image = np.concatenate([images[i] for i in range(images.shape[0])], axis=1) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_all.png') # im2log = Image(image, caption=key) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image) else: im2log = _log_array_image(savepath, image) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log else: for i in indices: caption = key + f" batch_index={step}\n" caption += key + f" index_in_batch={i}\n" if self.emonet_loss is not None: if key == 'inputs': if mode_ + "_valence_input" in values.keys(): caption += self.vae_2_str( values[mode_ + "_valence_input"][i].detach().cpu().item(), values[mode_ + "_arousal_input"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_input"][i].detach().cpu().numpy()), prefix="emonet") + "\n" if 'va' in values.keys() and mode_ + "valence_gt" in values.keys(): # caption += self.vae_2_str( # values[mode_ + "_valence_gt"][i].detach().cpu().item(), # values[mode_ + "_arousal_gt"][i].detach().cpu().item(), caption += self.vae_2_str( values[mode_ + "valence_gt"][i].detach().cpu().item(), values[mode_ + "arousal_gt"][i].detach().cpu().item(), prefix="gt") + "\n" if 'expr7' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( expr7=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" if 'affectnetexp' in values.keys() and mode_ + "_expression_gt" in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values[mode_ + "_expression_gt"][i].detach().cpu().numpy(), prefix="gt") + "\n" elif 'geometry_detail' in key: if "emo_mlp_valence" in values.keys(): caption += self.vae_2_str( values["emo_mlp_valence"][i].detach().cpu().item(), values["emo_mlp_arousal"][i].detach().cpu().item(), prefix="mlp") if 'emo_mlp_expr_classification' in values.keys(): caption += "\n" + self.vae_2_str( affnet_expr=values["emo_mlp_expr_classification"][i].detach().cpu().argmax().numpy(), prefix="mlp") + "\n" elif key == 'output_images_' + mode_: if mode_ + "_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_valence_output"][i].detach().cpu().item(), values[mode_ + "_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_expression_output"][i].detach().cpu().numpy())) + "\n" elif key == 
'output_translated_images_' + mode_: if mode_ + "_translated_valence_output" in values.keys(): caption += self.vae_2_str(values[mode_ + "_translated_valence_output"][i].detach().cpu().item(), values[mode_ + "_translated_arousal_output"][i].detach().cpu().item(), np.argmax(values[mode_ + "_translated_expression_output"][i].detach().cpu().numpy())) + "\n" # elif key == 'output_images_detail': # caption += "\n" + self.vae_2_str(values["detail_output_valence"][i].detach().cpu().item(), # values["detail_output_valence"][i].detach().cpu().item(), # np.argmax(values["detail_output_expression"][ # i].detach().cpu().numpy())) savepath = Path(f'{output_dir}/{prefix}_{stage}/{key}/{self.current_epoch:04d}_{step:04d}_{i:02d}.png') image = images[i] # im2log = Image(image, caption=caption) if isinstance(self.logger, WandbLogger): im2log = _log_wandb_image(savepath, image, caption) elif self.logger is not None: im2log = _log_array_image(savepath, image, caption) else: im2log = _log_array_image(None, image, caption) name = prefix + "_" + stage + "_" + key if dataloader_idx is not None: name += "/dataloader_idx_" + str(dataloader_idx) log_dict[name] = im2log return log_dict def _visualization_checkpoint(self, verts, trans_verts, ops, uv_detail_normals, additional, batch_idx, stage, prefix, save=False): batch_size = verts.shape[0] visind = np.arange(batch_size) shape_images = self.deca.render.render_shape(verts, trans_verts) if uv_detail_normals is not None: detail_normal_images = F.grid_sample(uv_detail_normals.detach(), ops['grid'].detach(), align_corners=False) shape_detail_images = self.deca.render.render_shape(verts, trans_verts, detail_normal_images=detail_normal_images) else: shape_detail_images = None visdict = {} if 'images' in additional.keys(): visdict['inputs'] = additional['images'][visind] if 'images' in additional.keys() and 'lmk' in additional.keys(): visdict['landmarks_gt'] = util.tensor_vis_landmarks(additional['images'][visind], additional['lmk'][visind]) if 'images' in additional.keys() and 'predicted_landmarks' in additional.keys(): visdict['landmarks_predicted'] = util.tensor_vis_landmarks(additional['images'][visind], additional['predicted_landmarks'][visind]) if 'predicted_images' in additional.keys(): visdict['output_images_coarse'] = additional['predicted_images'][visind] if 'predicted_translated_image' in additional.keys() and additional['predicted_translated_image'] is not None: visdict['output_translated_images_coarse'] = additional['predicted_translated_image'][visind] visdict['geometry_coarse'] = shape_images[visind] if shape_detail_images is not None: visdict['geometry_detail'] = shape_detail_images[visind] if 'albedo_images' in additional.keys(): visdict['albedo_images'] = additional['albedo_images'][visind] if 'masks' in additional.keys(): visdict['mask'] = additional['masks'].repeat(1, 3, 1, 1)[visind] if 'albedo' in additional.keys(): visdict['albedo'] = additional['albedo'][visind] if 'predicted_detailed_image' in additional.keys() and additional['predicted_detailed_image'] is not None: visdict['output_images_detail'] = additional['predicted_detailed_image'][visind] if 'predicted_detailed_translated_image' in additional.keys() and additional['predicted_detailed_translated_image'] is not None: visdict['output_translated_images_detail'] = additional['predicted_detailed_translated_image'][visind] if 'shape_detail_images' in additional.keys(): visdict['shape_detail_images'] = additional['shape_detail_images'][visind] if 'uv_detail_normals' in additional.keys(): 
visdict['uv_detail_normals'] = additional['uv_detail_normals'][visind] * 0.5 + 0.5 if 'uv_texture_patch' in additional.keys(): visdict['uv_texture_patch'] = additional['uv_texture_patch'][visind] if 'uv_texture_gt' in additional.keys(): visdict['uv_texture_gt'] = additional['uv_texture_gt'][visind] if 'translated_uv_texture' in additional.keys() and additional['translated_uv_texture'] is not None: visdict['translated_uv_texture'] = additional['translated_uv_texture'][visind] if 'uv_vis_mask_patch' in additional.keys(): visdict['uv_vis_mask_patch'] = additional['uv_vis_mask_patch'][visind] if save: savepath = f'{self.inout_params.full_run_dir}/{prefix}_{stage}/combined/{self.current_epoch:04d}_{batch_idx:04d}.png' Path(savepath).parent.mkdir(exist_ok=True, parents=True) visualization_image = self.deca.visualize(visdict, savepath) return visdict, visualization_image[..., [2, 1, 0]] else: visualization_image = None return visdict, None def _get_trainable_parameters(self): trainable_params = [] if self.mode == DecaMode.COARSE: trainable_params += self.deca._get_coarse_trainable_parameters() elif self.mode == DecaMode.DETAIL: trainable_params += self.deca._get_detail_trainable_parameters() else: raise ValueError(f"Invalid deca mode: {self.mode}") if self.emotion_mlp is not None: trainable_params += list(self.emotion_mlp.parameters()) if self.emonet_loss is not None: trainable_params += self.emonet_loss._get_trainable_params() if self.deca.id_loss is not None: trainable_params += self.deca.id_loss._get_trainable_params() return trainable_params def configure_optimizers(self): # optimizer = torch.optim.Adam(self.parameters(), lr=1e-3) print("Configuring optimizer") trainable_params = self._get_trainable_parameters() if self.learning_params.optimizer == 'Adam': self.deca.opt = torch.optim.Adam( trainable_params, lr=self.learning_params.learning_rate, amsgrad=False) elif self.learning_params.optimizer == 'AdaBound': self.deca.opt = adabound.AdaBound( trainable_params, lr=self.learning_params.learning_rate, final_lr=self.learning_params.final_learning_rate ) elif self.learning_params.optimizer == 'SGD': self.deca.opt = torch.optim.SGD( trainable_params, lr=self.learning_params.learning_rate) else: raise ValueError(f"Unsupported optimizer: '{self.learning_params.optimizer}'") optimizers = [self.deca.opt] schedulers = [] if 'learning_rate_decay' in self.learning_params.keys(): scheduler = torch.optim.lr_scheduler.ExponentialLR(self.deca.opt, gamma=self.learning_params.learning_rate_decay) schedulers += [scheduler] if len(schedulers) == 0: return self.deca.opt return optimizers, schedulers class DECA(torch.nn.Module): """ The original DECA class which contains the encoders, FLAME decoder and the detail decoder. 
""" def __init__(self, config): """ :param config: corresponds to the model_params of DecaModule """ super().__init__() # ID-MRF perceptual loss (kept here from the original DECA implementation) self.perceptual_loss = None # Face Recognition loss self.id_loss = None # VGG feature loss self.vgg_loss = None self._reconfigure(config) self._reinitialize() def _dirty_init(self): pass # not used here, implemented for EMICA def get_input_image_size(self): return (self.config.image_size, self.config.image_size) def _reconfigure(self, config): self.config = config self.n_param = config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light # identity-based detail code self.n_detail = config.n_detail # emotion-based detail code (deprecated, not used by DECA or EMOCA) self.n_detail_emo = config.n_detail_emo if 'n_detail_emo' in config.keys() else 0 # count the size of the condition vector if 'detail_conditioning' in self.config.keys(): self.n_cond = 0 if 'globalpose' in self.config.detail_conditioning: self.n_cond += 3 if 'jawpose' in self.config.detail_conditioning: self.n_cond += 3 if 'identity' in self.config.detail_conditioning: self.n_cond += config.n_shape if 'expression' in self.config.detail_conditioning: self.n_cond += config.n_exp else: self.n_cond = 3 + config.n_exp self.mode = DecaMode[str(config.mode).upper()] self._create_detail_generator() self._init_deep_losses() self._setup_neural_rendering() def _reinitialize(self): self._create_model() self._setup_renderer() self._init_deep_losses() self.face_attr_mask = util.load_local_mask(image_size=self.config.uv_size, mode='bbx') def _get_num_shape_params(self): return self.config.n_shape def _init_deep_losses(self): """ Initialize networks for deep losses """ # TODO: ideally these networks should be moved out of the DECA class and into DecaModule, # but that would break backwards compatibility with the original DECA and we would not be able to load DECA's weights if 'mrfwr' not in self.config.keys() or self.config.mrfwr == 0: self.perceptual_loss = None else: if self.perceptual_loss is None: self.perceptual_loss = lossfunc.IDMRFLoss().eval() self.perceptual_loss.requires_grad_(False) # TODO, move this to the constructor if 'idw' not in self.config.keys() or self.config.idw == 0: self.id_loss = None else: if self.id_loss is None: id_metric = self.config.id_metric if 'id_metric' in self.config.keys() else None id_trainable = self.config.id_trainable if 'id_trainable' in self.config.keys() else False self.id_loss_start_step = self.config.id_loss_start_step if 'id_loss_start_step' in self.config.keys() else 0 self.id_loss = lossfunc.VGGFace2Loss(self.config.pretrained_vgg_face_path, id_metric, id_trainable) self.id_loss.freeze_nontrainable_layers() if 'vggw' not in self.config.keys() or self.config.vggw == 0: self.vgg_loss = None else: if self.vgg_loss is None: vgg_loss_batch_norm = 'vgg_loss_batch_norm' in self.config.keys() and self.config.vgg_loss_batch_norm self.vgg_loss = VGG19Loss(dict(zip(self.config.vgg_loss_layers, self.config.lambda_vgg_layers)), batch_norm=vgg_loss_batch_norm).eval() self.vgg_loss.requires_grad_(False) # TODO, move this to the constructor def _setup_renderer(self): self.render = SRenderY(self.config.image_size, obj_filename=self.config.topology_path, uv_size=self.config.uv_size) # .to(self.device) # face mask for rendering details mask = imread(self.config.face_mask_path).astype(np.float32) / 255. 
mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() self.uv_face_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) mask = imread(self.config.face_eye_mask_path).astype(np.float32) / 255. mask = torch.from_numpy(mask[:, :, 0])[None, None, :, :].contiguous() uv_face_eye_mask = F.interpolate(mask, [self.config.uv_size, self.config.uv_size]) self.register_buffer('uv_face_eye_mask', uv_face_eye_mask) # displacement mask is deprecated and not used by DECA or EMOCA if 'displacement_mask' in self.config.keys(): displacement_mask_ = 1 - np.load(self.config.displacement_mask).astype(np.float32) # displacement_mask_ = np.load(self.config.displacement_mask).astype(np.float32) displacement_mask_ = torch.from_numpy(displacement_mask_)[None, None, ...].contiguous() displacement_mask_ = F.interpolate(displacement_mask_, [self.config.uv_size, self.config.uv_size]) self.register_buffer('displacement_mask', displacement_mask_) ## displacement correction if os.path.isfile(self.config.fixed_displacement_path): fixed_dis = np.load(self.config.fixed_displacement_path) fixed_uv_dis = torch.tensor(fixed_dis).float() else: fixed_uv_dis = torch.zeros([512, 512]).float() print("Warning: fixed_displacement_path not found, using zero displacement") self.register_buffer('fixed_uv_dis', fixed_uv_dis) def uses_texture(self): if 'use_texture' in self.config.keys(): return self.config.use_texture return True # true by default def _disable_texture(self, remove_from_model=False): self.config.use_texture = False if remove_from_model: self.flametex = None def _enable_texture(self): self.config.use_texture = True def _has_neural_rendering(self): return hasattr(self.config, "neural_renderer") and bool(self.config.neural_renderer) def _setup_neural_rendering(self): if self._has_neural_rendering(): if self.config.neural_renderer.class_ == "StarGAN": print("Creating StarGAN neural renderer") self.image_translator = StarGANWrapper(self.config.neural_renderer.cfg, self.config.neural_renderer.stargan_repo) else: raise ValueError(f"Unsupported neural renderer class '{self.config.neural_renderer.class_}'") if self.image_translator.background_mode == "input": if self.config.background_from_input not in [True, "input"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be inpainted from the input") elif self.image_translator.background_mode == "black": if self.config.background_from_input not in [False, "black"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. " "Background should be black.") elif self.image_translator.background_mode == "none": if self.config.background_from_input not in ["none"]: raise NotImplementedError("The background mode of the neural renderer and deca is not synchronized. 
" "The background should not be handled") else: raise NotImplementedError(f"Unsupported mode of the neural renderer background: " f"'{self.image_translator.background_mode}'") def _create_detail_generator(self): # backwards compatibility hack: if hasattr(self, 'D_detail'): if (not "detail_conditioning_type" in self.config.keys() or self.config.detail_conditioning_type == "concat") \ and isinstance(self.D_detail, Generator): return if self.config.detail_conditioning_type == "adain" and isinstance(self.D_detail, GeneratorAdaIn): return print("[WARNING]: We are reinitializing the detail generator!") del self.D_detail # just to make sure we free the CUDA memory, probably not necessary if not "detail_conditioning_type" in self.config.keys() or str(self.config.detail_conditioning_type).lower() == "concat": # concatenates detail latent and conditioning (this one is used by DECA/EMOCA) print("Creating classic detail generator.") self.D_detail = Generator(latent_dim=self.n_detail + self.n_detail_emo + self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') elif str(self.config.detail_conditioning_type).lower() == "adain": # conditioning passed in through adain layers (this one is experimental and not currently used) print("Creating AdaIn detail generator.") self.D_detail = GeneratorAdaIn(self.n_detail + self.n_detail_emo, self.n_cond, out_channels=1, out_scale=0.01, sample_mode='bilinear') else: raise NotImplementedError(f"Detail conditioning invalid: '{self.config.detail_conditioning_type}'") def _create_model(self): # 1) build coarse encoder e_flame_type = 'ResnetEncoder' if 'e_flame_type' in self.config.keys(): e_flame_type = self.config.e_flame_type if e_flame_type == 'ResnetEncoder': self.E_flame = ResnetEncoder(outsize=self.n_param) elif e_flame_type[:4] == 'swin': self.E_flame = SwinEncoder(outsize=self.n_param, img_size=self.config.image_size, swin_type=e_flame_type) else: raise ValueError(f"Invalid 'e_flame_type' = {e_flame_type}") flame_cfg = copy.deepcopy(self.config) flame_cfg.n_shape = self._get_num_shape_params() if 'flame_mediapipe_lmk_embedding_path' not in flame_cfg.keys(): self.flame = FLAME(flame_cfg) else: self.flame = FLAME_mediapipe(flame_cfg) if self.uses_texture(): self.flametex = FLAMETex(self.config) else: self.flametex = None # 2) build detail encoder e_detail_type = 'ResnetEncoder' if 'e_detail_type' in self.config.keys(): e_detail_type = self.config.e_detail_type if e_detail_type == 'ResnetEncoder': self.E_detail = ResnetEncoder(outsize=self.n_detail + self.n_detail_emo) elif e_detail_type[:4] == 'swin': self.E_detail = SwinEncoder(outsize=self.n_detail + self.n_detail_emo, img_size=self.config.image_size, swin_type=e_detail_type) else: raise ValueError(f"Invalid 'e_detail_type'={e_detail_type}") self._create_detail_generator() # self._load_old_checkpoint() def _get_coarse_trainable_parameters(self): print("Add E_flame.parameters() to the optimizer") return list(self.E_flame.parameters()) def _get_detail_trainable_parameters(self): trainable_params = [] if self.config.train_coarse: trainable_params += self._get_coarse_trainable_parameters() print("Add E_flame.parameters() to the optimizer") trainable_params += list(self.E_detail.parameters()) print("Add E_detail.parameters() to the optimizer") trainable_params += list(self.D_detail.parameters()) print("Add D_detail.parameters() to the optimizer") return trainable_params def train(self, mode: bool = True): super().train(mode) if mode: if self.mode == DecaMode.COARSE: self.E_flame.train() # print("Setting 
E_flame to train") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") elif self.mode == DecaMode.DETAIL: if self.config.train_coarse: # print("Setting E_flame to train") self.E_flame.train() else: # print("Setting E_flame to eval") self.E_flame.eval() self.E_detail.train() # print("Setting E_detail to train") self.D_detail.train() # print("Setting D_detail to train") else: raise ValueError(f"Invalid mode '{self.mode}'") else: self.E_flame.eval() # print("Setting E_flame to eval") self.E_detail.eval() # print("Setting E_detail to eval") self.D_detail.eval() # print("Setting D_detail to eval") # these are set to eval no matter what, they're never being trained (the FLAME shape and texture spaces are pretrained) self.flame.eval() if self.flametex is not None: self.flametex.eval() return self def _load_old_checkpoint(self): """ Loads the DECA model weights from the original DECA implementation: https://github.com/YadiraF/DECA """ if self.config.resume_training: model_path = self.config.pretrained_modelpath print(f"Loading model state from '{model_path}'") checkpoint = torch.load(model_path) # model util.copy_state_dict(self.E_flame.state_dict(), checkpoint['E_flame']) # util.copy_state_dict(self.opt.state_dict(), checkpoint['opt']) # deprecate # detail model if 'E_detail' in checkpoint.keys(): util.copy_state_dict(self.E_detail.state_dict(), checkpoint['E_detail']) util.copy_state_dict(self.D_detail.state_dict(), checkpoint['D_detail']) # training state self.start_epoch = 0 # checkpoint['epoch'] self.start_iter = 0 # checkpoint['iter'] else: print('Start training from scratch') self.start_epoch = 0 self.start_iter = 0 def _encode_flame(self, images, **kwargs): return self.E_flame(images) def decompose_code(self, code): ''' config.n_shape + config.n_tex + config.n_exp + config.n_pose + config.n_cam + config.n_light ''' code_list = [] # num_list = [self.config.n_shape, self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, # self.config.n_light] num_list = [self._get_num_shape_params(), self.config.n_tex, self.config.n_exp, self.config.n_pose, self.config.n_cam, self.config.n_light] start = 0 for i in range(len(num_list)): code_list.append(code[:, start:start + num_list[i]]) start = start + num_list[i] # shapecode, texcode, expcode, posecode, cam, lightcode = code_list code_list[-1] = code_list[-1].reshape(code.shape[0], 9, 3) return code_list, None def displacement2normal(self, uv_z, coarse_verts, coarse_normals, detach=True): """ Converts the displacement uv map (uv_z) and coarse_verts to a normal map coarse_normals. 
""" batch_size = uv_z.shape[0] uv_coarse_vertices = self.render.world2uv(coarse_verts)#.detach() if detach: uv_coarse_vertices = uv_coarse_vertices.detach() uv_coarse_normals = self.render.world2uv(coarse_normals)#.detach() if detach: uv_coarse_normals = uv_coarse_normals.detach() uv_z = uv_z * self.uv_face_eye_mask # detail vertices = coarse vertice + predicted displacement*normals + fixed displacement*normals uv_detail_vertices = uv_coarse_vertices + \ uv_z * uv_coarse_normals + \ self.fixed_uv_dis[None, None, :,:] * uv_coarse_normals #.detach() dense_vertices = uv_detail_vertices.permute(0, 2, 3, 1).reshape([batch_size, -1, 3]) uv_detail_normals = util.vertex_normals(dense_vertices, self.render.dense_faces.expand(batch_size, -1, -1)) uv_detail_normals = uv_detail_normals.reshape( [batch_size, uv_coarse_vertices.shape[2], uv_coarse_vertices.shape[3], 3]).permute(0, 3, 1, 2) # uv_detail_normals = uv_detail_normals*self.uv_face_eye_mask + uv_coarse_normals*(1-self.uv_face_eye_mask) # uv_detail_normals = util.gaussian_blur(uv_detail_normals) return uv_detail_normals, uv_coarse_vertices def visualize(self, visdict, savepath, catdim=1): grids = {} for key in visdict: # print(key) if visdict[key] is None: continue grids[key] = torchvision.utils.make_grid( F.interpolate(visdict[key], [self.config.image_size, self.config.image_size])).detach().cpu() grid = torch.cat(list(grids.values()), catdim) grid_image = (grid.numpy().transpose(1, 2, 0).copy() * 255)[:, :, [2, 1, 0]] grid_image = np.minimum(np.maximum(grid_image, 0), 255).astype(np.uint8) if savepath is not None: cv2.imwrite(savepath, grid_image) return grid_image def create_mesh(self, opdict, dense_template): ''' vertices: [nv, 3], tensor texture: [3, h, w], tensor ''' i = 0 vertices = opdict['verts'][i].cpu().numpy() faces = self.render.faces[0].cpu().numpy() if 'uv_texture_gt' in opdict.keys(): texture = util.tensor2image(opdict['uv_texture_gt'][i]) else: texture = None uvcoords = self.render.raw_uvcoords[0].cpu().numpy() uvfaces = self.render.uvfaces[0].cpu().numpy() # save coarse mesh, with texture and normal map if 'uv_detail_normals' in opdict.keys(): normal_map = util.tensor2image(opdict['uv_detail_normals'][i]*0.5 + 0.5) # upsample mesh, save detailed mesh texture = texture[:, :, [2, 1, 0]] normals = opdict['normals'][i].cpu().numpy() displacement_map = opdict['displacement_map'][i].detach().cpu().numpy().squeeze() dense_vertices, dense_colors, dense_faces = util.upsample_mesh(vertices, normals, faces, displacement_map, texture, dense_template) else: normal_map = None dense_vertices = None dense_colors = None dense_faces = None return vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors def save_obj(self, filename, opdict, dense_template, mode ='detail'): if mode not in ['coarse', 'detail', 'both']: raise ValueError(f"Invalid mode '{mode}. 
Expected modes are: 'coarse', 'detail', 'both'") vertices, faces, texture, uvcoords, uvfaces, normal_map, dense_vertices, dense_faces, dense_colors \ = self.create_mesh(opdict, dense_template) if mode == 'both': if isinstance(filename, list): filename_coarse = filename[0] filename_detail = filename[1] else: filename_coarse = filename filename_detail = filename.replace('.obj', '_detail.obj') elif mode == 'coarse': filename_coarse = filename else: filename_detail = filename if mode in ['coarse', 'both']: util.write_obj(str(filename_coarse), vertices, faces, texture=texture, uvcoords=uvcoords, uvfaces=uvfaces, normal_map=normal_map) if mode in ['detail', 'both']: util.write_obj(str(filename_detail), dense_vertices, dense_faces, colors = dense_colors, inverse_face_order=True) class ExpDECAInterface(object): """ This serves as an interface for EMOCA-like classes that need to use a different sub class but retain the EMOCA functionality. See EMICA_v2 for an example. """ def _create_model(self): # E_flame should be fixed for expression EMOCA self.E_flame.requires_grad_(False) # 2) add expression decoder if self.config.expression_backbone == 'deca_parallel': ## a) Attach a parallel flow of FCs onto the original DECA coarse backbone. (Only the second FC head is trainable) self.E_expression = SecondHeadResnet(self.E_flame, self.n_exp_param, 'same') elif self.config.expression_backbone == 'deca_clone': ## b) Clones the original DECA coarse decoder (and the entire decoder will be trainable) - This is in final EMOCA. #TODO this will only work for Resnet. Make this work for the other backbones (Swin) as well. self.E_expression = ResnetEncoder(self.n_exp_param) # clone parameters of the ResNet self.E_expression.encoder.load_state_dict(self.E_flame.encoder.state_dict()) elif self.config.expression_backbone == 'emonet_trainable': # Trainable EmoNet instead of Resnet (deprecated) self.E_expression = EmoNetRegressor(self.n_exp_param) elif self.config.expression_backbone == 'emonet_static': # Frozen EmoNet with a trainable head instead of Resnet (deprecated)
self.E_expression = EmonetRegressorStatic(self.n_exp_param)
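A note on the logging convention visible in compute_loss above: the individual loss terms are summed into a single 'loss' scalar (the value training_step returns to PyTorch Lightning), the per-term values are re-keyed with a 'loss_' prefix, and metrics get a 'metric_' prefix so they are logged but never optimized. A minimal, dependency-free sketch of that convention; the dictionary contents below are invented for illustration:

import torch

def aggregate_and_prefix(losses: dict, metrics: dict) -> dict:
    """Sum all loss terms into one 'loss' scalar and prefix keys, mirroring compute_loss."""
    total = sum(losses.values())                       # the objective training_step returns
    out = {'loss_' + k: v for k, v in losses.items()}  # individual terms, prefixed for logging
    out['loss'] = total
    out.update({'metric_' + k: v for k, v in metrics.items()})  # logged, not optimized
    return out

losses = {'z_reg': torch.tensor(0.10), 'z_sym': torch.tensor(0.02)}
metrics = {'detail_translated_l1_0': torch.tensor(0.30)}
logged = aggregate_and_prefix(losses, metrics)
assert abs(logged['loss'].item() - 0.12) < 1e-6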
22
2023-11-07 20:13:32+00:00
24k
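The DECA record above also illustrates how decompose_code slices the flat vector predicted by the coarse encoder into shape, texture, expression, pose, camera and lighting segments, reshaping the final segment into 9 spherical-harmonics coefficients per color channel. A standalone sketch of that slicing; the segment sizes below are invented stand-ins for the config values (config.n_shape, n_tex, n_exp, n_pose, n_cam, n_light):

import torch

num_list = [100, 50, 50, 6, 3, 27]  # hypothetical sizes; 27 = 9 SH coefficients x 3 channels

def decompose(code: torch.Tensor) -> list:
    """Slice a flat parameter vector into per-component codes, as in DECA.decompose_code."""
    code_list, start = [], 0
    for n in num_list:
        code_list.append(code[:, start:start + n])
        start += n
    # the lighting code is reshaped to 9 spherical-harmonics coefficients per RGB channel
    code_list[-1] = code_list[-1].reshape(code.shape[0], 9, 3)
    return code_list

batch = torch.randn(4, sum(num_list))
shapecode, texcode, expcode, posecode, cam, lightcode = decompose(batch)
assert lightcode.shape == (4, 9, 3)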
hxz393/ConfigCenterComparer
ui/action_start.py
[ { "identifier": "COL_INFO", "path": "config/settings.py", "snippet": "COL_INFO = {\n \"name\": {\"col\": 0},\n \"group\": {\"col\": 1},\n \"key\": {\"col\": 2},\n \"pro_value\": {\"col\": 3},\n \"pro_time\": {\"col\": 4},\n \"pre_value\": {\"col\": 5},\n \"pre_time\": {\"col\": 6},\n \"test_value\": {\"col\": 7},\n \"test_time\": {\"col\": 8},\n \"dev_value\": {\"col\": 9},\n \"dev_time\": {\"col\": 10},\n \"consistency\": {\"col\": 11},\n \"skip\": {\"col\": 12},\n\n}" }, { "identifier": "get_resource_path", "path": "lib/get_resource_path.py", "snippet": "def get_resource_path(relative_path: Union[str, os.PathLike]) -> Optional[str]:\n \"\"\"\n 获取资源的绝对路径。这个函数适用于 PyInstaller 打包后的可执行文件。\n\n :type relative_path: Union[str, os.PathLike]\n :param relative_path: 相对路径,可以是字符串或 os.PathLike 对象。\n :rtype: Optional[str]\n :return: 资源的绝对路径,如果发生错误则返回 None。\n \"\"\"\n\n try:\n base_path = getattr(sys, '_MEIPASS', os.path.abspath(\".\"))\n return os.path.join(base_path, os.path.normpath(relative_path))\n except Exception:\n logger.exception(\"An error occurred while retrieving resource path\")\n return None" }, { "identifier": "execute_queries", "path": "module/execute_queries.py", "snippet": "def execute_queries(config_connection: Dict[str, Dict[str, Union[Dict[str, str], bool]]],\n config_main: Dict[str, str]) -> Tuple[Dict[str, Dict[str, str]], Dict[str, bool]]:\n \"\"\"\n 执行数据库查询并返回格式化后的结果和查询状态。\n\n 此函数接收数据库连接配置和主要配置参数。它首先根据主配置生成SQL查询语句,然后对每个数据库环境执行查询。查询结果将被格式化,并更新查询状态。\n\n :param config_connection: 数据库连接配置,包含环境名称和对应的数据库配置。\n :type config_connection: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n :param config_main: 主要配置参数,用于生成查询SQL语句。\n :type config_main: Dict[str, str]\n :return: 包含格式化查询结果的字典和每个环境的查询状态。\n :rtype: Tuple[Dict[str, Dict[str, str]], Dict[str, bool]]\n\n :example:\n >>> os.chdir(os.path.dirname(os.getcwd()))\n >>> connection = {\"dev\": {'mysql_on': True, 'ssh_on': False, 'mysql': {'host': '192.168.2.204', \"port\": \"3306\", \"user\": \"root\", \"password\": \"QeqAr:%R+s5:hYnr\", \"db\": \"ApolloConfigDB_dev\"}}}\n >>> main = {'config_center': 'Apollo', 'apollo_name': 'AppId', 'fix_name_before': '', 'fix_name_after': '', 'fix_name_left': '', 'fix_name_right': '',}\n >>> results, statuses = execute_queries(connection, main)\n >>> assert type(results) == dict\n >>> assert type(statuses) == dict\n >>> statuses\n >>> results\n \"\"\"\n query_statuses = {env_name: False for env_name in config_connection.keys()}\n formatted_results = {}\n\n try:\n query_sql = get_query_sql(config_main)\n\n for env_name, db_config in config_connection.items():\n # 获取指定环境的查询结果\n query_results = get_query_result(db_config, query_sql)\n logger.debug(f\"ENV: {env_name}, SQL query finished.\")\n if query_results:\n query_statuses[env_name] = True\n # 格式化查询结果\n format_query_results(query_results, env_name, config_main, formatted_results)\n else:\n logger.warning(f\"No results obtained from database query for environment: {env_name}\")\n\n # 通过对比过滤列表,得到是否过滤信息,更新到结果字典\n update_skip_status(formatted_results)\n # 查询各配置环境的值,得到一致性信息,更新到结果字典。只对比查询成功的环境\n update_consistency_status(formatted_results, query_statuses)\n logger.debug(\"Status update finished.\")\n\n return formatted_results, query_statuses\n except Exception:\n logger.exception(\"Exception occurred during executing queries\")\n return {}, query_statuses" }, { "identifier": "ConfigManager", "path": "ui/config_manager.py", "snippet": "class ConfigManager(QObject):\n \"\"\"\n 配置管理器类,负责管理和更新应用程序的配置信息。\n\n 该类包括获取和设置主配置、连接配置和跳过列表的方法,同时提供信号以通知配置更新。\n\n :ivar 
config_main_updated: 当主配置更新时发出的信号。\n :ivar config_connection_updated: 当连接配置更新时发出的信号。\n :ivar skip_list_updated: 当跳过列表更新时发出的信号。\n \"\"\"\n config_main_updated = pyqtSignal()\n config_connection_updated = pyqtSignal()\n skip_list_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._config_main, self._config_apollo, self._config_nacos = read_config_all()\n self._skip_list = read_file_to_list(CONFIG_SKIP_PATH) or []\n\n def get_config_main(self) -> Optional[Dict[str, str]]:\n \"\"\"\n 获取主配置的副本。\n\n :return: 包含主配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._config_main)\n except Exception:\n logger.exception(\"Failed to get config_main.\")\n return None\n\n def get_config_connection(self) -> Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]:\n \"\"\"\n 根据当前配置中心获取连接配置的副本。\n\n :return: 包含连接配置的字典,如果出现错误则返回 None。\n :rtype: Optional[Dict[str, Dict[str, Union[Dict[str, str], bool]]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n return copy.deepcopy(self._config_apollo)\n else:\n return copy.deepcopy(self._config_nacos)\n except Exception:\n logger.exception(\"Failed to get config_connection.\")\n return None\n\n def get_skip_list(self) -> Optional[List[str]]:\n \"\"\"\n 获取忽略列表的副本。\n\n :return: 包含跳过项的列表,如果出现错误则返回 None。\n :rtype: Optional[List[str]]\n \"\"\"\n try:\n return copy.deepcopy(self._skip_list)\n except Exception:\n logger.exception(\"Failed to get skip_list.\")\n return None\n\n def update_config_main(self, new_config: Dict[str, str]) -> None:\n \"\"\"\n 更新主配置。\n\n :param new_config: 新的主配置。\n :type new_config: Dict[str, str]\n \"\"\"\n try:\n self._config_main = new_config\n self.config_main_updated.emit()\n write_dict_to_json(CONFIG_MAIN_PATH, new_config)\n logger.info(\"Config updated: config_main\")\n except Exception:\n logger.exception(\"Failed to update config: config_main\")\n\n def update_config_connection(self, new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]) -> None:\n \"\"\"\n 更新连接配置。\n\n :param new_config: 新的连接配置。\n :type new_config: Dict[str, Dict[str, Union[Dict[str, str], bool]]]\n \"\"\"\n try:\n if self._config_main['config_center'] == 'Apollo':\n self._config_apollo = new_config\n write_dict_to_json(CONFIG_APOLLO_PATH, new_config)\n else:\n self._config_nacos = new_config\n write_dict_to_json(CONFIG_NACOS_PATH, new_config)\n self.config_connection_updated.emit()\n logger.info(\"Config updated: config_connection\")\n except Exception:\n logger.exception(\"Failed to update config: config_connection\")\n\n def update_skip_list(self, new_config: List[str]) -> None:\n \"\"\"\n 更新忽略列表。\n\n :param new_config: 新忽略列表。\n :type new_config: List[str]\n \"\"\"\n try:\n self._skip_list = new_config\n # 写入到配置文件\n self.skip_list_updated.emit()\n write_list_to_file(CONFIG_SKIP_PATH, new_config)\n logger.info(\"Config updated: skip_list\")\n except Exception:\n logger.exception(\"Failed to update config: skip_list\")" }, { "identifier": "FilterBar", "path": "ui/filter_bar.py", "snippet": "class FilterBar(QWidget):\n \"\"\"\n 过滤栏类,用于在用户界面中提供过滤和搜索功能。\n\n 此类创建了一个包含服务过滤、表格状态过滤和搜索框的组件,使用户能够根据不同条件过滤表格数据。\n\n :param lang_manager: 语言管理器,用于处理界面语言的更新。\n :type lang_manager: LangManager\n :param config_manager: 配置管理器,用于获取和更新配置信息。\n :type config_manager: ConfigManager\n :param table: 要应用过滤的表格。\n :type table: TableMain\n \"\"\"\n status_updated = pyqtSignal(str)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager,\n table: TableMain):\n 
super().__init__()\n # 实例化组件。\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.lang = self.lang_manager.get_lang()\n self.config_manager = config_manager\n self.table = table\n self.highlight_rows = []\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面。\n\n 创建并布局过滤栏中的所有组件,包括服务过滤、表格状态过滤和搜索值输入框。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 修改字体大小\n self.setStyleSheet(\"font-size: 14px;\")\n # 建立横向主布局\n self.layout = QHBoxLayout(self)\n # 创建过滤服务组件\n self._create_filter_app()\n # 创建过滤列表组件\n self._create_filter_table()\n # 创建搜索过滤值组件\n self._create_filter_value()\n # 设置布局的内容边距\n self.layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.layout)\n self.update_lang()\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 重新获取语言字典\n self.lang = self.lang_manager.get_lang()\n # 遍历filter_table下拉框中的所有项\n for index in range(self.filter_table_box.count()):\n # 检查数据值是否匹配\n if self.filter_table_box.itemData(index) == \"all\":\n # 更新显示值\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_3'])\n elif self.filter_table_box.itemData(index) == \"fully\":\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_4'])\n elif self.filter_table_box.itemData(index) == \"partially\":\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_5'])\n elif self.filter_table_box.itemData(index) == \"skip\":\n self.filter_table_box.setItemText(index, self.lang['ui.filter_bar_6'])\n elif self.filter_table_box.itemData(index) == \"fully+skip\":\n self.filter_table_box.setItemText(index, f\"{self.lang['ui.filter_bar_4']}+{self.lang['ui.filter_bar_6']}\")\n elif self.filter_table_box.itemData(index) == \"fully+partially\":\n self.filter_table_box.setItemText(index, f\"{self.lang['ui.filter_bar_4']}+{self.lang['ui.filter_bar_5']}\")\n elif self.filter_table_box.itemData(index) == \"fully+partially+skip\":\n self.filter_table_box.setItemText(index, f\"{self.lang['ui.filter_bar_4']}+{self.lang['ui.filter_bar_5']}+{self.lang['ui.filter_bar_6']}\")\n # 直接更新服务名过滤默认选项文字\n self.filter_app_box.setItemText(0, self.lang['ui.filter_bar_3'])\n # 更新其他文字\n self.filter_app_label.setText(self.lang['ui.filter_bar_1'])\n self.filter_table_label.setText(self.lang['ui.filter_bar_2'])\n self.filter_table_check_box.setText(self.lang['ui.filter_bar_7'])\n self.filter_value_label.setText(self.lang['ui.filter_bar_8'])\n self.filter_value_button.setText(self.lang['ui.filter_bar_9'])\n self.filter_reset_button.setText(self.lang['ui.filter_bar_10'])\n\n def _create_filter_app(self) -> None:\n \"\"\"\n 创建服务过滤组件。\n\n 此方法初始化服务过滤下拉框,并设置其事件处理函数。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建立标签,加入主布局\n self.filter_app_label = QLabel()\n self.layout.addWidget(self.filter_app_label)\n # 过滤服务下拉框\n self.filter_app_box = QComboBox()\n # 设置下拉框的尺寸策略和宽度\n self.filter_app_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n # 设置最大宽度,以免拉伸太长\n self.filter_app_box.setMinimumWidth(100)\n self.filter_app_box.setMaximumWidth(300)\n # 设置下拉框的事件处理\n self.filter_app_box.currentIndexChanged.connect(self.filter_table)\n # 设置下拉框的选项,通过函数填充\n self.filter_options_add()\n self.layout.addWidget(self.filter_app_box)\n # 创建一个 QFrame 作为分割线\n self._create_separator()\n\n def _create_filter_table(self) -> None:\n \"\"\"\n 创建表格状态过滤组件。\n\n 此方法初始化表格状态过滤下拉框和反向选择复选框,并设置它们的事件处理函数。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建立标签,加入主布局\n self.filter_table_label = QLabel()\n self.layout.addWidget(self.filter_table_label)\n # 
过滤列表下拉框\n self.filter_table_box = QComboBox()\n # 设置最小宽度,以免文字放不下\n self.filter_table_box.setMinimumWidth(270)\n # 设置下拉框的选项\n self.filter_table_box.addItem(\"\", \"all\")\n self.filter_table_box.addItem(\"\", \"fully\")\n self.filter_table_box.addItem(\"\", \"partially\")\n self.filter_table_box.addItem(\"\", \"skip\")\n self.filter_table_box.addItem(\"\", \"fully+skip\")\n self.filter_table_box.addItem(\"\", \"fully+partially\")\n self.filter_table_box.addItem(\"\", \"fully+partially+skip\")\n # 设置下拉框的事件处理\n self.filter_table_box.currentIndexChanged.connect(self.filter_table)\n self.layout.addWidget(self.filter_table_box)\n # 反向选择\n self.filter_table_check_box = QCheckBox()\n self.filter_table_check_box.stateChanged.connect(self.filter_table)\n self.layout.addWidget(self.filter_table_check_box)\n # 创建一个 QFrame 作为分割线\n self._create_separator()\n\n def _create_filter_value(self) -> None:\n \"\"\"\n 创建搜索过滤组件。\n\n 此方法初始化搜索框和相关按钮,并设置事件处理函数,以便用户可以根据特定文本过滤表格数据。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建立标签,加入主布局\n self.filter_value_label = QLabel()\n self.layout.addWidget(self.filter_value_label)\n # 搜索输入框\n self.filter_value_box = QLineEdit()\n self.filter_value_box.returnPressed.connect(self.filter_table)\n # 设置搜索输入框的尺寸策略和最小宽度\n self.filter_value_box.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed)\n self.filter_value_box.setMinimumWidth(100)\n self.layout.addWidget(self.filter_value_box)\n # 搜索按钮\n self.filter_value_button = QPushButton()\n self.filter_value_button.clicked.connect(self.filter_table)\n self.layout.addWidget(self.filter_value_button)\n # 重置按钮\n self.filter_reset_button = QPushButton()\n self.filter_reset_button.clicked.connect(self.filter_reset)\n self.layout.addWidget(self.filter_reset_button)\n\n def _create_separator(self) -> None:\n \"\"\"\n 创建界面中的分隔线。\n\n 此方法用于在过滤器工具栏中添加分隔线。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 建好之后,直接加入主布局\n separator = QFrame()\n separator.setFrameShape(QFrame.VLine)\n separator.setFrameShadow(QFrame.Raised)\n self.layout.addWidget(separator)\n\n def filter_options_add(self) -> None:\n \"\"\"\n 填充服务过滤下拉框选项。\n\n 此方法从配置数据中提取所有唯一的服务名称,并将它们添加到服务过滤下拉框中。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 先断开信号\n self.filter_app_box.currentIndexChanged.disconnect(self.filter_table)\n self.filter_app_box.setEnabled(False)\n # 清空过滤器并添加显示所有行的选项\n self.filter_app_box.clear()\n self.filter_app_box.addItem(self.lang['ui.filter_bar_3'], \"all\")\n self.filter_app_box.setCurrentIndex(0)\n # 使用集合和列表推导式去重并获取所有唯一项\n unique_items = {self.table.item(row, COL_INFO['name']['col']).text()\n for row in range(self.table.rowCount())\n if self.table.item(row, COL_INFO['name']['col'])}\n # 添加唯一项到下拉框\n [self.filter_app_box.addItem(item, item) for item in unique_items]\n # 对下拉框进行排序\n model = self.filter_app_box.model()\n model.sort(0)\n except Exception:\n logger.exception(\"Exception occurred in adding filter options\")\n self.status_updated.emit(self.lang['label_status_error'])\n finally:\n # 重新连接信号\n self.filter_app_box.currentIndexChanged.connect(self.filter_table)\n self.filter_app_box.setEnabled(True)\n\n def filter_reset(self) -> None:\n \"\"\"\n 重置过滤条件。\n\n 此方法将所有过滤组件重置为默认状态,以便用户可以重新开始过滤操作。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 禁更新\n self.table.setUpdatesEnabled(False)\n # 断开信号连接\n self.filter_app_box.currentIndexChanged.disconnect(self.filter_table)\n self.filter_table_box.currentIndexChanged.disconnect(self.filter_table)\n self.filter_table_check_box.stateChanged.disconnect(self.filter_table)\n # 重置 QComboBox 为第一个项,通常是 
\"--显示所有--\",\n self.filter_app_box.setCurrentIndex(0)\n self.filter_table_box.setCurrentIndex(0)\n # 还原反选框状态\n self.filter_table_check_box.setChecked(False)\n # 清空搜索框 QLineEdit\n self.filter_value_box.clear()\n # 手动调用过略器\n self.filter_table()\n except Exception:\n logger.exception(\"Error occurred while resetting filters\")\n self.status_updated.emit(self.lang['label_status_error'])\n finally:\n # 开启更新,重新连接信号\n self.table.setUpdatesEnabled(True)\n self.filter_app_box.currentIndexChanged.connect(self.filter_table)\n self.filter_table_box.currentIndexChanged.connect(self.filter_table)\n self.filter_table_check_box.stateChanged.connect(self.filter_table)\n\n @log_time\n def filter_table(self, rows: Optional[List[int]] = None) -> None:\n \"\"\"\n 应用过滤条件到表格。带有时间记录用于调试。\n\n 此方法根据用户设置的过滤条件(服务名称、表格状态、搜索文本)来决定哪些行在表格中可见。\n\n :param rows: 要应用过滤器的行号列表。如果为空则应用到整表。\n :type rows: Optional[List[int]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self.table.setUpdatesEnabled(False)\n # 获取颜色开关\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n # 检查是否有rows参数\n valid_rows = rows if isinstance(rows, list) else None\n # 计算可见的行数\n visible_rows = 0\n # 搜索框输入内容\n search_value = self.filter_value_box.text().strip().lower()\n # 在新的搜索开始之前,恢复每个单元格的原始样式。\n if color_switch == 'ON':\n # 针对忽略操作,改变表格颜色。\n if valid_rows:\n self.table.apply_color_to_table(valid_rows)\n # 针对高亮操作\n elif self.highlight_rows:\n self._reset_styles()\n\n # 如果没有传入行列表,则应用到整个列表\n for row in valid_rows if valid_rows else range(self.table.rowCount()):\n consistency_data = self.table.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)\n skip_data = self.table.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)\n name_data = self.table.item(row, COL_INFO['name']['col']).text()\n\n # 先匹配快速过滤,匹配过滤条件时为True,隐藏匹配的行\n table_match = self._get_table_match(consistency_data, skip_data)\n if table_match:\n self.table.setRowHidden(row, True)\n continue\n\n # 匹配选择所有或者选择服务名时为True,不设隐藏\n app_match = self._get_app_match(name_data)\n if not app_match:\n self.table.setRowHidden(row, True)\n continue\n\n # 匹配搜索条件或不输入时为True或结果列表,不设隐藏\n search_match = self._get_search_match(row, search_value)\n if not search_match:\n self.table.setRowHidden(row, True)\n continue\n\n # 仅当条件都匹配时才显示行\n self.table.setRowHidden(row, False)\n visible_rows += 1\n\n # 对单元格应用颜色\n if color_switch == 'ON' and isinstance(search_match, list):\n self.highlight_rows.append(self._generate_index_key(row))\n for column in search_match:\n self.table.apply_color(row, COLOR_HIGHLIGHT, column)\n\n # 更新状态栏信息展示过滤后的行数\n self.status_updated.emit(f\"{visible_rows} {self.lang['ui.filter_bar_11']}\")\n except Exception:\n logger.exception(\"Exception in filtering table\")\n self.status_updated.emit(self.lang['label_status_error'])\n finally:\n self.table.setUpdatesEnabled(True)\n\n def _get_app_match(self, name_data: str) -> bool:\n \"\"\"\n 检查当前行是否与选定的应用服务匹配。\n\n :param name_data: 行中的应用服务名称。\n :type name_data: str\n\n :return: 如果当前行与选定的应用服务匹配,则返回 True。\n :rtype: bool\n \"\"\"\n selected_app = self.filter_app_box.currentData()\n return True if selected_app == \"all\" or selected_app == name_data else False\n\n def _get_table_match(self,\n consistency_data: str,\n skip_data: str) -> bool:\n \"\"\"\n 根据表格状态过滤条件检查当前行是否匹配。\n\n :param consistency_data: 一致性状态数据。\n :type consistency_data: str\n :param skip_data: 跳过状态数据。\n :type skip_data: str\n\n :return: 如果当前行符合表格状态过滤条件,则返回 True。\n :rtype: bool\n \"\"\"\n selected_table = self.filter_table_box.currentData()\n reverse_checked = 
self.filter_table_check_box.isChecked()\n # 直接对比较结果赋值bool,相等则为True\n fully_match = consistency_data == \"fully\"\n partially_match = consistency_data == \"partially\"\n skip_match = skip_data == \"yes\"\n # 根据快速过滤条件,返回组合比较结果。\n if selected_table == \"fully\":\n return fully_match if not reverse_checked else not fully_match\n elif selected_table == \"partially\":\n return partially_match if not reverse_checked else not partially_match\n elif selected_table == 'skip':\n return skip_match if not reverse_checked else not skip_match\n elif selected_table == \"fully+skip\":\n return (fully_match or skip_match) if not reverse_checked else not (fully_match or skip_match)\n elif selected_table == \"fully+partially\":\n return (fully_match or partially_match) if not reverse_checked else not (fully_match or partially_match)\n elif selected_table == \"fully+partially+skip\":\n return (fully_match or partially_match or skip_match) if not reverse_checked else not (fully_match or partially_match or skip_match)\n else:\n return False if not reverse_checked else True\n\n def _get_search_match(self,\n row: int,\n search_value: str) -> Union[bool, List[int]]:\n \"\"\"\n 检查当前行是否与搜索条件匹配。\n\n :param row: 表格中的行号。\n :type row: int\n :param search_value: 需要搜索的值。\n :type search_value: str\n\n :return: 如果搜索值为空,则返回 True。否则返回空列表或匹配列号的列表\n :rtype: Union[bool, List[int]\n \"\"\"\n # 如果搜索值为空,则无需进行搜索\n if not search_value:\n return True\n # 禁止更新。主要着色时操作太多。\n self.table.setUpdatesEnabled(False)\n match_col = []\n # 遍历每列的内容\n for column in range(self.table.columnCount()):\n # 不搜索隐藏的列\n if self.table.isColumnHidden(column):\n continue\n\n # 获取单元格内容\n item = self.table.item(row, column)\n item_text = item.text().lower() if item else ''\n\n # 单元格列号插入到返回列表\n if search_value in item_text:\n match_col.append(column)\n\n # 启用更新\n self.table.setUpdatesEnabled(True)\n return match_col\n\n def _generate_index_key(self, row: int) -> str:\n \"\"\"\n 生成索引键。\n\n 此方法根据给定的行号生成一个唯一的索引键。索引键由行中特定列的值组合而成,用于标识表格中的唯一行。\n\n :param row: 表格中的行号。\n :type row: int\n\n :return: 生成的索引键。\n :rtype: str\n \"\"\"\n name = self.table.item(row, COL_INFO['name']['col']).text()\n group = self.table.item(row, COL_INFO['group']['col']).text()\n key = self.table.item(row, COL_INFO['key']['col']).text()\n return f\"{name}+{group}+{key}\"\n\n def _reset_styles(self) -> None:\n \"\"\"\n 重置表格样式到记录的状态。\n\n 此方法遍历表格中的所有行,对于每一行,恢复其单元格的背景颜色到初始状态。这通常在过滤条件发生变化或重置时调用。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n\n try:\n reset_rows = []\n # 还原字典为空则跳过\n if not self.highlight_rows:\n return\n # 遍历每行,但跳过隐藏行,因为隐藏行必然没有被更改颜色。\n # 反过来遍历还原字典并不可行,因为索引键并不储存在表格中,\n # 而且索引键和行号并不能形成牢固对应关系(行号可变),\n # 所以遍历所有行,但只操作匹配的单元格,最大程度减少对单元格的操作。\n for row in range(self.table.rowCount()):\n if self.table.isRowHidden(row):\n continue\n # 生成当行索引键,并检测是否在还原列表中。\n elif self._generate_index_key(row) in self.highlight_rows:\n # 索引键在还原字典中找到时,向reset_rows插入行数\n reset_rows.append(row)\n # 最后一次性还原单元格本来颜色。\n self.table.apply_color_to_table(reset_rows)\n # 完成后,清空还原列表。\n self.highlight_rows.clear()\n except Exception:\n logger.exception(\"Error occurred while resetting styles\")" }, { "identifier": "LangManager", "path": "ui/lang_manager.py", "snippet": "class LangManager(QObject):\n \"\"\"\n 语言管理类,用于管理和更新应用程序的语言字典。\n\n 此类继承自 QObject,可发出语言更新的信号。它通过 `get_lang_dict` 函数获取当前语言字典,并提供了更新语言的功能。\n\n :ivar _lang_dict: 当前使用的语言字典。\n :vartype _lang_dict: dict\n \"\"\"\n lang_updated = pyqtSignal()\n\n def __init__(self):\n super().__init__()\n self._lang_dict = get_lang_dict()\n\n def get_lang(self) -> Optional[Dict[str, 
str]]:\n \"\"\"\n 获取当前使用的语言字典的副本。\n\n :return: 当前语言字典的深拷贝。\n :rtype: Optional[Dict[str, str]]\n \"\"\"\n try:\n return copy.deepcopy(self._lang_dict)\n except Exception:\n logger.exception(\"Failed to retrieve language dictionary.\")\n return None\n\n def update_lang(self, new_lang: str) -> None:\n \"\"\"\n 更新当前使用的语言字典。\n\n :param new_lang: 新语言的标识符。\n :type new_lang: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n self._lang_dict = LANG_DICTS.get(new_lang, \"English\")\n self.lang_updated.emit()\n logger.info(f\"Language changed to {new_lang}\")\n except Exception:\n logger.exception(f\"Failed to changed language to {new_lang}\")" }, { "identifier": "message_show", "path": "ui/message_show.py", "snippet": "def message_show(message_type: str,\n text: str) -> None:\n \"\"\"\n 显示指定类型的消息框。\n\n 根据提供的消息类型和文本内容,显示相应的消息框。支持的消息类型包括 'Critical'、'Warning' 和 'Information'。\n\n :param message_type: 消息类型,支持 'Critical'、'Warning' 和 'Information'。\n :type message_type: str\n :param text: 消息框中显示的文本内容。\n :type text: str\n :return: 无返回值。\n :rtype: None\n \"\"\"\n try:\n msg_box = QMessageBox()\n msg_box.setText(text)\n msg_box.setStandardButtons(QMessageBox.Ok)\n msg_box.setWindowTitle(message_type)\n\n if message_type == 'Critical':\n msg_box.setIcon(QMessageBox.Critical)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-error-26')))\n elif message_type == 'Warning':\n msg_box.setIcon(QMessageBox.Warning)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-do-not-disturb-26')))\n elif message_type == 'Information':\n msg_box.setIcon(QMessageBox.Information)\n msg_box.setWindowIcon(QIcon(get_resource_path('media/icons8-about-26')))\n else:\n logger.warning(\"Invalid message type provided.\")\n\n msg_box.exec_()\n except Exception:\n logger.exception(\"An error occurred while displaying the message box\")" }, { "identifier": "TableMain", "path": "ui/table_main.py", "snippet": "class TableMain(QTableWidget):\n \"\"\"\n 主表格类,用于展示和管理数据行。\n\n 此类继承自 PyQt5 的 QTableWidget,提供了丰富的数据展示和管理功能。包括但不限于数据的展示、行的颜色标记、右键菜单功能以及快捷键支持。\n 通过与 LangManager 和 ConfigManager 的集成,支持动态语言切换和配置管理。\n\n :param lang_manager: 用于管理界面语言的 LangManager 实例。\n :type lang_manager: LangManager\n :param config_manager: 用于管理配置的 ConfigManager 实例。\n :type config_manager: ConfigManager\n\n :author: assassing\n :contact: https://github.com/hxz393\n :copyright: Copyright 2023, hxz393. 
保留所有权利。\n \"\"\"\n status_updated = pyqtSignal(str)\n filter_updated = pyqtSignal(list)\n\n def __init__(self,\n lang_manager: LangManager,\n config_manager: ConfigManager):\n super().__init__()\n self.lang_manager = lang_manager\n self.lang_manager.lang_updated.connect(self.update_lang)\n self.config_manager = config_manager\n # 实例化用到的组件\n self.actionCopy = ActionCopy(self.lang_manager, self)\n self.actionSave = ActionSave(self.lang_manager, self)\n self.actionSkip = ActionSkip(self.lang_manager, self.config_manager, self)\n self.actionUnskip = ActionUnskip(self.lang_manager, self.config_manager, self)\n # 手动连接实例化的组件信号到转发函数\n self.actionCopy.status_updated.connect(self.forward_status)\n self.actionSave.status_updated.connect(self.forward_status)\n self.actionSkip.status_updated.connect(self.forward_status)\n self.actionSkip.filter_updated.connect(self.forward_filter)\n self.actionUnskip.status_updated.connect(self.forward_status)\n self.actionUnskip.filter_updated.connect(self.forward_filter)\n self.initUI()\n\n def initUI(self) -> None:\n \"\"\"\n 初始化用户界面。\n\n 此方法负责设置表格的基本属性,如列数、表头标签、选择行为等。还包括对特定列的隐藏和宽度调整策略的设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 先运行语言更新,里面有表头定义\n self.update_lang()\n self.hidden_cols = [\"pro_time\", \"pre_time\", \"test_time\", \"dev_time\"]\n self.resize_cols = [\"name\", \"group\", \"consistency\", \"skip\"]\n # 配置表格基本属性\n self.setColumnCount(len(self.column_headers))\n self.setHorizontalHeaderLabels(self.column_headers)\n self.setEditTriggers(QTableWidget.NoEditTriggers)\n self.setSelectionBehavior(QTableWidget.SelectItems)\n # 隐藏垂直表头\n self.verticalHeader().setVisible(False)\n # 启用自动换行,没生效\n self.setWordWrap(True)\n self.setTextElideMode(Qt.ElideNone)\n # 为表头视图设置上下文菜单事件\n self.horizontalHeader().setContextMenuPolicy(Qt.CustomContextMenu)\n self.horizontalHeader().customContextMenuRequested.connect(self._header_context_menu)\n # 为表单设置上下文菜单事件\n self.setContextMenuPolicy(Qt.CustomContextMenu)\n self.customContextMenuRequested.connect(self._cell_context_menu)\n # 隐藏指定列\n [self.hideColumn(COL_INFO[i]['col']) for i in self.hidden_cols]\n # 设置表宽度策略\n self.set_header_resize()\n\n def set_header_resize(self):\n \"\"\"\n 设置表头的列宽度和调整策略。\n\n 此方法负责定义表头列的宽度调整策略和其他相关属性。它设置了表头列的默认宽度、是否可拖动以及列的自动调整策略。\n 例如,某些列被设置为根据内容自动调整宽度,而其他列则被设置为可伸缩以适应表格的大小。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n # 设置默认列宽度,列宽调整策略,列可拖动\n self.horizontalHeader().setSectionsMovable(True)\n self.horizontalHeader().setSectionResizeMode(QHeaderView.Stretch)\n self.horizontalHeader().setMinimumSectionSize(100)\n # 设置要自动调整宽度的列\n [self.horizontalHeader().setSectionResizeMode(COL_INFO[i]['col'], QHeaderView.ResizeToContents) for i in self.resize_cols]\n\n def update_lang(self) -> None:\n \"\"\"\n 更新界面语言设置。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.lang = self.lang_manager.get_lang()\n self.column_headers = [\n self.lang['ui.table_main_1'],\n self.lang['ui.table_main_2'],\n self.lang['ui.table_main_3'],\n self.lang['ui.dialog_settings_connection_2'],\n f\"{self.lang['ui.dialog_settings_connection_2']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_3'],\n f\"{self.lang['ui.dialog_settings_connection_3']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_4'],\n f\"{self.lang['ui.dialog_settings_connection_4']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.dialog_settings_connection_5'],\n f\"{self.lang['ui.dialog_settings_connection_5']} {self.lang['ui.table_main_4']}\",\n self.lang['ui.table_main_5'],\n self.lang['ui.table_main_6'],\n ]\n 
# 重新应用到表头\n self.setHorizontalHeaderLabels(self.column_headers)\n # 定义数据和显示映射的字典\n consistency_status_mapping = {\n \"inconsistent\": self.lang['ui.action_start_8'],\n \"fully\": self.lang['ui.action_start_9'],\n \"partially\": self.lang['ui.action_start_10'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n skip_status_mapping = {\n \"no\": self.lang['ui.action_start_11'],\n \"yes\": self.lang['ui.action_start_12'],\n \"unknown\": self.lang['ui.action_start_13'],\n }\n for row in range(self.rowCount()):\n # 更新忽略状态文字\n self._update_item_text(row, \"skip\", skip_status_mapping)\n # 更新一致性状态文字\n self._update_item_text(row, \"consistency\", consistency_status_mapping)\n\n def _update_item_text(self,\n row: int,\n user_data_key: str,\n text_mapping: Dict[str, str]) -> None:\n \"\"\"\n 根据提供的文本映射更新指定行的项文本。\n\n 此方法用于更新表格或列表中特定行的文本。它根据用户数据键(user_data_key)获取对应行的项,然后根据提供的文本映射(text_mapping)更新该项的文本。\n\n :param row: 要更新的行索引。\n :type row: int\n :param user_data_key: 用于获取项的用户数据键。\n :type user_data_key: str\n :param text_mapping: 用户数据到文本的映射字典。\n :type text_mapping: Dict[str, str]\n\n :return: 无返回值。\n :rtype: None\n \"\"\"\n item = self.item(row, COL_INFO[user_data_key]['col'])\n if item is not None:\n user_data = item.data(Qt.UserRole)\n if user_data in text_mapping:\n item.setText(text_mapping[user_data])\n\n def keyPressEvent(self, event: QKeyEvent) -> None:\n \"\"\"\n 处理键盘事件。\n\n 此方法用于处理键盘事件,特别是复制功能的快捷键。如果按下 Ctrl+C,则复制选中的单元格内容。\n\n :param event: 键盘事件对象。\n :type event: QKeyEvent\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n if event.key() == Qt.Key_C and (event.modifiers() & Qt.ControlModifier):\n self.actionCopy.action_copy()\n else:\n super().keyPressEvent(event)\n\n def _cell_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表格单元格的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n menu.addAction(self.actionCopy.action_copy)\n separator = QAction(menu)\n separator.setSeparator(True)\n menu.addAction(separator)\n menu.addAction(self.actionSkip.action_skip)\n menu.addAction(self.actionUnskip.action_unskip)\n sep = QAction(menu)\n sep.setSeparator(True)\n menu.addAction(sep)\n menu.addAction(self.actionSave.action_save)\n menu.exec_(self.viewport().mapToGlobal(pos))\n\n def _header_context_menu(self, pos: QPoint) -> None:\n \"\"\"\n 实现表头的右键菜单功能。\n\n :param pos: 右键点击的位置。\n :type pos: QPoint\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n menu = QMenu(self)\n # 动态创建一个菜单项,用于隐藏/显示列\n for index in range(self.columnCount()):\n column_name = self.horizontalHeaderItem(index).text()\n action = menu.addAction(f\"{column_name}\")\n action.setCheckable(True)\n action.setChecked(not self.isColumnHidden(index))\n action.setData(index)\n action.triggered.connect(self._toggle_column_visibility)\n # 在鼠标右键点击位置显示菜单\n menu.exec_(self.horizontalHeader().viewport().mapToGlobal(pos))\n\n def _toggle_column_visibility(self) -> None:\n \"\"\"\n 根据用户选择,切换列的可见性。\n\n 此方法用于根据用户在上下文菜单中的选择,显示或隐藏特定的列。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n action = self.sender()\n if isinstance(action, QAction):\n column_index = action.data()\n if action.isChecked():\n self.showColumn(column_index)\n else:\n self.hideColumn(column_index)\n\n def add_row(self, data: List[List[str]]) -> None:\n \"\"\"\n 向表格中添加一行数据。\n\n :param data: 要添加的数据列表,每个元素是一个列表,第一个元素代表显示的字符串,第二个元素代表附加数据。\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n row_position = 0\n try:\n # 获取最后行数\n row_position = self.rowCount()\n # 插入最后一行\n self.insertRow(row_position)\n # 插入单元格数据\n 
self._fill_row_data(row_position, data)\n except Exception:\n logger.exception(f\"Error occurred while adding a new row at position {row_position}\")\n self.removeRow(row_position)\n\n def _fill_row_data(self,\n row_position: int,\n data: List[List[str]]) -> None:\n \"\"\"\n 填充指定行的数据。\n\n :param row_position: 行位置\n :param data: 行数据\n :type row_position: int\n :type data: List[List[str]]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n for column, (display_text, user_data) in enumerate(data):\n # 默认设置显示字符串,也叫 Qt.DisplayRole。获取方法item.text() 或 item.data(Qt.DisplayRole)\n item = QTableWidgetItem(str(display_text))\n # 设置实际数据,也叫 Qt.UserRole。获取方法 item.data(Qt.UserRole)\n item.setData(Qt.UserRole, user_data)\n # 设置单元格不可编辑状态\n item.setFlags(item.flags() & ~Qt.ItemIsEditable)\n # 正常表格插入方法\n self.setItem(row_position, column, item)\n\n @log_time\n def apply_color_to_table(self, rows: List[int] = None) -> None:\n \"\"\"\n 对整个表格进行着色。通常只有初始化时才不带rows参数,以应用到整表。\n\n :param rows: 可选,要应用颜色的行号列表。\n :type rows: List[int], optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n color_switch = self.config_manager.get_config_main().get('color_set', 'ON')\n if color_switch == 'OFF':\n return\n\n if rows is None or not isinstance(rows, list):\n rows = range(self.rowCount())\n\n try:\n for row in rows:\n # 不给隐藏行设置颜色\n if self.isRowHidden(row):\n continue\n\n self._process_row_for_color(row)\n except Exception:\n logger.exception(\"Exception in apply_color_to_table method\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def _process_row_for_color(self, row: int) -> None:\n \"\"\"\n 根据一致性、跳过状态和是否为空值给单行应用颜色。\n\n :param row: 行号,对每行进行颜色处理。\n :type row: int\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n consistency_data = self.item(row, COL_INFO['consistency']['col']).data(Qt.UserRole)\n skip_data = self.item(row, COL_INFO['skip']['col']).data(Qt.UserRole)\n # 忽略状态为是时设置颜色\n if skip_data == 'yes':\n self.apply_color(row, COLOR_SKIP)\n return\n\n # 根据一致性值设置颜色\n if consistency_data == 'fully':\n self.apply_color(row, COLOR_CONSISTENCY_FULLY)\n elif consistency_data == 'partially':\n self.apply_color(row, COLOR_CONSISTENCY_PARTIALLY)\n else:\n self.apply_color(row, COLOR_DEFAULT)\n\n # 遍历指定列检查空值,并赋予颜色\n for column in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(column):\n if self.item(row, column).text() == 'None':\n self.apply_color(row, COLOR_EMPTY, column)\n\n def apply_color(self,\n row: int,\n color: str,\n column: Optional[int] = None) -> None:\n \"\"\"\n 为指定的行或单元格应用颜色。\n\n :param row: 要着色的行索引。\n :type row: int\n :param color: 要应用的颜色。\n :type color: str\n :param column: 可选,指定要着色的列索引,如果未指定,则对整行应用颜色。\n :type column: int, optional\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n color_brush = QBrush(QColor(color))\n if column is not None:\n self.item(row, column).setBackground(color_brush)\n else:\n for col in range(self.columnCount()):\n # 不给隐藏列设置颜色\n if not self.isColumnHidden(col):\n self.item(row, col).setBackground(color_brush)\n except Exception:\n logger.exception(\"Error occurred while applying color to a cell\")\n self.status_updated.emit(self.lang['label_status_error'])\n\n def clear(self) -> None:\n \"\"\"\n 清空表格中的所有行。\n\n 此方法用于清除表格中的所有数据,通常在数据更新或重置时使用。\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n try:\n # 禁用更新以提高性能\n self.setUpdatesEnabled(False)\n # 首先清除所有单元格的内容\n self.clearContents()\n # 将行数设置为0,从而删除所有行\n self.setRowCount(0)\n except Exception:\n logger.exception(\"Error occurred while clearing the table.\")\n self.status_updated.emit(self.lang['label_status_error'])\n 
finally:\n # 确保即使发生错误也要重新启用更新\n self.setUpdatesEnabled(True)\n\n def forward_status(self, message: str) -> None:\n \"\"\"\n 用于转发状态信号。\n\n :param message: 要转发的消息。\n :type message: str\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.status_updated.emit(message)\n\n def forward_filter(self, rows: List[int]) -> None:\n \"\"\"\n 用于转发过滤信号。\n\n :param rows: 要转发的行列表。\n :type rows: List[int]\n\n :rtype: None\n :return: 无返回值。\n \"\"\"\n self.filter_updated.emit(rows)\n\n def get_table_data(self) -> Dict[int, Dict[str, str]]:\n \"\"\"\n 用于获取表格所有数据。\n\n :rtype: Dict[int, Dict[str, str]]\n :return: 返回嵌套字典。键为行号,值为字典,字典中键为列标题,值为内容。类似于:{882: {'服务': 'web', '分组': 'application'}, 883: {'服务': 'web', '分组': 'application'}}\n \"\"\"\n return {row: {self.horizontalHeaderItem(col).text(): self.item(row, col).data(Qt.UserRole)\n for col in range(self.columnCount())}\n for row in range(self.rowCount())}" } ]
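A note on the quick-filter logic at the top of this context list (FilterBar): every branch has the shape `X if not reverse_checked else not X`, which is just XOR with the reverse flag. A minimal sketch of an equivalent decision table, assuming the same option and status strings (the dict and function names here are illustrative, not part of the repo):

# Sketch of FilterBar's quick-filter combination, assuming the original option strings.
OPTION_STATUSES = {
    "fully": {"fully"},
    "partially": {"partially"},
    "skip": {"skip"},
    "fully+skip": {"fully", "skip"},
    "fully+partially": {"fully", "partially"},
    "fully+partially+skip": {"fully", "partially", "skip"},
}

def table_match(selected_table: str, consistency_data: str, skip_data: str,
                reverse_checked: bool) -> bool:
    """True when a row matches the selected quick-filter option."""
    statuses = OPTION_STATUSES.get(selected_table, set())
    matched = (consistency_data in statuses
               or ("skip" in statuses and skip_data == "yes"))
    # `matched if not reverse_checked else not matched` is matched XOR reverse.
    return matched ^ reverse_checked

Keeping the option-to-status mapping in one dict makes a new combination a one-line change instead of another elif branch.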
import logging from typing import Dict, List from PyQt5.QtCore import Qt, QThread, pyqtSignal, QObject from PyQt5.QtGui import QIcon from PyQt5.QtWidgets import QAction, QHeaderView from config.settings import COL_INFO from lib.get_resource_path import get_resource_path from module.execute_queries import execute_queries from ui.config_manager import ConfigManager from ui.filter_bar import FilterBar from ui.lang_manager import LangManager from ui.message_show import message_show from ui.table_main import TableMain
15024
def initialize(self) -> None: """ 初始化界面和状态,在开始操作前执行。 此方法用于设置 UI 元素的初始状态,如禁用按钮、清空表格等。 :rtype: None :return: 无返回值。 """ logger.info('Start running') # 状态栏发送提示消息 self.status_updated.emit(self.lang['ui.action_start_3']) # 开始按钮不可点击 self.action_start.setEnabled(False) # 禁用表格排序 self.table.setSortingEnabled(False) # 禁用表格更新 self.table.setUpdatesEnabled(False) # 禁用过滤栏组件 self.filter_bar.filter_app_box.setEnabled(False) self.filter_bar.filter_table_box.setEnabled(False) self.filter_bar.filter_table_check_box.setEnabled(False) self.filter_bar.filter_value_box.setEnabled(False) self.filter_bar.filter_value_button.setEnabled(False) self.filter_bar.filter_reset_button.setEnabled(False) # 清空表格数据 self.table.clear() # 初始化表宽 self.table.set_header_resize() logger.debug('Initialization finished') def table_insert(self, table_rows: List[List[List[str]]]) -> None: """ 将查询结果插入到主表格中。 此方法接收查询结果作为输入,并将其格式化后插入到应用程序的主表格中。每个元素是一个三重列表,表示表格的一行数据。 :param table_rows: 待插入的表格数据,每个元素代表一行数据。 :type table_rows: List[List[List[str]]] :rtype: None :return: 无返回值。 """ for row in table_rows: self.table.add_row(row) logger.debug('Table filling finished.') def table_column_hide(self, query_statuses: Dict[str, bool]) -> None: """ 根据查询状态决定是否隐藏表格的某些列。 :param query_statuses: 各查询状态的字典,键为环境名,值为布尔值指示是否开启。 :type query_statuses: Dict[str, bool] :rtype: None :return: 无返回值。 """ # env_name类似'PRO_CONFIG',COL_INFO中的键类似'pro_value',env_switch是布尔值。 for env_name, env_switch in query_statuses.items(): # 建立env_name和COL_INFO中的键的映射 column_name_mapping = {'PRO_CONFIG': 'pro_value', 'PRE_CONFIG': 'pre_value', 'TEST_CONFIG': 'test_value', 'DEV_CONFIG': 'dev_value'} # 获取列序号 col = COL_INFO[column_name_mapping[env_name]]['col'] # 根据环境开关,决定列是否隐藏。 self.table.showColumn(col) if env_switch else self.table.hideColumn(col) def finalize(self) -> None: """ 完成查询后的收尾工作,包括重新启用表格排序和更新等。 :rtype: None :return: 无返回值。 """ # 先应用颜色和过滤器 self.table.apply_color_to_table() self.filter_bar.filter_table() # 启动排序 self.table.setSortingEnabled(True) # 默认按第一列升序排序 self.table.sortByColumn(0, Qt.AscendingOrder) # 允许用户调整列宽 self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive) # 更新过滤器,过滤服务中插入值 self.filter_bar.filter_options_add() # 调用过滤器 self.filter_bar.highlight_rows.clear() self.filter_bar.filter_table() # 启用表格更新 self.table.setUpdatesEnabled(True) # 启用过滤栏组件 self.filter_bar.filter_app_box.setEnabled(True) self.filter_bar.filter_table_box.setEnabled(True) self.filter_bar.filter_table_check_box.setEnabled(True) self.filter_bar.filter_value_box.setEnabled(True) self.filter_bar.filter_value_button.setEnabled(True) self.filter_bar.filter_reset_button.setEnabled(True) def show_result_message(self, result: str) -> None: """ 显示结果消息。 根据运行结果,显示不同的状态消息或错误信息。 :param result: 运行结果的描述。 :type result: str :rtype: None :return: 无返回值。 """ self.action_start.setEnabled(True) if result == 'done': logger.info('Run Completed') else: message = { 'no query result': ('Warning', self.lang['ui.action_start_4']), 'prepare table rows failed': ('Warning', self.lang['ui.action_start_6']), 'run error': ('Critical', self.lang['ui.action_start_7']) }.get(result) if message:
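`table_insert` above hands `TableMain.add_row` cells shaped as `[display_text, user_data]`, and `_fill_row_data` stores the second element under `Qt.UserRole`, so filtering and coloring read a stable key while the visible text stays translatable. A self-contained sketch of that dual-role pattern (the label text and status key below are illustrative):

import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QApplication, QTableWidget, QTableWidgetItem

app = QApplication(sys.argv)
table = QTableWidget(1, 1)

item = QTableWidgetItem("Fully consistent")    # Qt.DisplayRole: what the user sees
item.setData(Qt.UserRole, "fully")             # Qt.UserRole: what the code compares
item.setFlags(item.flags() & ~Qt.ItemIsEditable)
table.setItem(0, 0, item)

assert table.item(0, 0).text() == "Fully consistent"
assert table.item(0, 0).data(Qt.UserRole) == "fully"

Because `_update_item_text` rewrites only the display text on a language switch, comparisons against the `UserRole` key keep working unchanged.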
""" 提供应用程序的主要功能,包括用户界面初始化、数据库查询执行、数据展示和处理。 本模块中包含的类负责应用程序的主要操作流程,如用户界面的初始化、按钮动作的处理、后台数据查询、数据展示等。主要类包括`ActionStart`和`StartWork`,分别负责处理用户界面动作和执行后台工作。 :author: assassing :contact: https://github.com/hxz393 :copyright: Copyright 2023, hxz393. 保留所有权利。 """ logger = logging.getLogger(__name__) class ActionStart(QObject): """ 负责处理用户界面动作,例如初始化界面、响应按钮点击等。 此类包含了界面的主要动作逻辑,如开始按钮的点击处理、用户界面语言的更新、表格的数据填充等。它与后台线程`StartWork`协作,实现数据的查询和展示。 :param lang_manager: 语言管理器,用于界面语言的加载和更新。 :param config_manager: 配置管理器,提供应用程序的配置信息。 :param table: 主表格界面,用于数据的展示。 :param filter_bar: 过滤条,用于数据的筛选。 :type lang_manager: LangManager :type config_manager: ConfigManager :type table: TableMain :type filter_bar: FilterBar """ status_updated = pyqtSignal(str) def __init__(self, lang_manager: LangManager, config_manager: ConfigManager, table: TableMain, filter_bar: FilterBar): super().__init__() # 实例化组件 self.lang_manager = lang_manager self.lang_manager.lang_updated.connect(self.update_lang) self.config_manager = config_manager self.table = table self.filter_bar = filter_bar self.initUI() def initUI(self) -> None: """ 初始化用户界面。 创建并配置界面中的开始动作按钮,包括图标、快捷键和触发事件。 :rtype: None :return: 无返回值。 """ self.action_start = QAction(QIcon(get_resource_path('media/icons8-start-26.png')), 'Start') self.action_start.setShortcut('F10') self.action_start.triggered.connect(self.start) self.update_lang() def update_lang(self) -> None: """ 更新界面语言设置。 :rtype: None :return: 无返回值。 """ self.lang = self.lang_manager.get_lang() self.action_start.setText(self.lang['ui.action_start_1']) self.action_start.setStatusTip(self.lang['ui.action_start_2']) def start(self) -> None: """ 启动更新动作的处理流程。 此方法负责初始化和启动一个后台线程 `StartWork`,该线程执行数据查询和表格更新。同时,该方法还负责连接信号和槽以进行 UI 更新。 :rtype: None :return: 无返回值。 """ try: # 初始化子线程,传入语言字典和配置 self.start_work = StartWork(self.lang, self.config_manager) # 连接信号槽,都是 UI 操作,必须主线程中进行 self.start_work.initialize_signal.connect(self.initialize) self.start_work.table_insert_signal.connect(self.table_insert) self.start_work.table_column_hide_signal.connect(self.table_column_hide) self.start_work.finalize_signal.connect(self.finalize) self.start_work.message.connect(self.show_result_message) # 开始运行 self.start_work.start() except Exception: logger.exception('Failed to initiate start action.') self.status_updated.emit(self.lang['label_status_error']) def initialize(self) -> None: """ 初始化界面和状态,在开始操作前执行。 此方法用于设置 UI 元素的初始状态,如禁用按钮、清空表格等。 :rtype: None :return: 无返回值。 """ logger.info('Start running') # 状态栏发送提示消息 self.status_updated.emit(self.lang['ui.action_start_3']) # 开始按钮不可点击 self.action_start.setEnabled(False) # 禁用表格排序 self.table.setSortingEnabled(False) # 禁用表格更新 self.table.setUpdatesEnabled(False) # 禁用过滤栏组件 self.filter_bar.filter_app_box.setEnabled(False) self.filter_bar.filter_table_box.setEnabled(False) self.filter_bar.filter_table_check_box.setEnabled(False) self.filter_bar.filter_value_box.setEnabled(False) self.filter_bar.filter_value_button.setEnabled(False) self.filter_bar.filter_reset_button.setEnabled(False) # 清空表格数据 self.table.clear() # 初始化表宽 self.table.set_header_resize() logger.debug('Initialization finished') def table_insert(self, table_rows: List[List[List[str]]]) -> None: """ 将查询结果插入到主表格中。 此方法接收查询结果作为输入,并将其格式化后插入到应用程序的主表格中。每个元素是一个三重列表,表示表格的一行数据。 :param table_rows: 待插入的表格数据,每个元素代表一行数据。 :type table_rows: List[List[List[str]]] :rtype: None :return: 无返回值。 """ for row in table_rows: self.table.add_row(row) logger.debug('Table filling finished.') def table_column_hide(self, query_statuses: Dict[str, bool]) -> None: """ 根据查询状态决定是否隐藏表格的某些列。 :param 
query_statuses: 各查询状态的字典,键为环境名,值为布尔值指示是否开启。 :type query_statuses: Dict[str, bool] :rtype: None :return: 无返回值。 """ # env_name类似'PRO_CONFIG',COL_INFO中的键类似'pro_value',env_switch是布尔值。 for env_name, env_switch in query_statuses.items(): # 建立env_name和COL_INFO中的键的映射 column_name_mapping = {'PRO_CONFIG': 'pro_value', 'PRE_CONFIG': 'pre_value', 'TEST_CONFIG': 'test_value', 'DEV_CONFIG': 'dev_value'} # 获取列序号 col = COL_INFO[column_name_mapping[env_name]]['col'] # 根据环境开关,决定列是否隐藏。 self.table.showColumn(col) if env_switch else self.table.hideColumn(col) def finalize(self) -> None: """ 完成查询后的收尾工作,包括重新启用表格排序和更新等。 :rtype: None :return: 无返回值。 """ # 先应用颜色和过滤器 self.table.apply_color_to_table() self.filter_bar.filter_table() # 启动排序 self.table.setSortingEnabled(True) # 默认按第一列升序排序 self.table.sortByColumn(0, Qt.AscendingOrder) # 允许用户调整列宽 self.table.horizontalHeader().setSectionResizeMode(QHeaderView.Interactive) # 更新过滤器,过滤服务中插入值 self.filter_bar.filter_options_add() # 调用过滤器 self.filter_bar.highlight_rows.clear() self.filter_bar.filter_table() # 启用表格更新 self.table.setUpdatesEnabled(True) # 启用过滤栏组件 self.filter_bar.filter_app_box.setEnabled(True) self.filter_bar.filter_table_box.setEnabled(True) self.filter_bar.filter_table_check_box.setEnabled(True) self.filter_bar.filter_value_box.setEnabled(True) self.filter_bar.filter_value_button.setEnabled(True) self.filter_bar.filter_reset_button.setEnabled(True) def show_result_message(self, result: str) -> None: """ 显示结果消息。 根据运行结果,显示不同的状态消息或错误信息。 :param result: 运行结果的描述。 :type result: str :rtype: None :return: 无返回值。 """ self.action_start.setEnabled(True) if result == 'done': logger.info('Run Completed') else: message = { 'no query result': ('Warning', self.lang['ui.action_start_4']), 'prepare table rows failed': ('Warning', self.lang['ui.action_start_6']), 'run error': ('Critical', self.lang['ui.action_start_7']) }.get(result) if message:
message_show(*message)
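The `next_line` completes the dispatch in `show_result_message`; around it, `ActionStart.start` drives a `StartWork` QThread whose signals carry results back to the GUI thread. A stripped-down sketch of that worker pattern, with the query payload replaced by a sleep (everything below is a placeholder, not the repo's API):

import sys
import time
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import QApplication, QLabel

class Worker(QThread):
    message = pyqtSignal(str)        # emitted from the worker thread

    def run(self):                   # executes off the GUI thread
        time.sleep(0.1)              # stand-in for execute_queries(...)
        self.message.emit("done")

app = QApplication(sys.argv)
label = QLabel("running...")
label.show()
worker = Worker()
worker.message.connect(label.setText)   # cross-thread: delivered as a queued call
worker.finished.connect(app.quit)
worker.start()
sys.exit(app.exec_())

Cross-thread signal/slot connections default to queued delivery, which is why the worker can emit freely while every widget mutation still happens on the main thread.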
6
2023-11-07 01:02:38+00:00
24k
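One last note on this record before the next one begins: `initialize` and `finalize` bracket the bulk fill by switching sorting and repaints off and back on, so Qt performs one relayout instead of one per inserted row (inserting into a sorted QTableWidget can also reorder rows mid-fill). A minimal sketch of that bracket as a context manager; the real code additionally disables the filter-bar widgets:

from contextlib import contextmanager
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import QTableWidget

@contextmanager
def bulk_update(table: QTableWidget):
    """Suspend sorting and repaints while rows are inserted."""
    table.setSortingEnabled(False)
    table.setUpdatesEnabled(False)
    try:
        yield table
    finally:
        table.setUpdatesEnabled(True)
        table.setSortingEnabled(True)
        table.sortByColumn(0, Qt.AscendingOrder)

# Usage, inside a running QApplication:
#   with bulk_update(self.table):
#       for row in table_rows:
#           self.table.add_row(row)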
google-research/semivl
model/builder.py
[ { "identifier": "TIMMVisionTransformer", "path": "model/backbone/timm_vit.py", "snippet": "class TIMMVisionTransformer(nn.Module):\n\n def __init__(\n self,\n variant,\n timm_load_pretrained,\n drop_path_rate,\n img_size,\n out_indices,\n ):\n super(TIMMVisionTransformer, self).__init__()\n self.m = timm.create_model(\n variant,\n pretrained=timm_load_pretrained,\n drop_path_rate=drop_path_rate,\n img_size=img_size,\n )\n self.patch_size = self.m.patch_embed.patch_size\n self.img_size = img_size\n self.out_indices = out_indices\n assert max(self.out_indices) <= 11\n\n def forward_features(self, x):\n feats = []\n x = self.m.patch_embed(x)\n x = self.m._pos_embed(x)\n if self.m.grad_checkpointing and not torch.jit.is_scripting():\n raise ValueError(self.m.grad_checkpointing)\n # x = checkpoint_seq(self.blocks, x)\n else:\n for i, block in enumerate(self.m.blocks):\n x = block(x)\n if i in self.out_indices:\n out = self.m.norm(x)\n feats.append(out)\n x = self.m.norm(x)\n return x, feats\n\n def forward(self, x: torch.Tensor):\n if x.shape[-2] != self.m.patch_embed.img_size[0] or x.shape[-1] != self.m.patch_embed.img_size[1]:\n assert not self.training\n x = F.interpolate(x, size=self.img_size, mode='bilinear', align_corners=False)\n B, _, H, W = x.shape\n H = H // self.patch_size[0]\n W = W // self.patch_size[1]\n\n x, feats = self.forward_features(x)\n outs = [\n tuple([f[:, 1:].reshape(B, H, W, -1).permute(0, 3, 1, 2) for f in feats]),\n x[:, 0], # cls_token\n ]\n\n return outs" }, { "identifier": "DLV3PHead", "path": "model/decode_heads/dlv3p_head.py", "snippet": "class DLV3PHead(BaseDecodeHead):\n\n def __init__(self, c1_in_channels, c1_channels, dilations, img_size, **kwargs):\n super(DLV3PHead, self).__init__(**kwargs)\n self.image_size = img_size\n self.aspp = ASPPModule(self.in_channels, dilations)\n self.c1_proj = nn.Sequential(\n nn.Conv2d(c1_in_channels, c1_channels, 1, bias=False),\n nn.BatchNorm2d(c1_channels),\n nn.ReLU(True))\n fuse_channels = self.in_channels // 8 + c1_channels\n self.head = nn.Sequential(\n nn.Conv2d(fuse_channels, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, self.num_classes, 1, bias=True))\n self.conv_seg = None\n\n def forward(self, inputs, force_output_pred_masks=False):\n if force_output_pred_masks:\n inputs = inputs[0][0]\n assert len(inputs) == 2\n c1, c4 = inputs[0], inputs[1]\n\n c4 = self.aspp(c4)\n c1 = self.c1_proj(c1)\n c4 = F.interpolate(c4, size=c1.shape[-2:], mode=\"bilinear\", align_corners=self.align_corners)\n x = torch.cat([c1, c4], dim=1)\n out = self.head(x)\n\n if force_output_pred_masks:\n out = F.interpolate(out, size=(self.image_size, self.image_size),\n mode='bilinear', align_corners=self.align_corners)\n out = {\"pred_masks\": out}\n\n return out" }, { "identifier": "VLGHead", "path": "model/decode_heads/vlg_head.py", "snippet": "class VLGHead(nn.Module):\n def __init__(self,\n img_size,\n num_classes,\n text_in_channels,\n text_channels,\n up_channels,\n skip_in_channels,\n skip_channels,\n skip_from_conv_feat,\n num_layers,\n num_heads,\n channels,\n pool_size,\n conv1_ksize,\n loss_decode,\n align_corners,\n ) -> None:\n super().__init__()\n self.image_size = img_size\n self.num_classes = num_classes\n self.align_corners = align_corners\n self.text_in_channels = text_in_channels\n self.num_layers = num_layers\n self.channels = channels\n self.skip_from_conv_feat = skip_from_conv_feat\n assert 
loss_decode is None\n\n self.conv1 = nn.Conv2d(1, channels, kernel_size=conv1_ksize, stride=1, padding=(conv1_ksize-1)//2)\n self.aspp = ASPPModule(channels)\n self.layers = nn.ModuleList([\n SemanticTransformer(\n channels=channels, text_channels=text_channels, num_heads=num_heads, pool_size=pool_size\n ) for _ in range(num_layers)\n ])\n\n self.text_proj = nn.Sequential(\n nn.Linear(text_in_channels, text_channels),\n nn.ReLU())\n\n self.skip_proj = nn.ModuleList([\n nn.Sequential(\n nn.Conv2d(sic, sc, kernel_size=3, stride=1, padding=1),\n nn.ReLU(),\n ) for sic, sc in zip(skip_in_channels, skip_channels)\n ])\n\n self.up1 = Up(channels, up_channels[0], skip_channels[0])\n self.up2 = Up(up_channels[0], up_channels[1], skip_channels[1])\n self.head = nn.Conv2d(up_channels[1], 1, kernel_size=3, stride=1, padding=1)\n\n def forward(self, inputs, force_output_pred_masks=False):\n inputs_both = inputs\n img_feat_pyramid = inputs_both[0][0]\n img_feats = img_feat_pyramid[-1]\n if self.skip_from_conv_feat:\n conv_feats = inputs_both[2]\n if len(img_feat_pyramid) > 1:\n skip_feats = [\n *img_feat_pyramid[:-1][::-1],\n *conv_feats[::-1],\n ]\n else:\n skip_feats = conv_feats[::-1]\n assert len(self.skip_proj) == len(skip_feats)\n else:\n skip_feats = img_feat_pyramid[:-1][::-1]\n text_feats = inputs_both[1]\n\n text_feats = text_feats.repeat(img_feats.shape[0], 1, 1).float()\n B, C, H, W = img_feats.shape\n assert list(text_feats.shape) == [B, self.num_classes, C]\n\n # Compute Similarity Map\n img_feats = F.normalize(img_feats, dim=1)\n text_feats = F.normalize(text_feats, dim=-1)\n x = torch.einsum('bchw, bnc -> bnhw', img_feats, text_feats)\n\n # Spatial Reasoning\n x = rearrange(x, 'b n h w -> (b n) () h w')\n x = self.conv1(x)\n x = self.aspp(x)\n x = rearrange(x, '(b n) c h w -> b c n h w', b=B)\n\n # Semantic Reasoning\n if self.text_proj is not None:\n text_feats = self.text_proj(text_feats)\n\n for layer in self.layers:\n x = layer(x, text_feats)\n\n # Upsampling\n if self.skip_proj is not None:\n skip_feats = [proj(f) for proj, f in zip(self.skip_proj, skip_feats)]\n\n x = rearrange(x, 'b c n h w -> (b n) c h w')\n x = self.up1(x, skip_feats[0])\n x = self.up2(x, skip_feats[1])\n x = self.head(x)\n x = rearrange(x, '(b n) () h w -> b n h w', b=B)\n\n if x.shape[1] != self.num_classes:\n cls2con = get_class_to_concept_idxs(self.load_text_embedding)\n x = aggregate_concept_predictions(x, cls2con)\n\n if force_output_pred_masks:\n x = F.interpolate(x, size=(self.image_size, self.image_size),\n mode='bilinear', align_corners=self.align_corners)\n x = {\"pred_masks\": x}\n\n return x" }, { "identifier": "VLM", "path": "model/vlm.py", "snippet": "class VLM(EncoderDecoder):\n def __init__(self,\n freeze_backbone=False,\n exclude_keys=None,\n load_text_embedding=None,\n load_mcc_text_embedding=None,\n load_pl_text_embedding=None,\n clip_encoder=None,\n conv_encoder=None,\n maskclip_class_filter=None,\n maskclip_trust_head=None,\n renorm_clip_img=False,\n **args):\n super(VLM, self).__init__(**args)\n assert load_text_embedding == load_pl_text_embedding\n assert maskclip_class_filter is None\n assert maskclip_trust_head is None\n self.local_iter = 0\n\n self.clip_encoder = None\n if clip_encoder is not None:\n self.clip_encoder = builder.build_backbone(clip_encoder)\n self.conv_encoder = None\n if conv_encoder is not None:\n self.conv_encoder = builder.build_backbone(conv_encoder)\n\n self.load_text_embedding = load_text_embedding\n self.decode_head.load_text_embedding = load_text_embedding\n 
self.load_mcc_text_embedding = load_mcc_text_embedding\n self.renorm_clip_img = renorm_clip_img\n if renorm_clip_img:\n print('Renormalize clip image.')\n if self.load_mcc_text_embedding:\n self.loaded_mcc_text_feat = np.load(self.load_mcc_text_embedding)\n self.loaded_mcc_text_feat = torch.from_numpy(self.loaded_mcc_text_feat).float()\n else:\n raise NotImplementedError\n\n if freeze_backbone:\n self.freeze(self.backbone, exclude_keys=exclude_keys)\n\n def renormalize_img_for_clip(self, img):\n if not self.renorm_clip_img:\n return img\n loader_mean, loader_std = [0.485, 0.456, 0.406], [0.229, 0.224, 0.225]\n clip_mean, clip_std = [0.48145466, 0.4578275, 0.40821073], [0.26862954, 0.26130258, 0.27577711]\n loader_mean = torch.tensor(loader_mean, device=img.device).view(1, -1, 1, 1)\n loader_std = torch.tensor(loader_std, device=img.device).view(1, -1, 1, 1)\n clip_mean = torch.tensor(clip_mean, device=img.device).view(1, -1, 1, 1)\n clip_std = torch.tensor(clip_std, device=img.device).view(1, -1, 1, 1)\n return (img * loader_std + loader_mean - clip_mean) / clip_std\n\n def freeze(self, model, exclude_keys=None):\n for n, m in model.named_parameters():\n m.requires_grad = False\n if exclude_keys is not None:\n assert isinstance(exclude_keys, list)\n for k in exclude_keys:\n if str(k) in n:\n m.requires_grad = True\n print(f'Finetune {n}')\n \n def forward_maskclip(self, img, conf_tresh):\n img = self.renormalize_img_for_clip(img)\n self.clip_encoder.eval()\n with torch.no_grad():\n text_feat = self.loaded_mcc_text_feat.detach().to(img.device)\n visual_feat, _ = self.clip_encoder(img)\n visual_feat = visual_feat[-1]\n\n dense_pred = F.conv2d(visual_feat, text_feat[:, :, None, None])\n if dense_pred.shape[1] != self.num_classes:\n cls2con = get_class_to_concept_idxs(self.load_mcc_text_embedding)\n dense_pred = aggregate_concept_predictions(dense_pred, cls2con)\n assert dense_pred.shape[1] == self.num_classes\n dense_pred = F.interpolate(dense_pred, size=img.shape[-2:],\n mode='bilinear', align_corners=self.decode_head.align_corners)\n dense_pred = (100.0 * dense_pred).softmax(dim=1)\n dense_pred_certainty, dense_pred = dense_pred.max(dim=1)\n\n filtered_dense_pred = dense_pred.clone()\n filtered_dense_pred[dense_pred_certainty < conf_tresh] = 255\n return filtered_dense_pred\n\n def extract_feat(self, img):\n orig_img = img\n img = self.renormalize_img_for_clip(img)\n visual_feat = self.backbone(img)\n text_feat = np.load(self.load_text_embedding)\n text_feat = torch.from_numpy(text_feat).to(img.device)\n self.decode_head.load_text_embedding = self.load_text_embedding\n conv_feat = None\n if self.conv_encoder is not None:\n conv_feat = self.conv_encoder(orig_img)\n\n return [visual_feat, text_feat, conv_feat]\n\n def _decode_head_forward_test(self, x, img_metas):\n seg_logits = self.decode_head.forward(x, force_output_pred_masks=True)['pred_masks']\n return seg_logits" }, { "identifier": "MaskClipVisionTransformer", "path": "third_party/maskclip/models/backbones/maskclip_vit.py", "snippet": "class MaskClipVisionTransformer(BaseModule):\n \"\"\"Vision Transformer.\n\n This backbone is the implementation of `An Image is Worth 16x16 Words:\n Transformers for Image Recognition at\n Scale <https://arxiv.org/abs/2010.11929>`_.\n\n Args:\n img_size (int | tuple): Input image size. Default: 224.\n patch_size (int): The patch size. Default: 16.\n in_channels (int): Number of input channels. Default: 3.\n embed_dims (int): embedding dimension. Default: 768.\n num_layers (int): depth of transformer. 
Default: 12.\n num_heads (int): number of attention heads. Default: 12.\n mlp_ratio (int): ratio of mlp hidden dim to embedding dim.\n Default: 4.\n out_indices (list | tuple | int): Output from which stages.\n Default: -1.\n qkv_bias (bool): enable bias for qkv if True. Default: True.\n drop_rate (float): Probability of an element to be zeroed.\n Default 0.0\n attn_drop_rate (float): The drop out rate for attention layer.\n Default 0.0\n drop_path_rate (float): stochastic depth rate. Default 0.0\n with_cls_token (bool): Whether concatenating class token into image\n tokens as transformer input. Default: True.\n output_cls_token (bool): Whether output the cls_token. If set True,\n `with_cls_token` must be True. Default: False.\n norm_cfg (dict): Config dict for normalization layer.\n Default: dict(type='LN')\n act_cfg (dict): The activation config for FFNs.\n Default: dict(type='GELU').\n patch_norm (bool): Whether to add a norm in PatchEmbed Block.\n Default: False.\n final_norm (bool): Whether to add a additional layer to normalize\n final feature map. Default: False.\n interpolate_mode (str): Select the interpolate mode for position\n embeding vector resize. Default: bicubic.\n num_fcs (int): The number of fully-connected layers for FFNs.\n Default: 2.\n norm_eval (bool): Whether to set norm layers to eval mode, namely,\n freeze running stats (mean and var). Note: Effect on Batch Norm\n and its variants only. Default: False.\n with_cp (bool): Use checkpoint or not. Using checkpoint will save\n some memory while slowing down the training speed. Default: False.\n pretrained (str, optional): model pretrained path. Default: None.\n init_cfg (dict or list[dict], optional): Initialization config dict.\n Default: None.\n \"\"\"\n\n def __init__(self,\n img_size=224,\n patch_size=16,\n patch_bias=True,\n in_channels=3,\n embed_dims=768,\n num_layers=12,\n num_heads=12,\n mlp_ratio=4,\n out_indices=-1,\n qkv_bias=True,\n drop_rate=0.,\n attn_drop_rate=0.,\n drop_path_rate=0.,\n with_cls_token=True,\n output_cls_token=False,\n norm_cfg=dict(type='LN'),\n act_cfg=dict(type='GELU'),\n patch_norm=False,\n pre_norm=False,\n final_norm=False,\n return_qkv=False,\n return_clip_embed=False,\n skip_last_attn=False,\n interpolate_mode='bicubic',\n num_fcs=2,\n norm_eval=False,\n with_cp=False,\n pretrained=None,\n num_prompt_tokens=None,\n lora_layers=[],\n lora_r=4,\n lora_scaling=1,\n lora_dropout=0,\n lora_targets='qkvo',\n init_cfg=None):\n super(MaskClipVisionTransformer, self).__init__(init_cfg=init_cfg)\n\n if isinstance(img_size, int):\n img_size = to_2tuple(img_size)\n elif isinstance(img_size, tuple):\n if len(img_size) == 1:\n img_size = to_2tuple(img_size[0])\n assert len(img_size) == 2, \\\n f'The size of image should have length 1 or 2, ' \\\n f'but got {len(img_size)}'\n\n if output_cls_token:\n assert with_cls_token is True, f'with_cls_token must be True if' \\\n f'set output_cls_token to True, but got {with_cls_token}'\n\n assert not (init_cfg and pretrained), \\\n 'init_cfg and pretrained cannot be set at the same time'\n if isinstance(pretrained, str):\n warnings.warn('DeprecationWarning: pretrained is deprecated, '\n 'please use \"init_cfg\" instead')\n self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)\n elif pretrained is not None:\n raise TypeError('pretrained must be a str or None')\n\n self.img_size = img_size\n self.patch_size = patch_size\n self.interpolate_mode = interpolate_mode\n self.norm_eval = norm_eval\n self.with_cp = with_cp\n self.pretrained = pretrained\n 
self.num_prompt_tokens = num_prompt_tokens\n\n self.patch_embed = PatchEmbed(\n in_channels=in_channels,\n embed_dims=embed_dims,\n conv_type='Conv2d',\n kernel_size=patch_size,\n stride=patch_size,\n padding='corner',\n bias=patch_bias,\n norm_cfg=norm_cfg if patch_norm else None,\n init_cfg=None,\n )\n\n num_patches = (img_size[0] // patch_size) * \\\n (img_size[1] // patch_size)\n\n self.with_cls_token = with_cls_token\n self.output_cls_token = output_cls_token\n self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dims))\n self.pos_embed = nn.Parameter(\n torch.zeros(1, num_patches + 1, embed_dims))\n self.drop_after_pos = nn.Dropout(p=drop_rate)\n\n if out_indices is None:\n self.out_indices = [num_layers]\n elif isinstance(out_indices, int):\n if out_indices == -1:\n out_indices = num_layers - 1\n self.out_indices = [out_indices]\n elif isinstance(out_indices, list) or isinstance(out_indices, tuple):\n self.out_indices = out_indices\n else:\n raise TypeError('out_indices must be type of int, list or tuple')\n\n dpr = [\n x.item() for x in torch.linspace(0, drop_path_rate, num_layers)\n ] # stochastic depth decay rule\n\n self.layers = ModuleList()\n for i in range(num_layers):\n self.layers.append(\n TransformerEncoderLayer(\n embed_dims=embed_dims,\n num_heads=num_heads,\n feedforward_channels=mlp_ratio * embed_dims,\n attn_drop_rate=attn_drop_rate,\n drop_rate=drop_rate,\n drop_path_rate=dpr[i],\n num_fcs=num_fcs,\n qkv_bias=qkv_bias,\n act_cfg=act_cfg,\n norm_cfg=norm_cfg,\n lora=(i in lora_layers),\n lora_r=lora_r,\n lora_scaling=lora_scaling,\n lora_dropout=lora_dropout,\n lora_targets=lora_targets,\n batch_first=True))\n\n self.pre_norm = pre_norm\n if pre_norm:\n self.norm0_name, norm0 = build_norm_layer(\n norm_cfg, embed_dims, postfix=0)\n self.add_module(self.norm0_name, norm0)\n\n self.final_norm = final_norm\n if final_norm:\n self.norm1_name, norm1 = build_norm_layer(\n norm_cfg, embed_dims, postfix=1)\n self.add_module(self.norm1_name, norm1)\n\n self.return_clip_embed = return_clip_embed\n if self.return_clip_embed:\n self.proj = nn.Conv2d(embed_dims, 512, 1, bias=False)\n self.add_module('proj', self.proj)\n\n self.return_qkv = [False] * num_layers\n if isinstance(return_qkv, bool):\n for out_i in self.out_indices:\n if out_i >= num_layers:\n continue\n self.return_qkv[out_i] = return_qkv\n elif isinstance(return_qkv, list) or isinstance(return_qkv, tuple):\n for i, out_i in enumerate(self.out_indices):\n if out_i >= num_layers:\n continue\n self.return_qkv[out_i] = return_qkv[i]\n else:\n raise TypeError('return_qkv must be type of bool, list or tuple')\n if self.return_clip_embed:\n self.return_qkv[num_layers - 1] = True\n\n self.skip_last_attn = skip_last_attn\n\n if self.num_prompt_tokens is not None:\n val = math.sqrt(6. 
/ float(3 * reduce(mul, (patch_size, patch_size), 1) + embed_dims))\n\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(num_layers, self.num_prompt_tokens, embed_dims))\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n\n self.prompt_proj = nn.Linear(embed_dims, embed_dims)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')\n self.prompt_norm = LayerNorm(embed_dims, eps=1e-6)\n self.prompt_dropout = nn.Dropout(0.1)\n\n @property\n def norm0(self):\n return getattr(self, self.norm0_name)\n \n @property\n def norm1(self):\n return getattr(self, self.norm1_name)\n\n def init_weights(self):\n if (isinstance(self.init_cfg, dict)\n and self.init_cfg.get('type') == 'Pretrained'):\n logger = get_root_logger()\n checkpoint = _load_checkpoint(\n self.init_cfg['checkpoint'], logger=logger, map_location='cpu')\n\n if 'state_dict' in checkpoint:\n state_dict = checkpoint['state_dict']\n else:\n state_dict = checkpoint\n logger.info(msg='Remove backbone prefix from state_dict.')\n state_dict = {k.replace('backbone.', ''): v for k, v in state_dict.items()}\n\n if 'pos_embed' in state_dict.keys():\n if self.pos_embed.shape != state_dict['pos_embed'].shape:\n logger.info(msg=f'Resize the pos_embed shape from '\n f'{state_dict[\"pos_embed\"].shape} to '\n f'{self.pos_embed.shape}')\n h, w = self.img_size\n pos_size = int(\n math.sqrt(state_dict['pos_embed'].shape[1] - 1))\n state_dict['pos_embed'] = self.resize_pos_embed(\n state_dict['pos_embed'],\n (h // self.patch_size, w // self.patch_size),\n (pos_size, pos_size), self.interpolate_mode)\n\n if self.return_clip_embed:\n state_dict['proj.weight'] = state_dict['proj.weight'][:, :, None, None]\n else:\n state_dict.pop('proj.weight')\n\n print(self.load_state_dict(state_dict, False))\n elif self.init_cfg is not None:\n super(MaskClipVisionTransformer, self).init_weights()\n else:\n # We only implement the 'jax_impl' initialization implemented at\n # https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/vision_transformer.py#L353 # noqa: E501\n trunc_normal_(self.pos_embed, std=.02)\n trunc_normal_(self.cls_token, std=.02)\n for n, m in self.named_modules():\n if isinstance(m, nn.Linear):\n trunc_normal_(m.weight, std=.02)\n if m.bias is not None:\n if 'ffn' in n:\n nn.init.normal_(m.bias, mean=0., std=1e-6)\n else:\n nn.init.constant_(m.bias, 0)\n elif isinstance(m, nn.Conv2d):\n kaiming_init(m, mode='fan_in', bias=0.)\n elif isinstance(m, (_BatchNorm, nn.GroupNorm, nn.LayerNorm)):\n constant_init(m, val=1.0, bias=0.)\n\n def _pos_embeding(self, patched_img, hw_shape, pos_embed):\n \"\"\"Positiong embeding method.\n\n Resize the pos_embed, if the input image size doesn't match\n the training size.\n Args:\n patched_img (torch.Tensor): The patched image, it should be\n shape of [B, L1, C].\n hw_shape (tuple): The downsampled image resolution.\n pos_embed (torch.Tensor): The pos_embed weighs, it should be\n shape of [B, L2, c].\n Return:\n torch.Tensor: The pos encoded image feature.\n \"\"\"\n assert patched_img.ndim == 3 and pos_embed.ndim == 3, \\\n 'the shapes of patched_img and pos_embed must be [B, L, C]'\n x_len, pos_len = patched_img.shape[1], pos_embed.shape[1]\n if x_len != pos_len:\n if pos_len == (self.img_size[0] // self.patch_size) * (\n self.img_size[1] // self.patch_size) + 1:\n pos_h = self.img_size[0] // self.patch_size\n pos_w = self.img_size[1] // self.patch_size\n else:\n raise ValueError(\n 'Unexpected shape of pos_embed, got {}.'.format(\n pos_embed.shape))\n pos_embed = 
self.resize_pos_embed(pos_embed, hw_shape,\n (pos_h, pos_w),\n self.interpolate_mode)\n return self.drop_after_pos(patched_img + pos_embed)\n\n @staticmethod\n def resize_pos_embed(pos_embed, input_shpae, pos_shape, mode):\n \"\"\"Resize pos_embed weights.\n\n Resize pos_embed using bicubic interpolate method.\n Args:\n pos_embed (torch.Tensor): Position embedding weights.\n input_shpae (tuple): Tuple for (downsampled input image height,\n downsampled input image width).\n pos_shape (tuple): The resolution of downsampled origin training\n image.\n mode (str): Algorithm used for upsampling:\n ``'nearest'`` | ``'linear'`` | ``'bilinear'`` | ``'bicubic'`` |\n ``'trilinear'``. Default: ``'nearest'``\n Return:\n torch.Tensor: The resized pos_embed of shape [B, L_new, C]\n \"\"\"\n assert pos_embed.ndim == 3, 'shape of pos_embed must be [B, L, C]'\n pos_h, pos_w = pos_shape\n cls_token_weight = pos_embed[:, 0]\n pos_embed_weight = pos_embed[:, (-1 * pos_h * pos_w):]\n pos_embed_weight = pos_embed_weight.reshape(\n 1, pos_h, pos_w, pos_embed.shape[2]).permute(0, 3, 1, 2)\n pos_embed_weight = resize(\n pos_embed_weight, size=input_shpae, align_corners=False, mode=mode)\n cls_token_weight = cls_token_weight.unsqueeze(1)\n pos_embed_weight = torch.flatten(pos_embed_weight, 2).transpose(1, 2)\n pos_embed = torch.cat((cls_token_weight, pos_embed_weight), dim=1)\n return pos_embed\n\n def forward(self, inputs):\n B = inputs.shape[0]\n\n x, hw_shape = self.patch_embed(inputs)\n\n # stole cls_tokens impl from Phil Wang, thanks\n cls_tokens = self.cls_token.expand(B, -1, -1)\n x = torch.cat((cls_tokens, x), dim=1)\n x = self._pos_embeding(x, hw_shape, self.pos_embed)\n\n if not self.with_cls_token:\n # Remove class token for transformer encoder input\n x = x[:, 1:]\n\n if self.pre_norm:\n x = self.norm0(x)\n\n outs = []\n for i, layer in enumerate(self.layers):\n # add deep prompt\n if self.num_prompt_tokens is not None:\n deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i]).expand(B, -1, -1))\n assert self.with_cls_token\n assert x.shape[1] == 1 + hw_shape[0] * hw_shape[1], x.shape\n x = torch.cat((\n x[:, :1, :],\n deep_prompt_emb,\n x[:, 1:, :]\n ), dim=1)\n assert x.shape[1] == 1 + self.num_prompt_tokens + hw_shape[0] * hw_shape[1], x.shape\n x, q, k, v = layer(x, self.return_qkv[i] \\\n or (i==len(self.layers)-1 and self.skip_last_attn))\n # remove deep prompt\n if self.num_prompt_tokens is not None:\n x = torch.cat((\n x[:, :1, :],\n x[:, 1+self.num_prompt_tokens:, :]\n ), dim=1)\n assert x.shape[1] == 1 + hw_shape[0] * hw_shape[1]\n if v is not None:\n v = torch.cat((\n v[:, :1, :],\n v[:, 1+self.num_prompt_tokens:, :]\n ), dim=1)\n assert v.shape[1] == 1 + hw_shape[0] * hw_shape[1]\n if i == len(self.layers) - 1:\n if self.final_norm:\n x = self.norm1(x)\n if self.return_qkv[i]:\n v = self.norm1(v)\n if self.skip_last_attn:\n if self.with_cls_token:\n x[:, 1:] = v[:, 1:]\n else:\n x = v\n if self.return_clip_embed:\n visual_embedding = v\n if self.with_cls_token:\n visual_embedding = visual_embedding[:, 1:]\n B, _, C = visual_embedding.shape\n visual_embedding = visual_embedding.reshape(B, hw_shape[0], hw_shape[1],\n C).permute(0, 3, 1, 2).contiguous()\n visual_embedding = self.proj(visual_embedding)\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n if i in self.out_indices:\n if self.with_cls_token:\n # Remove class token and reshape token for decoder head\n out = x[:, 1:]\n else:\n out = x\n B, _, C = out.shape\n out = 
out.reshape(B, hw_shape[0], hw_shape[1],\n C).permute(0, 3, 1, 2).contiguous()\n if self.output_cls_token:\n out = [out, x[:, 0]]\n if self.return_qkv[i]:\n if self.with_cls_token:\n q = q[:, 1:]\n k = k[:, 1:]\n v = v[:, 1:]\n v = v.reshape(B, hw_shape[0], hw_shape[1],\n C).permute(0, 3, 1, 2).contiguous()\n out = [out, q, k, v]\n outs.append(out)\n\n if self.return_clip_embed:\n features = []\n for o in outs:\n if isinstance(o, list):\n # from return_qkv\n assert len(o) == 4\n features.append(o[3])\n else:\n features.append(o)\n if len(self.layers) in self.out_indices:\n features.append(visual_embedding)\n global_embedding = self.proj(x[:, 0][:, :, None, None])[:, :, 0, 0]\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n\n outs = [\n tuple(features),\n global_embedding\n ]\n\n return outs\n\n def train(self, mode=True):\n super(MaskClipVisionTransformer, self).train(mode)\n if mode and self.norm_eval:\n for m in self.modules():\n if isinstance(m, nn.LayerNorm):\n m.eval()" }, { "identifier": "MaskClip2Head", "path": "third_party/maskclip/models/decode_heads/maskclip2_head.py", "snippet": "class MaskClip2Head(BaseDecodeHead):\n\n def __init__(self, img_size, **kwargs):\n super(MaskClip2Head, self).__init__(**kwargs)\n self.img_size = img_size\n\n def forward(self, inputs, force_output_pred_masks=False):\n assert force_output_pred_masks\n inputs_both = inputs\n inputs = inputs_both[0][0]\n cls_token = inputs_both[0][1]\n txt_embed = inputs_both[1]\n feat = inputs[-1]\n\n output = self.cls_seg(feat, txt_embed)\n\n output = F.interpolate(output, size=(self.img_size, self.img_size),\n mode='bilinear', align_corners=self.align_corners)\n output = {\"pred_masks\": output}\n\n return output\n\n def cls_seg(self, feat, txt_embed):\n txt_embed = txt_embed.to(feat.dtype)\n output = F.conv2d(feat, txt_embed[:, :, None, None])\n \n return output\n\n def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):\n raise RuntimeError('MaskClip is not trainable. 
Try MaskClip+ instead.')" }, { "identifier": "MaskClipHead", "path": "third_party/maskclip/models/decode_heads/maskclip_head.py", "snippet": "class MaskClipHead(BaseDecodeHead):\n\n def __init__(self, text_categories, text_channels, text_embeddings_path,\n visual_projs_path, vit=False, ks_thresh=0., pd_thresh=0.,\n attn_pooling=False, num_heads=32, **kwargs):\n super(MaskClipHead, self).__init__(**kwargs)\n\n self.text_categories = text_categories\n self.text_channels = text_channels\n self.text_embeddings_path = text_embeddings_path\n self.visual_projs_path = visual_projs_path\n\n if self.text_embeddings_path is None:\n self.text_embeddings = nn.Parameter(torch.zeros(text_categories, text_channels))\n nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)\n else:\n self.register_buffer('text_embeddings', torch.randn(text_categories, text_channels))\n self.load_text_embeddings()\n \n self.vit = vit\n if vit:\n self.proj = nn.Conv2d(self.in_channels, text_channels, 1, bias=False)\n else:\n self.q_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)\n self.k_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)\n self.v_proj = nn.Conv2d(self.in_channels, self.in_channels, 1)\n self.c_proj = nn.Conv2d(self.in_channels, text_channels, 1)\n self.load_visual_projs()\n\n self.ks_thresh = ks_thresh\n self.pd_thresh = pd_thresh\n self.attn_pooling = attn_pooling\n self.num_heads = num_heads\n\n def init_weights(self):\n super(MaskClipHead, self).init_weights()\n if self.text_embeddings_path is None:\n nn.init.normal_(self.text_embeddings, mean=0.0, std=0.01)\n else:\n self.load_text_embeddings()\n self.load_visual_projs()\n\n def load_text_embeddings(self):\n loaded = torch.load(self.text_embeddings_path, map_location='cuda')\n self.text_embeddings[:, :] = loaded[:, :]\n print_log(f'Loaded text embeddings from {self.text_embeddings_path}', logger=get_root_logger())\n\n def load_visual_projs(self):\n loaded = torch.load(self.visual_projs_path, map_location='cuda')\n attrs = ['proj'] if self.vit else ['q_proj', 'k_proj', 'v_proj', 'c_proj']\n for attr in attrs:\n current_attr = getattr(self, attr)\n state_dict = loaded[attr]\n for key in state_dict:\n if 'weight' in key:\n state_dict[key] = state_dict[key][:, :, None, None]\n current_attr.load_state_dict(state_dict)\n print_log(f'Loaded proj weights from {self.visual_projs_path}', logger=get_root_logger())\n \n def forward(self, inputs):\n x = self._transform_inputs(inputs)\n q, k, v, cls_token = None, None, None, None\n if self.vit:\n if isinstance(x, list) and len(x) == 4:\n x, q, k, v = x\n if isinstance(x, list) and len(x) == 2:\n x, cls_token = x\n if v is not None:\n feat = self.proj(v)\n else:\n feat = self.proj(x)\n if cls_token is not None:\n cls_token = self.proj(cls_token[:, :, None, None])[:, :, 0, 0]\n else:\n if self.attn_pooling:\n N, C, H, W = x.shape\n x = x.view(N, C, -1).permute(2, 0, 1) # NCHW -> (HW)NC\n x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0)\n x, _ = F.multi_head_attention_forward(\n query=x, key=x, value=x,\n embed_dim_to_check=x.shape[-1],\n num_heads=self.num_heads,\n q_proj_weight=self.q_proj.weight[:, :, 0, 0],\n k_proj_weight=self.k_proj.weight[:, :, 0, 0],\n v_proj_weight=self.v_proj.weight[:, :, 0, 0],\n in_proj_weight=None,\n in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),\n bias_k=None,\n bias_v=None,\n add_zero_attn=False,\n dropout_p=0,\n out_proj_weight=self.c_proj.weight[:, :, 0, 0],\n out_proj_bias=self.c_proj.bias,\n use_separate_proj_weight=True,\n 
training=self.training,\n need_weights=False\n )\n feat = x[1:].permute(1, 2, 0).view(N, -1, H, W)\n else:\n q = self.q_proj(x)\n k = self.k_proj(x)\n q = torch.flatten(q, start_dim=2).transpose(-2, -1)\n k = torch.flatten(k, start_dim=2).transpose(-2, -1)\n v = self.v_proj(x)\n feat = self.c_proj(v)\n output = self.cls_seg(feat)\n if not self.training:\n output = self.refine_output(output, k)\n\n return output\n\n def cls_seg(self, feat):\n feat = feat / feat.norm(dim=1, keepdim=True)\n output = F.conv2d(feat, self.text_embeddings[:, :, None, None])\n \n return output\n\n def refine_output(self, output, k):\n if self.pd_thresh > 0:\n N, C, H, W = output.shape\n _output = F.softmax(output*100, dim=1)\n max_cls_conf = _output.view(N, C, -1).max(dim=-1)[0]\n selected_cls = (max_cls_conf < self.pd_thresh)[:, :, None, None].expand(N, C, H, W)\n output[selected_cls] = -100\n\n if k is not None and self.ks_thresh > 0:\n output = F.softmax(output*100, dim=1)\n N, C, H, W = output.shape\n output = output.view(N, C, -1).transpose(-2, -1)\n # softmax\n # weight = k @ k.transpose(-2, -1)\n # weight = F.softmax(weight, dim=-1)\n # L2 distance\n k = F.normalize(k, p=2)\n weight = k @ k.transpose(-2, -1)\n\n selected_pos = (output.max(dim=-1, keepdim=True)[0] < self.ks_thresh)\n selected_pos = selected_pos.expand(-1, -1, C)\n\n weighted_output = weight @ output\n output[selected_pos] = weighted_output[selected_pos]\n output = output.transpose(-2, -1).view(N, C, H, W)\n\n return output\n\n def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg):\n raise RuntimeError('MaskClip is not trainable. Try MaskClip+ instead.')" }, { "identifier": "DeepLabV3Plus", "path": "third_party/unimatch/model/semseg/deeplabv3plus.py", "snippet": "class DeepLabV3Plus(nn.Module):\n def __init__(self, cfg):\n super(DeepLabV3Plus, self).__init__()\n\n if 'resnet' in cfg['backbone']:\n self.backbone = resnet.__dict__[cfg['backbone']](pretrained=True, \n replace_stride_with_dilation=cfg['replace_stride_with_dilation'])\n else:\n assert cfg['backbone'] == 'xception'\n self.backbone = xception(pretrained=True)\n\n low_channels = 256\n high_channels = 2048\n\n self.head = ASPPModule(high_channels, cfg['dilations'])\n\n self.reduce = nn.Sequential(nn.Conv2d(low_channels, 48, 1, bias=False),\n nn.BatchNorm2d(48),\n nn.ReLU(True))\n\n self.fuse = nn.Sequential(nn.Conv2d(high_channels // 8 + 48, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n nn.Conv2d(256, 256, 3, padding=1, bias=False),\n nn.BatchNorm2d(256),\n nn.ReLU(True))\n\n self.classifier = nn.Conv2d(256, cfg['nclass'], 1, bias=True)\n\n def forward(self, x, need_fp=False, only_fp=False):\n h, w = x.shape[-2:]\n\n feats = self.backbone.base_forward(x)\n c1, c4 = feats[0], feats[-1]\n\n if only_fp:\n out_fp = self._decode(nn.Dropout2d(0.5)(c1),\n nn.Dropout2d(0.5)(c4))\n out_fp = F.interpolate(out_fp, size=(h, w), mode=\"bilinear\", align_corners=True)\n return out_fp\n elif need_fp:\n outs = self._decode(torch.cat((c1, nn.Dropout2d(0.5)(c1))),\n torch.cat((c4, nn.Dropout2d(0.5)(c4))))\n outs = F.interpolate(outs, size=(h, w), mode=\"bilinear\", align_corners=True)\n out, out_fp = outs.chunk(2)\n\n return out, out_fp\n\n out = self._decode(c1, c4)\n out = F.interpolate(out, size=(h, w), mode=\"bilinear\", align_corners=True)\n\n return out\n\n def _decode(self, c1, c4):\n c4 = self.head(c4)\n c4 = F.interpolate(c4, size=c1.shape[-2:], mode=\"bilinear\", align_corners=True)\n\n c1 = self.reduce(c1)\n\n feature = torch.cat([c1, c4], 
dim=1)\n feature = self.fuse(feature)\n\n out = self.classifier(feature)\n\n return out" }, { "identifier": "SegLossPlus", "path": "third_party/zegclip/losses/atm_loss.py", "snippet": "class SegLossPlus(nn.Module):\n \"\"\"ATMLoss.\n \"\"\"\n def __init__(self,\n num_classes,\n dec_layers,\n mask_weight=20.0,\n dice_weight=1.0,\n loss_weight=1.0,\n use_point=False):\n super(SegLossPlus, self).__init__()\n weight_dict = {\"loss_mask\": mask_weight, \"loss_dice\": dice_weight}\n aux_weight_dict = {}\n for i in range(dec_layers - 1):\n aux_weight_dict.update({k + f\"_{i}\": v for k, v in weight_dict.items()})\n weight_dict.update(aux_weight_dict)\n\n self.criterion = SegPlusCriterion(\n num_classes,\n weight_dict=weight_dict,\n losses=[\"masks\"],\n )\n\n self.loss_weight = loss_weight\n\n def forward(self,\n outputs,\n label,\n ignore_index=255,\n ):\n \"\"\"Forward function.\"\"\"\n \n self.ignore_index = ignore_index\n targets = self.prepare_targets(label)\n losses = self.criterion(outputs, targets)\n\n for k in list(losses.keys()):\n if k in self.criterion.weight_dict:\n losses[k] = losses[k] * self.criterion.weight_dict[k] * self.loss_weight\n else:\n # remove this loss if not specified in `weight_dict`\n losses.pop(k)\n\n return losses\n\n def prepare_targets(self, targets):\n new_targets = []\n for targets_per_image in targets:\n # gt_cls\n gt_cls = targets_per_image.unique()\n gt_cls = gt_cls[gt_cls != self.ignore_index]\n masks = []\n for cls in gt_cls:\n masks.append(targets_per_image == cls)\n if len(gt_cls) == 0:\n masks.append(targets_per_image == self.ignore_index)\n\n masks = torch.stack(masks, dim=0)\n new_targets.append(\n {\n \"labels\": gt_cls,\n \"target_masks\": masks,\n \"masks\": targets_per_image,\n }\n )\n return new_targets" }, { "identifier": "CLIPVisionTransformer", "path": "third_party/zegclip/models/backbones/clip_vit.py", "snippet": "class CLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=224, patch_size=32, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.0,\n out_indices=[3, 5, 7, 11], pretrained=None, get_embeddings=False, embed_v=False, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n self.ln_pre = LayerNorm(width)\n self.get_embeddings = get_embeddings\n self.embed_v = embed_v\n\n self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)\n\n self.out_indices = out_indices\n\n if get_embeddings:\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.patch_size = patch_size\n\n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('visual.'):\n new_k = k.replace('visual.', '')\n state_dict[new_k] = checkpoint[k]\n\n if 'positional_embedding' in state_dict.keys():\n if self.positional_embedding.shape != state_dict['positional_embedding'].shape:\n # (1025, 768) (197, 768) 
upsample the positional_embedding for larger input\n print(f'Resize the pos_embed shape from {state_dict[\"positional_embedding\"].shape} to {self.positional_embedding.shape}')\n cls_pos = state_dict[\"positional_embedding\"][0:1, :]\n if self.patch_size == 16:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n elif self.patch_size == 32:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 7, 7, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n else:\n assert ValueError('Patch Size should be 16 or 32')\n spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)\n positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)\n state_dict['positional_embedding'] = positional_embedding\n assert self.positional_embedding.shape == state_dict['positional_embedding'].shape\n\n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in vision transformer')\n\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x)\n B, C, H, W = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1)\n x = x.permute(0, 2, 1)\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)\n\n pos = self.positional_embedding.to(x.dtype)\n cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)\n spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')\n spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)\n pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)\n x = x + pos\n x = self.ln_pre(x)\n x = x.permute(1, 0, 2) # NLD -> LND\n\n features = []\n outs = []\n for i, blk in enumerate(self.transformer.resblocks):\n if self.embed_v and i == len(self.transformer.resblocks) - 1:\n y = blk.ln_1(x)\n y = F.linear(y, blk.attn.in_proj_weight, blk.attn.in_proj_bias)\n y_N, y_L, y_C = y.shape\n y = y.view(y_N, y_L, 3, y_C//3).permute(2, 0, 1, 3).reshape(3*y_N, y_L, y_C//3)\n y = F.linear(y, blk.attn.out_proj.weight, blk.attn.out_proj.bias)\n q, k, v = y.tensor_split(3, dim=0)\n v += x\n v = v + blk.mlp(blk.ln_2(v))\n x = blk(x)\n if len(self.out_indices) > 1:\n if i in self.out_indices:\n xp = x.permute(1, 0, 2)[:, 1:, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous())\n\n if self.get_embeddings:\n x = x.permute(1, 0, 2)\n x = self.ln_post(x)\n x = x @ self.proj\n\n global_embedding = x[:, 0]\n if self.embed_v:\n v = v.permute(1, 0, 2)\n v = self.ln_post(v)\n v = v @ self.proj\n visual_embedding = v[:, 1:].reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n else:\n visual_embedding = x[:, 1:].reshape(B, H, W, -1).permute(0, 3, 1, 2)\n\n if len(self.out_indices) == 1:\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n features.append(visual_embedding)\n\n outs.append(tuple(features))\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n outs.append(global_embedding)\n return outs" }, { "identifier": "VPTCLIPVisionTransformer", "path": "third_party/zegclip/models/backbones/clip_vpt_vit.py", "snippet": "class VPTCLIPVisionTransformer(nn.Module):\n def __init__(self, input_resolution=224, patch_size=32, width=768, layers=12, heads=12, output_dim=512, drop_path_rate=0.0,\n out_indices=[3, 5, 7, 11], 
pretrained=None, get_embeddings=False, embed_v=False,\n num_tokens=20, prompt_dim=512, total_d_layer=11, **kwargs):\n super().__init__()\n self.pretrained = pretrained\n self.input_resolution = input_resolution\n self.output_dim = output_dim\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn((input_resolution // patch_size) ** 2 + 1, width))\n self.spatial_size = input_resolution // patch_size\n self.ln_pre = LayerNorm(width)\n self.get_embeddings = get_embeddings\n self.embed_v = embed_v\n self.num_layers = layers\n\n self.transformer = Transformer(width, layers, heads, drop_path_rate=drop_path_rate)\n\n self.out_indices = out_indices\n\n if get_embeddings:\n self.ln_post = LayerNorm(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.patch_size = patch_size\n\n ## Setting of visual prompt tuning\n self.num_tokens = num_tokens\n self.prompt_dim = prompt_dim\n self.total_d_layer = total_d_layer\n\n ## Add the prompt parameters # exclude_key=prompt:\n self._init_prompt(patch_size, self.num_tokens, self.prompt_dim, self.total_d_layer)\n\n def _init_prompt(self, patch, num_tokens, prompt_dim, total_d_layer):\n patch_size = []\n patch_size.append(patch)\n patch_size.append(patch)\n val = math.sqrt(6. / float(3 * reduce(mul, patch_size, 1) + prompt_dim)) # noqa\n\n if total_d_layer >= 0:\n self.prompt_embeddings = nn.Parameter(torch.zeros(1, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.prompt_embeddings.data, -val, val)\n\n if total_d_layer > 0: # noqa\n self.deep_prompt_embeddings = nn.Parameter(torch.zeros(total_d_layer, num_tokens, prompt_dim))\n # xavier_uniform initialization\n nn.init.uniform_(self.deep_prompt_embeddings.data, -val, val)\n\n self.prompt_proj = nn.Linear(prompt_dim, prompt_dim)\n nn.init.kaiming_normal_(self.prompt_proj.weight, a=0, mode='fan_out')\n self.prompt_norm = LayerNorm(prompt_dim, eps=1e-6)\n self.prompt_dropout = Dropout(0.1)\n\n else: # total_d_layer < 0\n raise ValueError(f'Invalid total_d_layer={self.total_d_layer}.')\n\n\n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('visual.'):\n new_k = k.replace('visual.', '')\n state_dict[new_k] = checkpoint[k]\n\n if 'positional_embedding' in state_dict.keys():\n if self.positional_embedding.shape != state_dict['positional_embedding'].shape:\n # (1025, 768) (197, 768) upsample the positional_embedding for larger input\n print(f'Resize the pos_embed shape from {state_dict[\"positional_embedding\"].shape} to {self.positional_embedding.shape}')\n cls_pos = state_dict[\"positional_embedding\"][0:1, :]\n if self.patch_size == 16:\n spatial_pos = F.interpolate(state_dict[\"positional_embedding\"][1:,].reshape(1, 14, 14, 768).permute(0, 3, 1, 2), size=(self.spatial_size, self.spatial_size), mode='bilinear')\n else:\n assert ValueError('Patch Size should be 16 or 32')\n spatial_pos = spatial_pos.reshape(768, self.spatial_size*self.spatial_size).permute(1, 0)\n positional_embedding = torch.cat([cls_pos, spatial_pos], dim=0)\n state_dict['positional_embedding'] = positional_embedding\n assert self.positional_embedding.shape 
== state_dict['positional_embedding'].shape\n\n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in vision transformer')\n\n\n def forward(self, x: torch.Tensor):\n x = self.conv1(x)\n B, C, H, W = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1)\n x = x.permute(0, 2, 1)\n x = torch.cat([self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device), x], dim=1)\n\n pos = self.positional_embedding.to(x.dtype)\n cls_pos = pos[0,:] + self.class_embedding.to(x.dtype)\n spatial_pos = F.interpolate(pos[1:,].reshape(1, self.spatial_size, self.spatial_size, C).permute(0, 3, 1, 2), size=(H, W), mode='bilinear')\n spatial_pos = spatial_pos.reshape(1, C, H*W).permute(0, 2, 1)\n pos = torch.cat([cls_pos.reshape(1, 1, C), spatial_pos], dim=1)\n x = x + pos\n x = self.ln_pre(x)\n\n if self.total_d_layer >=0:\n # concat prompt\n x = torch.cat((\n x[:, :1, :],\n self.prompt_dropout(self.prompt_proj(self.prompt_embeddings).expand(B, -1, -1)),\n x[:, 1:, :]\n ), dim=1)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n\n features = []\n outs = []\n if self.total_d_layer == 0: #shallow\n assert not self.embed_v\n for i, blk in enumerate(self.transformer.resblocks):\n x = blk(x)\n if len(self.out_indices) > 1:\n if i in self.out_indices:\n xp = x.permute(1, 0, 2)[:, 1+self.num_tokens:, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous())\n elif self.total_d_layer > 0: # deep\n x, features, v = self.forward_deep_prompt(x, features, H, W)\n else:\n raise NotImplementedError(f'Invalid total_d_layer={self.total_d_layer}.')\n\n if self.get_embeddings:\n x = x.permute(1, 0, 2)\n x = self.ln_post(x)\n x = x @ self.proj\n\n global_embedding = x[:, 0]\n if self.embed_v:\n v = v.permute(1, 0, 2)\n v = self.ln_post(v)\n v = v @ self.proj\n visual_embedding = v[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2).contiguous()\n else:\n visual_embedding = x[:, -(H*W):].reshape(B, H, W, -1).permute(0, 3, 1, 2)\n\n if len(self.out_indices) == 1:\n visual_embedding = visual_embedding / visual_embedding.norm(dim=1, keepdim=True)\n features.append(visual_embedding)\n\n outs.append(tuple(features))\n global_embedding = global_embedding / global_embedding.norm(dim=1, keepdim=True)\n outs.append(global_embedding)\n return outs\n\n\n def forward_deep_prompt(self, embedding_output, features, H, W, out_last=False):\n B = embedding_output.shape[1]\n v = None\n\n for i in range(self.num_layers):\n if i == 0:\n hidden_states = self.transformer.resblocks[i](embedding_output)\n elif i <= self.deep_prompt_embeddings.shape[0]:\n deep_prompt_emb = self.prompt_dropout(self.prompt_proj(self.deep_prompt_embeddings[i-1]).expand(B, -1, -1)).permute(1, 0, 2)\n hidden_states = torch.cat((\n hidden_states[:1, :, :],\n deep_prompt_emb,\n hidden_states[(1+self.num_tokens):, :, :]\n ), dim=0)\n if self.embed_v and i == self.num_layers - 1:\n x = hidden_states\n blk = self.transformer.resblocks[i]\n y = blk.ln_1(x)\n y = F.linear(y, blk.attn.in_proj_weight, blk.attn.in_proj_bias)\n y_N, y_L, y_C = y.shape\n y = y.view(y_N, y_L, 3, y_C//3).permute(2, 0, 1, 3).reshape(3*y_N, y_L, y_C//3)\n y = F.linear(y, blk.attn.out_proj.weight, blk.attn.out_proj.bias)\n q, k, v = y.tensor_split(3, dim=0)\n v += x\n v = v + blk.mlp(blk.ln_2(v))\n\n hidden_states = self.transformer.resblocks[i](hidden_states)\n else:\n assert not self.embed_v\n hidden_states = torch.cat((\n hidden_states[:1, :, :],\n hidden_states[-(H*W):, :, :]\n ), dim=0)\n hidden_states = 
self.transformer.resblocks[i](hidden_states)\n\n if len(self.out_indices) > 1:\n if i in self.out_indices:\n xp = hidden_states.permute(1, 0, 2)[:, -(H*W):, :].permute(0, 2, 1).reshape(B, -1, H, W)\n features.append(xp.contiguous())\n\n if i == (self.num_layers-2): #10\n before_last_feats = self.prompt_norm(hidden_states)\n\n encoded = self.prompt_norm(hidden_states)\n if out_last:\n return before_last_feats\n else:\n return encoded, features, v" }, { "identifier": "CLIPTextEncoder", "path": "third_party/zegclip/models/backbones/text_encoder.py", "snippet": "class CLIPTextEncoder(nn.Module):\n def __init__(self, context_length=77,\n vocab_size=49408,\n transformer_width=512,\n transformer_heads=8,\n transformer_layers=12,\n embed_dim=1024,\n out_dim=256,\n pretrained=None, **kwargs):\n super().__init__()\n\n self.pretrained = pretrained\n\n self.context_length = context_length\n\n self.transformer = Transformer(\n width=transformer_width,\n layers=transformer_layers,\n heads=transformer_heads,\n attn_mask=self.build_attention_mask()\n )\n\n self.vocab_size = vocab_size\n self.token_embedding = nn.Embedding(vocab_size, transformer_width)\n self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))\n self.ln_final = LayerNorm(transformer_width)\n self.text_projection = nn.Parameter(torch.empty(transformer_width, embed_dim))\n \n def init_weights(self, pretrained=None):\n pretrained = pretrained or self.pretrained\n if isinstance(pretrained, str):\n checkpoint = torch.jit.load(pretrained, map_location='cpu').float().state_dict()\n\n state_dict = {}\n\n for k in checkpoint.keys():\n if k.startswith('transformer.'):\n state_dict[k] = checkpoint[k]\n \n if k == 'positional_embedding' or k == 'text_projection' or k.startswith('token_embedding') or k.startswith('ln_final'):\n if k == 'positional_embedding' and checkpoint[k].size(0) > self.context_length:\n checkpoint[k] = checkpoint[k][:self.context_length]\n print('positional_embedding is tuncated from 77 to', self.context_length)\n state_dict[k] = checkpoint[k]\n \n u, w = self.load_state_dict(state_dict, False)\n print(u, w, 'are misaligned params in text encoder')\n\n\n def build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the vision tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.context_length, self.context_length)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def forward(self, text):\n x = self.token_embedding(text)\n x = x + self.positional_embedding \n x = x.permute(1, 0, 2)\n x = self.transformer(x)\n x = x.permute(1, 0, 2)\n x = self.ln_final(x)\n x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection\n return x" }, { "identifier": "DropPath", "path": "third_party/zegclip/models/backbones/utils.py", "snippet": "class DropPath(nn.Module):\n \"\"\"Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).\n \"\"\"\n def __init__(self, drop_prob=None):\n super(DropPath, self).__init__()\n self.drop_prob = drop_prob\n\n def forward(self, x):\n return drop_path(x, self.drop_prob, self.training)\n \n def extra_repr(self) -> str:\n return 'p={}'.format(self.drop_prob)" }, { "identifier": "ATMSingleHeadSeg", "path": "third_party/zegclip/models/decode_heads/atm_head.py", "snippet": "class ATMSingleHeadSeg(BaseDecodeHead):\n def __init__(\n self,\n img_size,\n in_channels,\n seen_idx,\n all_idx,\n 
ignore_seen_pseudo_labels=True,\n embed_dims=768,\n num_layers=3,\n num_heads=8,\n use_stages=1,\n use_proj=True,\n crop_train=False,\n use_rd=True,\n use_aspp=False,\n aspp_relu=True,\n aspp_bn=True,\n aspp_residual=False,\n dilations=(6, 12, 18),\n **kwargs,\n ):\n super(ATMSingleHeadSeg, self).__init__(\n in_channels=in_channels, **kwargs)\n\n self.image_size = img_size\n self.use_stages = use_stages\n self.crop_train = crop_train\n self.use_rd = use_rd\n self.use_aspp = use_aspp\n self.aspp_residual = aspp_residual\n self.seen_idx = seen_idx\n self.all_idx = all_idx\n self.ignore_seen_pseudo_labels = ignore_seen_pseudo_labels\n self.debug = False\n nhead = num_heads\n dim = embed_dims\n input_proj = []\n proj_norm = []\n atm_decoders = []\n\n self.unseen_idx = self.all_idx.copy()\n for i_idx in self.seen_idx:\n self.unseen_idx.remove(i_idx)\n\n if self.use_aspp:\n self.aspp = ASPPModule(dim, dilations, out_channels=dim, bn=aspp_bn, relu=aspp_relu)\n self.add_module(\"aspp\", self.aspp)\n\n for i in range(self.use_stages):\n # FC layer to change ch\n if use_proj:\n proj = nn.Linear(self.in_channels, dim)\n trunc_normal_(proj.weight, std=.02)\n else:\n proj = nn.Identity()\n self.add_module(\"input_proj_{}\".format(i + 1), proj)\n input_proj.append(proj)\n # norm layer\n if use_proj:\n norm = nn.LayerNorm(dim)\n else:\n norm = nn.Identity()\n self.add_module(\"proj_norm_{}\".format(i + 1), norm)\n proj_norm.append(norm)\n # decoder layer\n decoder_layer = TPN_DecoderLayer(d_model=dim, nhead=nhead, dim_feedforward=dim * 4)\n decoder = TPN_Decoder(decoder_layer, num_layers)\n self.add_module(\"decoder_{}\".format(i + 1), decoder)\n atm_decoders.append(decoder)\n\n self.input_proj = input_proj\n self.proj_norm = proj_norm\n self.decoder = atm_decoders\n\n delattr(self, 'conv_seg')\n\n self.q_proj = nn.Linear(dim * 2 if self.use_rd else dim, dim)\n\n def init_weights(self):\n for n, m in self.named_modules():\n if isinstance(m, nn.Linear):\n trunc_normal_init(m, std=.02, bias=0)\n elif isinstance(m, nn.LayerNorm):\n constant_init(m, val=1.0, bias=0.0)\n\n def forward_train(self, inputs, img_metas, gt_semantic_seg, train_cfg, self_training=False):\n seg_logits = self.forward(inputs)\n\n if self.debug:\n pred = seg_logits['pred_masks'].detach().sigmoid().argmax(dim=1).unsqueeze(1)\n pred_cpu = pred.detach().cpu()\n gt_semantic_seg_cpu = gt_semantic_seg.detach().cpu()\n self.debug_output = [[\n dict(title='Prediction', data=pred_cpu[i], type='label'),\n dict(title='Ground Truth', data=gt_semantic_seg_cpu[i], type='label'),\n ] for i in range(pred.shape[0])]\n\n # print('Self-training: 'self_training)\n if self_training:\n pseudo_semantic_masks = seg_logits['pred_masks'].clone().detach().sigmoid()\n if self.ignore_seen_pseudo_labels:\n pseudo_semantic_masks[:, self.seen_idx, :, :] = -1\n assert -1 not in pseudo_semantic_masks.max(dim=1)[0], \\\n 'All classes are set to -1, which results in argmax defaulting to label 0'\n pseudo_semantic_seg = pseudo_semantic_masks.argmax(dim=1).unsqueeze(1)\n # generate pseudo labels for \"transductive\" setting\n # print('Before pseudo-labelling', gt_semantic_seg[0, 0])\n gt_semantic_seg[gt_semantic_seg==-1] = pseudo_semantic_seg[gt_semantic_seg==-1]\n gt_semantic_seg[gt_semantic_seg==-1] = 255\n # print('After pseudo-labelling', gt_semantic_seg[0, 0])\n losses = self.losses(seg_logits, gt_semantic_seg)\n\n else:\n gt_semantic_seg[gt_semantic_seg==-1] = 255\n losses = self.losses(seg_logits, gt_semantic_seg)\n\n if self.debug and self_training:\n for i in 
range(gt_semantic_seg.shape[0]):\n self.debug_output[i].append(\n dict(title='Pseudo Label', data=gt_semantic_seg[i].detach().cpu(), type='label'))\n\n return losses\n\n def forward_test(self, inputs, img_metas, test_cfg, self_training):\n return self.forward(inputs, self_training)\n\n def forward(self, inputs_both, self_training=None, force_output_pred_masks=False):\n inputs = inputs_both[0][0]\n cls_token = inputs_both[0][1]\n text_token = inputs_both[1]\n \n x = []\n for stage_ in inputs[:self.use_stages]:\n x.append(self.d4_to_d3(stage_) if stage_.dim() > 3 else stage_)\n x.reverse()\n bs = x[0].size()[0]\n\n laterals = []\n attns = []\n maps_size = []\n qs = []\n\n for idx, (x_, proj_, norm_) in enumerate(zip(x, self.input_proj, self.proj_norm)):\n lateral = norm_(proj_(x_))\n if idx == 0:\n laterals.append(lateral)\n else:\n if laterals[idx - 1].size()[1] == lateral.size()[1]:\n laterals.append(lateral + laterals[idx - 1])\n else:\n # nearest interpolate\n l_ = self.d3_to_d4(laterals[idx - 1])\n l_ = F.interpolate(l_, scale_factor=2, mode=\"nearest\")\n l_ = self.d4_to_d3(l_)\n laterals.append(l_ + lateral)\n\n lateral = laterals[-1]\n if self.use_aspp:\n if self.aspp_residual:\n lateral = lateral + 0.01 * self.d4_to_d3(self.aspp(self.d3_to_d4(lateral)))\n else:\n lateral = self.d3_to_d4(lateral)\n lateral = self.aspp(lateral)\n lateral = self.d4_to_d3(lateral)\n\n q = self.q_proj(self.get_qs(text_token, cls_token))\n q = q.transpose(0,1)\n\n for idx, decoder_ in enumerate(self.decoder):\n q_, attn_ = decoder_(q, lateral.transpose(0, 1))\n for q, attn in zip(q_, attn_):\n attn = attn.transpose(-1, -2) \n attn = self.d3_to_d4(attn)\n maps_size.append(attn.size()[-2:])\n qs.append(q.transpose(0, 1))\n attns.append(attn)\n qs = torch.stack(qs, dim=0)\n\n outputs_seg_masks = []\n size = maps_size[-1]\n\n for i_attn, attn in enumerate(attns):\n if attn.shape[1] != self.num_classes:\n cls2con = get_class_to_concept_idxs(self.load_text_embedding)\n attn = aggregate_concept_predictions(attn, cls2con)\n assert attn.shape[1] == self.num_classes\n outputs_seg_masks.append(F.interpolate(attn, size=size, mode='bilinear', align_corners=False))\n\n pred = F.interpolate(outputs_seg_masks[-1],\n size=(self.image_size, self.image_size),\n mode='bilinear', align_corners=False)\n \n out = {\"pred_masks\": pred}\n\n \n if self.training or force_output_pred_masks:\n outputs_seg_masks = torch.stack(outputs_seg_masks, dim=0)# (3, bs, 20, 14, 14)\n else:\n if self_training:\n out[\"pred\"] = self.semantic_inference(out[\"pred_masks\"], self.seen_idx) #(bs, 20, 224, 224)\n else:\n out[\"pred\"] = self.semantic_inference(out[\"pred_masks\"], self.seen_idx, 0.1)\n return out[\"pred\"] \n return out\n\n def semantic_inference(self, mask_pred, seen_idx, weight=0.0):\n mask_pred = mask_pred.sigmoid()\n mask_pred[:,seen_idx] = mask_pred[:,seen_idx] - weight\n return mask_pred\n\n @torch.jit.unused\n def _set_aux_loss(self, outputs_seg_masks):\n return [\n {\"pred_masks\": a}\n # for a in zip(outputs_seg_masks[:-1])\n for a in outputs_seg_masks[:-1]\n ]\n\n def d3_to_d4(self, t):\n n, hw, c = t.size()\n if hw % 2 != 0:\n t = t[:, 1:]\n h = w = int(math.sqrt(hw))\n assert h * w == hw\n return t.transpose(1, 2).reshape(n, c, h, w)\n\n def d4_to_d3(self, t):\n return t.flatten(-2).transpose(-1, -2)\n\n def get_qs(self, q, cls):\n # q = [q.cls, q]\n C, dim = q.shape\n bs, _ = cls.shape\n q = q.expand(bs, -1, -1)\n if self.use_rd:\n q1 = torch.einsum(\"bd,bcd->bcd\", cls, q)\n q_ = torch.concat((q1, q), dim=-1)\n else:\n 
q_ = q.to(cls.dtype)\n return q_\n\n\n @force_fp32(apply_to=('seg_logit',))\n def losses(self, seg_logit, seg_label, num_classes=None):\n \"\"\"Compute segmentation loss.\"\"\"\n if isinstance(seg_logit, dict):\n # atm loss\n seg_label = seg_label.squeeze(1)\n\n loss = self.loss_decode(\n seg_logit,\n seg_label,\n ignore_index = self.ignore_index)\n\n loss['acc_seg'] = accuracy(seg_logit[\"pred_masks\"], seg_label, ignore_index=self.ignore_index)\n return loss" } ]
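The `get_qs` method closing the ATMSingleHeadSeg snippet above builds ZegCLIP-style relationship descriptors: each class's text embedding is modulated by the image's global CLS embedding and concatenated with the unmodulated embedding before `q_proj`. A minimal sketch of that step, separate from the dataset record, with all shapes assumed for illustration:

```python
# Sketch of the relationship-descriptor step in ATMSingleHeadSeg.get_qs above.
# Assumed shapes: q is (C, dim) text embeddings (one per class),
# cls is (bs, dim) per-image global CLS embeddings.
import torch

def relationship_descriptor(q: torch.Tensor, cls: torch.Tensor) -> torch.Tensor:
    bs = cls.shape[0]
    q = q.expand(bs, -1, -1)                  # (bs, C, dim)
    q1 = torch.einsum("bd,bcd->bcd", cls, q)  # scale each query by its image embedding
    return torch.cat((q1, q), dim=-1)         # (bs, C, 2 * dim), fed to q_proj

q = torch.randn(20, 512)   # C = 20 classes, dim = 512
cls = torch.randn(2, 512)  # bs = 2
print(relationship_descriptor(q, cls).shape)  # torch.Size([2, 20, 1024])
```

Doubling the feature dimension this way is why, in the snippet above, `q_proj` takes `dim * 2` inputs when `use_rd` is enabled.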
import types
import torch
from functools import reduce
from mmcv.utils import Config
from mmseg.models import ASPPHead, DepthwiseSeparableASPPHead, build_segmentor
from mmseg.ops import resize
from torch.nn import functional as F
from model.backbone.timm_vit import TIMMVisionTransformer
from model.decode_heads.dlv3p_head import DLV3PHead
from model.decode_heads.vlg_head import VLGHead
from model.vlm import VLM
from third_party.maskclip.models.backbones.maskclip_vit import MaskClipVisionTransformer
from third_party.maskclip.models.decode_heads.maskclip2_head import MaskClip2Head
from third_party.maskclip.models.decode_heads.maskclip_head import MaskClipHead
from third_party.unimatch.model.semseg.deeplabv3plus import DeepLabV3Plus
from third_party.zegclip.losses.atm_loss import SegLossPlus
from third_party.zegclip.models.backbones.clip_vit import CLIPVisionTransformer
from third_party.zegclip.models.backbones.clip_vpt_vit import VPTCLIPVisionTransformer
from third_party.zegclip.models.backbones.text_encoder import CLIPTextEncoder
from third_party.zegclip.models.backbones.utils import DropPath
from third_party.zegclip.models.decode_heads.atm_head import ATMSingleHeadSeg
19928
# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def nested_set(dic, key, value): keys = key.split('.') for key in keys[:-1]: dic = dic.setdefault(key, {}) dic[keys[-1]] = value def nested_get(dictionary, keys, default=None): return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary) def is_vlm(obj): return isinstance(obj, VLM) def forward_wrapper(self, img, gt=None, need_fp=False, only_fp=False, forward_mode='default'): if forward_mode == 'maskclip_trust': return self.train_maskclip_trust(img, gt) elif forward_mode == 'default': x = self.extract_feat(img) if self.disable_dropout: dropout_modules = [module for module in self.modules() if isinstance(module, torch.nn.Dropout) or isinstance(module, DropPath)] for module in dropout_modules: module.eval() if only_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [F.dropout2d(f, self.fp_rate) for f in feats] # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [F.dropout2d(f, self.fp_rate) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append(feats) else: x = [F.dropout2d(f, self.fp_rate) for f in x] elif need_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in feats] x[0][1] = torch.cat((x[0][1], x[0][1])) # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append([torch.cat((f, f)) for f in feats]) else: x = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x] out = self._decode_head_forward_test(x, img_metas=None) out = resize( input=out, size=img.shape[2:], mode='bilinear', align_corners=self.align_corners) if need_fp: out = out.chunk(2) return out else: raise ValueError(forward_mode) def build_model(cfg): model_type = cfg['model'] if model_type == 'deeplabv3plus':
# Copyright 2023 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def nested_set(dic, key, value): keys = key.split('.') for key in keys[:-1]: dic = dic.setdefault(key, {}) dic[keys[-1]] = value def nested_get(dictionary, keys, default=None): return reduce(lambda d, key: d.get(key, default) if isinstance(d, dict) else default, keys.split("."), dictionary) def is_vlm(obj): return isinstance(obj, VLM) def forward_wrapper(self, img, gt=None, need_fp=False, only_fp=False, forward_mode='default'): if forward_mode == 'maskclip_trust': return self.train_maskclip_trust(img, gt) elif forward_mode == 'default': x = self.extract_feat(img) if self.disable_dropout: dropout_modules = [module for module in self.modules() if isinstance(module, torch.nn.Dropout) or isinstance(module, DropPath)] for module in dropout_modules: module.eval() if only_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [F.dropout2d(f, self.fp_rate) for f in feats] # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [F.dropout2d(f, self.fp_rate) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append(feats) else: x = [F.dropout2d(f, self.fp_rate) for f in x] elif need_fp: if is_vlm(self): feats = x[0][0] x[0][0] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in feats] x[0][1] = torch.cat((x[0][1], x[0][1])) # perturb features from conv_encoder if len(x) == 3 and x[2] is not None: x[2] = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x[2]] # also provide unperturbed features if hasattr(self.decode_head, 'dc_unperturbed') and self.decode_head.dc_unperturbed: assert len(x[0]) == 2 x[0].append([torch.cat((f, f)) for f in feats]) else: x = [torch.cat((f, F.dropout2d(f, self.fp_rate))) for f in x] out = self._decode_head_forward_test(x, img_metas=None) out = resize( input=out, size=img.shape[2:], mode='bilinear', align_corners=self.align_corners) if need_fp: out = out.chunk(2) return out else: raise ValueError(forward_mode) def build_model(cfg): model_type = cfg['model'] if model_type == 'deeplabv3plus':
model = DeepLabV3Plus(cfg)
7
2023-11-02 14:49:38+00:00
24k
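In the `forward_wrapper` shown in the cropped_code/all_code fields above, `need_fp` duplicates the backbone features, perturbs the copy with channel dropout (`F.dropout2d`), decodes both halves in a single pass, and chunks the output back into clean and perturbed predictions for UniMatch-style consistency training. A hedged, self-contained sketch of that core pattern; the 1x1-conv decoder and all shapes are stand-ins for illustration, not the repository's real modules:

```python
# Sketch of the need_fp feature-perturbation path from forward_wrapper above.
import torch
import torch.nn.functional as F

def forward_with_fp(f: torch.Tensor, decoder, fp_rate: float = 0.5):
    x = torch.cat((f, F.dropout2d(f, fp_rate)))  # stack clean and perturbed features
    out = decoder(x)                             # one decoder pass over both halves
    return out.chunk(2)                          # (clean_pred, perturbed_pred)

decoder = torch.nn.Conv2d(64, 19, kernel_size=1)  # toy stand-in decode head
f = torch.randn(4, 64, 32, 32)                    # (bs, channels, H, W)
pred, pred_fp = forward_with_fp(f, decoder)
print(pred.shape, pred_fp.shape)                  # both torch.Size([4, 19, 32, 32])
```

Concatenating before decoding, rather than decoding twice, keeps normalization statistics shared between the clean and perturbed views, which appears to be the reason the wrapper builds one doubled batch.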
codefuse-ai/Collinear-Constrained-Attention
model/build_model.py
[ { "identifier": "get_model_params_num", "path": "utils/common_utils.py", "snippet": "def get_model_params_num(model):\n \"\"\"\n Get params number of the model\n Args:\n model: model(required)\n Returns:\n the number of parameters of model\n \"\"\"\n num = 0\n for _, param in model.named_parameters():\n num += param.nelement()\n return num" }, { "identifier": "GPTNeoXConfig", "path": "model/gpt_neox/configuration_gpt_neox.py", "snippet": "class GPTNeoXConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`GPTNeoXModel`]. It is used to instantiate an\n GPTNeoX model according to the specified arguments, defining the model architecture. Instantiating a configuration\n with the defaults will yield a similar configuration to that of the GPTNeoX\n [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) architecture.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 50432):\n Vocabulary size of the GPTNeoX model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`GPTNeoXModel`].\n hidden_size (`int`, *optional*, defaults to 6144):\n Dimension of the encoder layers and the pooler layer.\n num_hidden_layers (`int`, *optional*, defaults to 44):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 64):\n Number of attention heads for each attention layer in the Transformer encoder.\n intermediate_size (`int`, *optional*, defaults to 24576):\n Dimension of the \"intermediate\" (i.e., feed-forward) layer in the Transformer encoder.\n hidden_act (`str` or `function`, *optional*, defaults to `\"gelu\"`):\n The non-linear activation function (function or string) in the encoder and pooler. If string, `\"gelu\"`,\n `\"relu\"`, `\"selu\"` and `\"gelu_new\"` are supported.\n rotary_pct (`float`, *optional*, defaults to 0.25):\n percentage of hidden dimensions to allocate to rotary embeddings\n rotary_emb_base (`int`, *optional*, defaults to 10000)\n base for computing rotary embeddings frequency\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 1e-5):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n layer_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the layer normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n use_parallel_residual (`bool`, *optional*, defaults to `True`):\n Whether to use a \"parallel\" formulation in each Transformer layer, which can provide a slight training\n speedup at large scales (e.g. 20B).\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports three scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. 
When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. This is an\n experimental feature, subject to breaking API changes in future versions.\n Example:\n\n ```python\n >>> from transformers import GPTNeoXConfig, GPTNeoXModel\n\n >>> # Initializing a GPTNeoX gpt-neox-20b style configuration\n >>> configuration = GPTNeoXConfig()\n\n >>> # Initializing a model (with random weights) from the gpt-neox-20b style configuration\n >>> model = GPTNeoXModel(configuration) # doctest: +SKIP\n\n >>> # Accessing the model configuration\n >>> configuration = model.config # doctest: +SKIP\n ```\"\"\"\n model_type = \"gpt_neox\"\n\n def __init__(\n self,\n vocab_size=50432,\n hidden_size=6144,\n num_hidden_layers=44,\n num_attention_heads=64,\n intermediate_size=24576,\n hidden_act=\"gelu\",\n rotary_pct=0.25,\n rotary_emb_base=10000,\n max_position_embeddings=2048,\n initializer_range=0.02,\n layer_norm_eps=1e-5,\n use_cache=True,\n bos_token_id=0,\n eos_token_id=2,\n tie_word_embeddings=False,\n use_parallel_residual=True,\n rope_scaling=None,\n **kwargs\n ):\n super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n self.intermediate_size = intermediate_size\n self.hidden_act = hidden_act\n self.rotary_pct = rotary_pct\n self.rotary_emb_base = rotary_emb_base\n self.initializer_range = initializer_range\n self.layer_norm_eps = layer_norm_eps\n self.use_cache = use_cache\n self.tie_word_embeddings = tie_word_embeddings\n self.use_parallel_residual = use_parallel_residual\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n if self.hidden_size % self.num_attention_heads != 0:\n raise ValueError(\n \"The hidden size is not divisble by the number of attention heads! 
Make sure to update them!\"\n )\n\n # Copied from transformers.models.llama.configuration_llama.LlamaConfig._rope_scaling_validation\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "GPTNeoXForCausalLM", "path": "model/gpt_neox/modeling_gpt_neox.py", "snippet": "class GPTNeoXForCausalLM(GPTNeoXPreTrainedModel):\n\n # _keys_to_ignore_on_load_missing = [r\"position_ids\", r\"predictions.decoder.bias\"]\n\n def __init__(self, config):\n super().__init__(config)\n\n self.gpt_neox = GPTNeoXModel(config)\n self.embed_out = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_output_embeddings(self):\n return self.embed_out\n\n def set_output_embeddings(self, new_embeddings):\n self.embed_out = new_embeddings\n\n @add_start_docstrings_to_model_forward(GPT_NEOX_INPUTS_DOCSTRING.format(\"batch_size, sequence_length\"))\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: Optional[torch.LongTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n head_mask: Optional[torch.FloatTensor] = None,\n past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n past_key_values (`tuple(tuple(torch.FloatTensor))`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):\n Tuple of `tuple(torch.FloatTensor)` of length `config.n_layers`, with each tuple having 2 tensors of shape\n `(batch_size, num_heads, sequence_length, embed_size_per_head)`) and 2 additional tensors of shape\n `(batch_size, num_heads, encoder_sequence_length, embed_size_per_head)`. 
The two additional tensors are\n only required when the model is used as a decoder in a Sequence to Sequence model.\n\n Contains pre-computed hidden-states (key and values in the self-attention blocks that can be used (see\n `past_key_values` input) to speed up sequential decoding.\n\n If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that\n don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all\n `decoder_input_ids` of shape `(batch_size, sequence_length)`.\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in\n `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are\n ignored (masked), the loss is only computed for the tokens with labels n `[0, ..., config.vocab_size]`.\n use_cache (`bool`, *optional*):\n If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see\n `past_key_values`).\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, GPTNeoXForCausalLM, GPTNeoXConfig\n >>> import torch\n\n >>> tokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n >>> config = GPTNeoXConfig.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n >>> config.is_decoder = True\n >>> model = GPTNeoXForCausalLM.from_pretrained(\"EleutherAI/gpt-neox-20b\", config=config)\n\n >>> inputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\n >>> outputs = model(**inputs)\n\n >>> prediction_logits = outputs.logits\n ```\"\"\"\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n outputs = self.gpt_neox(\n input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n head_mask=head_mask,\n inputs_embeds=inputs_embeds,\n past_key_values=past_key_values,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n lm_logits = self.embed_out(hidden_states)\n\n lm_loss = None\n if labels is not None:\n # move labels to correct device to enable model parallelism\n labels = labels.to(lm_logits.device)\n # we are doing next-token prediction; shift prediction scores and input ids by one\n shift_logits = lm_logits[:, :-1, :].contiguous()\n labels = labels[:, 1:].contiguous()\n loss_fct = CrossEntropyLoss()\n lm_loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), labels.view(-1))\n\n if not return_dict:\n output = (lm_logits,) + outputs[1:]\n return ((lm_loss,) + output) if lm_loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=lm_loss,\n logits=lm_logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n input_shape = input_ids.shape\n\n # cut decoder_input_ids if past is used\n if past_key_values and past_key_values[0] is not None:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if 
past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if model is used as a decoder in encoder-decoder model, the decoder attention mask is created on the fly\n if attention_mask is None:\n attention_mask = input_ids.new_ones(input_shape)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"attention_mask\": attention_mask,\n \"past_key_values\": past_key_values,\n \"position_ids\": position_ids,\n }\n )\n\n return model_inputs\n\n def _reorder_cache(self, past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx) for past_state in layer_past[:2]) + layer_past[2:],\n )\n return reordered_past" }, { "identifier": "GPTNeoXTokenizerFast", "path": "model/gpt_neox/tokenization_gpt_neox_fast.py", "snippet": "class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):\n \"\"\"\n Construct a \"fast\" GPT-NeoX-20B tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level\n Byte-Pair-Encoding.\n\n This tokenizer has been trained to treat spaces like parts of the tokens (a bit like sentencepiece) so a word will\n be encoded differently whether it is at the beginning of the sentence (without space) or not:\n\n ```python\n >>> from transformers import GPTNeoXTokenizerFast\n\n >>> tokenizer = GPTNeoXTokenizerFast.from_pretrained(\"gpt2\")\n >>> tokenizer(\"Hello world\")[\"input_ids\"]\n [15496, 995]\n\n >>> tokenizer(\" Hello world\")[\"input_ids\"]\n [18435, 995]\n ```\n\n You can get around that behavior by passing `add_prefix_space=True` when instantiating this tokenizer, but since\n the model was not pretrained this way, it might yield a decrease in performance.\n\n <Tip>\n\n When used with `is_split_into_words=True`, this tokenizer needs to be instantiated with `add_prefix_space=True`.\n\n </Tip>\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n merges_file (`str`):\n Path to the merges file.\n errors (`str`, *optional*, defaults to `\"replace\"`):\n Paradigm to follow when decoding bytes to UTF-8. See\n [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.\n unk_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n bos_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The beginning of sequence token.\n eos_token (`str`, *optional*, defaults to `<|endoftext|>`):\n The end of sequence token.\n add_prefix_space (`bool`, *optional*, defaults to `False`):\n Whether or not to add an initial space to the input. This allows to treat the leading word just as any\n other word. 
(GPTNeoX tokenizer detect beginning of words by the preceding space).\n trim_offsets (`bool`, *optional*, defaults to `True`):\n Whether or not the post-processing step should trim offsets to avoid including whitespaces.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file=None,\n merges_file=None,\n tokenizer_file=None,\n unk_token=\"<|endoftext|>\",\n bos_token=\"<|endoftext|>\",\n eos_token=\"<|endoftext|>\",\n add_prefix_space=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file,\n merges_file,\n tokenizer_file=tokenizer_file,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n add_prefix_space=add_prefix_space,\n **kwargs,\n )\n\n pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())\n if pre_tok_state.get(\"add_prefix_space\", add_prefix_space) != add_prefix_space:\n pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop(\"type\"))\n pre_tok_state[\"add_prefix_space\"] = add_prefix_space\n self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)\n\n self.add_prefix_space = add_prefix_space\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n files = self._tokenizer.model.save(save_directory, name=filename_prefix)\n return tuple(files)\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n \"\"\"This corresponds to DialoGPT variants of models.\"\"\"\n input_ids = []\n for is_user, text in conversation.iter_texts():\n input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])\n\n if len(input_ids) > self.model_max_length:\n input_ids = input_ids[-self.model_max_length :]\n return input_ids" }, { "identifier": "LlamaConfig", "path": "model/llama/configuration_llama.py", "snippet": "class LlamaConfig(PretrainedConfig):\n r\"\"\"\n This is the configuration class to store the configuration of a [`LlamaModel`]. It is used to instantiate an LLaMA\n model according to the specified arguments, defining the model architecture. Instantiating a configuration with the\n defaults will yield a similar configuration to that of the LLaMA-7B.\n\n Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the\n documentation from [`PretrainedConfig`] for more information.\n\n\n Args:\n vocab_size (`int`, *optional*, defaults to 32000):\n Vocabulary size of the LLaMA model. Defines the number of different tokens that can be represented by the\n `inputs_ids` passed when calling [`LlamaModel`]\n hidden_size (`int`, *optional*, defaults to 4096):\n Dimension of the hidden representations.\n intermediate_size (`int`, *optional*, defaults to 11008):\n Dimension of the MLP representations.\n num_hidden_layers (`int`, *optional*, defaults to 32):\n Number of hidden layers in the Transformer encoder.\n num_attention_heads (`int`, *optional*, defaults to 32):\n Number of attention heads for each attention layer in the Transformer encoder.\n num_key_value_heads (`int`, *optional*):\n This is the number of key_value heads that should be used to implement Grouped Query Attention. If\n `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA), if\n `num_key_value_heads=1 the model will use Multi Query Attention (MQA) otherwise GQA is used. 
When\n converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed\n by meanpooling all the original heads within that group. For more details checkout [this\n paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to\n `num_attention_heads`.\n pretraining_tp (`int`, *optional*, defaults to `1`):\n Experimental feature. Tensor parallelism rank used during pretraining. Please refer to [this\n document](https://huggingface.co/docs/transformers/parallelism) to understand more about it. This value is\n necessary to ensure exact reproducibility of the pretraining results. Please refer to [this\n issue](https://github.com/pytorch/pytorch/issues/76232).\n hidden_act (`str` or `function`, *optional*, defaults to `\"silu\"`):\n The non-linear activation function (function or string) in the decoder.\n max_position_embeddings (`int`, *optional*, defaults to 2048):\n The maximum sequence length that this model might ever be used with. Typically set this to something large\n just in case (e.g., 512 or 1024 or 2048).\n initializer_range (`float`, *optional*, defaults to 0.02):\n The standard deviation of the truncated_normal_initializer for initializing all weight matrices.\n rms_norm_eps (`float`, *optional*, defaults to 1e-12):\n The epsilon used by the rms normalization layers.\n use_cache (`bool`, *optional*, defaults to `True`):\n Whether or not the model should return the last key/values attentions (not used by all models). Only\n relevant if `config.is_decoder=True`.\n tie_word_embeddings(`bool`, *optional*, defaults to `False`):\n Whether to tie weight embeddings\n rope_scaling (`Dict`, *optional*):\n Dictionary containing the scaling configuration for the RoPE embeddings. Currently supports two scaling\n strategies: linear and dynamic. Their scaling factor must be an float greater than 1. The expected format\n is `{\"type\": strategy name, \"factor\": scaling factor}`. When using this flag, don't update\n `max_position_embeddings` to the expected new maximum. See the following thread for more information on how\n these scaling strategies behave:\n https://www.reddit.com/r/LocalLLaMA/comments/14mrgpr/dynamically_scaled_rope_further_increases/. 
This is an\n experimental feature, subject to breaking API changes in future versions.\n\n Example:\n\n ```python\n >>> from transformers import LlamaModel, LlamaConfig\n\n >>> # Initializing a LLaMA llama-7b style configuration\n >>> configuration = LlamaConfig()\n\n >>> # Initializing a model from the llama-7b style configuration\n >>> model = LlamaModel(configuration)\n\n >>> # Accessing the model configuration\n >>> configuration = model.config\n ```\"\"\"\n model_type = \"llama\"\n keys_to_ignore_at_inference = [\"past_key_values\"]\n\n def __init__(\n self,\n vocab_size=32000,\n hidden_size=4096,\n intermediate_size=11008,\n num_hidden_layers=32,\n num_attention_heads=32,\n num_key_value_heads=None,\n hidden_act=\"silu\",\n max_position_embeddings=2048,\n initializer_range=0.02,\n rms_norm_eps=1e-6,\n use_cache=True,\n pad_token_id=None,\n bos_token_id=1,\n eos_token_id=2,\n pretraining_tp=1,\n tie_word_embeddings=False,\n rope_scaling=None,\n **kwargs,\n ):\n self.vocab_size = vocab_size\n self.max_position_embeddings = max_position_embeddings\n self.hidden_size = hidden_size\n self.intermediate_size = intermediate_size\n self.num_hidden_layers = num_hidden_layers\n self.num_attention_heads = num_attention_heads\n\n # for backward compatibility\n if num_key_value_heads is None:\n num_key_value_heads = num_attention_heads\n\n self.num_key_value_heads = num_key_value_heads\n self.hidden_act = hidden_act\n self.initializer_range = initializer_range\n self.rms_norm_eps = rms_norm_eps\n self.pretraining_tp = pretraining_tp\n self.use_cache = use_cache\n self.rope_scaling = rope_scaling\n self._rope_scaling_validation()\n\n super().__init__(\n pad_token_id=pad_token_id,\n bos_token_id=bos_token_id,\n eos_token_id=eos_token_id,\n tie_word_embeddings=tie_word_embeddings,\n **kwargs,\n )\n\n def _rope_scaling_validation(self):\n \"\"\"\n Validate the `rope_scaling` configuration.\n \"\"\"\n if self.rope_scaling is None:\n return\n\n if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:\n raise ValueError(\n \"`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, \"\n f\"got {self.rope_scaling}\"\n )\n rope_scaling_type = self.rope_scaling.get(\"type\", None)\n rope_scaling_factor = self.rope_scaling.get(\"factor\", None)\n if rope_scaling_type is None or rope_scaling_type not in [\"linear\", \"dynamic\"]:\n raise ValueError(\n f\"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}\"\n )\n if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:\n raise ValueError(f\"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}\")" }, { "identifier": "LlamaForCausalLM", "path": "model/llama/modeling_llama.py", "snippet": "class LlamaForCausalLM(LlamaPreTrainedModel):\n _tied_weights_keys = [\"lm_head.weight\"]\n\n def __init__(self, config):\n super().__init__(config)\n self.model = LlamaModel(config)\n self.vocab_size = config.vocab_size\n self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)\n\n # Initialize weights and apply final processing\n self.post_init()\n\n def get_input_embeddings(self):\n return self.model.embed_tokens\n\n def set_input_embeddings(self, value):\n self.model.embed_tokens = value\n\n def get_output_embeddings(self):\n return self.lm_head\n\n def set_output_embeddings(self, new_embeddings):\n self.lm_head = new_embeddings\n\n def set_decoder(self, decoder):\n self.model = decoder\n\n def 
get_decoder(self):\n return self.model\n\n @add_start_docstrings_to_model_forward(LLAMA_INPUTS_DOCSTRING)\n @replace_return_docstrings(output_type=CausalLMOutputWithPast, config_class=_CONFIG_FOR_DOC)\n def forward(\n self,\n input_ids: torch.LongTensor = None,\n attention_mask: Optional[torch.Tensor] = None,\n position_ids: Optional[torch.LongTensor] = None,\n past_key_values: Optional[List[torch.FloatTensor]] = None,\n inputs_embeds: Optional[torch.FloatTensor] = None,\n labels: Optional[torch.LongTensor] = None,\n use_cache: Optional[bool] = None,\n output_attentions: Optional[bool] = None,\n output_hidden_states: Optional[bool] = None,\n return_dict: Optional[bool] = None,\n ) -> Union[Tuple, CausalLMOutputWithPast]:\n r\"\"\"\n Args:\n labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):\n Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,\n config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored\n (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.\n\n Returns:\n\n Example:\n\n ```python\n >>> from transformers import AutoTokenizer, LlamaForCausalLM\n\n >>> model = LlamaForCausalLM.from_pretrained(PATH_TO_CONVERTED_WEIGHTS)\n >>> tokenizer = AutoTokenizer.from_pretrained(PATH_TO_CONVERTED_TOKENIZER)\n\n >>> prompt = \"Hey, are you conscious? Can you talk to me?\"\n >>> inputs = tokenizer(prompt, return_tensors=\"pt\")\n\n >>> # Generate\n >>> generate_ids = model.generate(inputs.input_ids, max_length=30)\n >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]\n \"Hey, are you conscious? Can you talk to me?\\nI'm not conscious, but I can talk to you.\"\n ```\"\"\"\n\n output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions\n output_hidden_states = (\n output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states\n )\n return_dict = return_dict if return_dict is not None else self.config.use_return_dict\n\n # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)\n outputs = self.model(\n input_ids=input_ids,\n attention_mask=attention_mask,\n position_ids=position_ids,\n past_key_values=past_key_values,\n inputs_embeds=inputs_embeds,\n use_cache=use_cache,\n output_attentions=output_attentions,\n output_hidden_states=output_hidden_states,\n return_dict=return_dict,\n )\n\n hidden_states = outputs[0]\n if self.config.pretraining_tp > 1:\n lm_head_slices = self.lm_head.weight.split(self.vocab_size // self.config.pretraining_tp, dim=0)\n logits = [F.linear(hidden_states, lm_head_slices[i]) for i in range(self.config.pretraining_tp)]\n logits = torch.cat(logits, dim=-1)\n else:\n logits = self.lm_head(hidden_states)\n logits = logits.float()\n\n loss = None\n if labels is not None:\n # Shift so that tokens < n predict n\n shift_logits = logits[..., :-1, :].contiguous()\n shift_labels = labels[..., 1:].contiguous()\n # Flatten the tokens\n loss_fct = CrossEntropyLoss()\n shift_logits = shift_logits.view(-1, self.config.vocab_size)\n shift_labels = shift_labels.view(-1)\n # Enable model parallelism\n shift_labels = shift_labels.to(shift_logits.device)\n loss = loss_fct(shift_logits, shift_labels)\n\n if not return_dict:\n output = (logits,) + outputs[1:]\n return (loss,) + output if loss is not None else output\n\n return CausalLMOutputWithPast(\n loss=loss,\n 
logits=logits,\n past_key_values=outputs.past_key_values,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n def prepare_inputs_for_generation(\n self, input_ids, past_key_values=None, attention_mask=None, inputs_embeds=None, **kwargs\n ):\n if past_key_values:\n input_ids = input_ids[:, -1:]\n\n position_ids = kwargs.get(\"position_ids\", None)\n if attention_mask is not None and position_ids is None:\n # create position_ids on the fly for batch generation\n position_ids = attention_mask.long().cumsum(-1) - 1\n position_ids.masked_fill_(attention_mask == 0, 1)\n if past_key_values:\n position_ids = position_ids[:, -1].unsqueeze(-1)\n\n # if `inputs_embeds` are passed, we only want to use them in the 1st generation step\n if inputs_embeds is not None and past_key_values is None:\n model_inputs = {\"inputs_embeds\": inputs_embeds}\n else:\n model_inputs = {\"input_ids\": input_ids}\n\n model_inputs.update(\n {\n \"position_ids\": position_ids,\n \"past_key_values\": past_key_values,\n \"use_cache\": kwargs.get(\"use_cache\"),\n \"attention_mask\": attention_mask,\n }\n )\n return model_inputs\n\n @staticmethod\n def _reorder_cache(past_key_values, beam_idx):\n reordered_past = ()\n for layer_past in past_key_values:\n reordered_past += (\n tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),\n )\n return reordered_past" }, { "identifier": "LlamaTokenizer", "path": "model/llama/tokenization_llama.py", "snippet": "class LlamaTokenizer(PreTrainedTokenizer):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding. The default padding token is unset as there is\n no padding token in the original model.\n\n Args:\n vocab_file (`str`):\n Path to the vocabulary file.\n legacy (`bool`, *optional*, defaults to `True`):\n Whether or not the `legacy` behaviour of the tokenizer should be used. Legacy is before the merge of #24622\n which includes fixes to properly handle tokens that appear after special tokens. 
A simple example:\n\n - `legacy=True`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=True)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\")\n [8774, 32099, 3, 5, 1]\n ```\n - `legacy=False`:\n ```python\n >>> from transformers import T5Tokenizer\n\n >>> tokenizer = T5Tokenizer.from_pretrained(\"t5-base\", legacy=False)\n >>> tokenizer.encode(\"Hello <extra_id_0>.\") # the extra space `[3]` is no longer here\n [8774, 32099, 5, 1]\n ```\n Checkout the pull request and the issue [here](https://github.com/huggingface/transformers/pull/24565) for\n more details.\n\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP\n max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n pad_token=None,\n sp_model_kwargs: Optional[Dict[str, Any]] = None,\n add_bos_token=True,\n add_eos_token=False,\n clean_up_tokenization_spaces=False,\n legacy=None,\n **kwargs,\n ):\n self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs\n bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token\n eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token\n unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token\n pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token\n super().__init__(\n bos_token=bos_token,\n eos_token=eos_token,\n unk_token=unk_token,\n pad_token=pad_token,\n add_bos_token=add_bos_token,\n add_eos_token=add_eos_token,\n sp_model_kwargs=self.sp_model_kwargs,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n legacy=legacy,\n **kwargs,\n )\n if legacy is None:\n logger.warning_once(\n f\"You are using the default legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. 
We recommend you to\"\n \" read the related pull request available at https://github.com/huggingface/transformers/pull/24565, and set the legacy attribute accordingly.\"\n )\n legacy = True\n\n self.legacy = legacy\n self.vocab_file = vocab_file\n self.add_bos_token = add_bos_token\n self.add_eos_token = add_eos_token\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.Load(vocab_file)\n\n def __getstate__(self):\n state = self.__dict__.copy()\n state[\"sp_model\"] = None\n state[\"sp_model_proto\"] = self.sp_model.serialized_model_proto()\n return state\n\n def __setstate__(self, d):\n self.__dict__ = d\n self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)\n self.sp_model.LoadFromSerializedProto(self.sp_model_proto)\n\n @property\n def vocab_size(self):\n \"\"\"Returns vocab size\"\"\"\n return self.sp_model.get_piece_size()\n\n def get_vocab(self):\n \"\"\"Returns vocab as a dict\"\"\"\n vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}\n vocab.update(self.added_tokens_encoder)\n return vocab\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer.tokenize\n def tokenize(self, text: \"TextInput\", **kwargs) -> List[str]:\n # Replace the SPIECE_UNDERLINE with a space to make sure SPIECE_UNDERLINE is only used at\n # the beginning of the text\n if not self.legacy:\n text = SPIECE_UNDERLINE + text.replace(SPIECE_UNDERLINE, \" \")\n return super().tokenize(text, **kwargs)\n\n # Copied from transformers.models.t5.tokenization_t5.T5Tokenizer._tokenize\n def _tokenize(self, text, **kwargs):\n \"\"\"\n Returns a tokenized string.\n\n Since the sentencepiece internal model always adds a SPIECE_UNDERLINE, at the beginning of the provided text,\n we need to remove it by hand when the current text is a subsequence. This happens whenever the `self.tokenize`\n function is called with specials tokens: the input is split on the special tokens, and each subsequence is\n passed to `_tokenize`. 
Thus if a subsequence did not start with a `\" \"` or SPIECE_UNDERLINE, we have to remove\n the extra `SPIECE_UNDERLINE` prepended.\n \"\"\"\n if not self.legacy:\n is_first = text.startswith(SPIECE_UNDERLINE)\n if is_first:\n text = text[1:]\n\n tokens = self.sp_model.encode(text, out_type=str)\n\n if not self.legacy and not is_first and not text.startswith(\" \") and tokens[0].startswith(SPIECE_UNDERLINE):\n tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]\n return tokens\n\n def _convert_token_to_id(self, token):\n \"\"\"Converts a token (str) in an id using the vocab.\"\"\"\n return self.sp_model.piece_to_id(token)\n\n def _convert_id_to_token(self, index):\n \"\"\"Converts an index (integer) in a token (str) using the vocab.\"\"\"\n token = self.sp_model.IdToPiece(index)\n return token\n\n def convert_tokens_to_string(self, tokens):\n \"\"\"Converts a sequence of tokens (string) in a single string.\"\"\"\n current_sub_tokens = []\n out_string = \"\"\n prev_is_special = False\n for i, token in enumerate(tokens):\n # make sure that special tokens are not decoded using sentencepiece model\n if token in self.all_special_tokens:\n if not prev_is_special and i != 0:\n out_string += \" \"\n out_string += self.sp_model.decode(current_sub_tokens) + token\n prev_is_special = True\n current_sub_tokens = []\n else:\n current_sub_tokens.append(token)\n prev_is_special = False\n out_string += self.sp_model.decode(current_sub_tokens)\n return out_string\n\n def save_vocabulary(self, save_directory, filename_prefix: Optional[str] = None) -> Tuple[str]:\n \"\"\"\n Save the vocabulary and special tokens file to a directory.\n\n Args:\n save_directory (`str`):\n The directory in which to save the vocabulary.\n\n Returns:\n `Tuple(str)`: Paths to the files saved.\n \"\"\"\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n elif not os.path.isfile(self.vocab_file):\n with open(out_vocab_file, \"wb\") as fi:\n content_spiece_model = self.sp_model.serialized_model_proto()\n fi.write(content_spiece_model)\n\n return (out_vocab_file,)\n\n def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = bos_token_id + token_ids_0 + eos_token_id\n\n if token_ids_1 is not None:\n output = output + bos_token_id + token_ids_1 + eos_token_id\n\n return output\n\n def get_special_tokens_mask(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False\n ) -> List[int]:\n \"\"\"\n Retrieve sequence ids from a token list that has no special tokens added. 
This method is called when adding\n special tokens using the tokenizer `prepare_for_model` method.\n\n Args:\n token_ids_0 (`List[int]`):\n List of IDs.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n already_has_special_tokens (`bool`, *optional*, defaults to `False`):\n Whether or not the token list is already formatted with special tokens for the model.\n\n Returns:\n `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.\n \"\"\"\n if already_has_special_tokens:\n return super().get_special_tokens_mask(\n token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True\n )\n\n bos_token_id = [1] if self.add_bos_token else []\n eos_token_id = [1] if self.add_eos_token else []\n\n if token_ids_1 is None:\n return bos_token_id + ([0] * len(token_ids_0)) + eos_token_id\n return (\n bos_token_id\n + ([0] * len(token_ids_0))\n + eos_token_id\n + bos_token_id\n + ([0] * len(token_ids_1))\n + eos_token_id\n )\n\n def create_token_type_ids_from_sequences(\n self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None\n ) -> List[int]:\n \"\"\"\n Creates a mask from the two sequences passed to be used in a sequence-pair classification task. An ALBERT\n sequence pair mask has the following format:\n\n ```\n 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1\n | first sequence | second sequence |\n ```\n\n if token_ids_1 is None, only returns the first portion of the mask (0s).\n\n Args:\n token_ids_0 (`List[int]`):\n List of ids.\n token_ids_1 (`List[int]`, *optional*):\n Optional second list of IDs for sequence pairs.\n\n Returns:\n `List[int]`: List of [token type IDs](../glossary#token-type-ids) according to the given sequence(s).\n \"\"\"\n bos_token_id = [self.bos_token_id] if self.add_bos_token else []\n eos_token_id = [self.eos_token_id] if self.add_eos_token else []\n\n output = [0] * len(bos_token_id + token_ids_0 + eos_token_id)\n\n if token_ids_1 is not None:\n output += [1] * len(bos_token_id + token_ids_1 + eos_token_id)\n\n return output\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\") -> List[int]:\n r\"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 septs?\"\n ... 
) # doctest: +IGNORE_RESULT\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens: List[int] = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" }, { "identifier": "LlamaTokenizerFast", "path": "model/llama/tokenization_llama_fast.py", "snippet": "class LlamaTokenizerFast(PreTrainedTokenizerFast):\n \"\"\"\n Construct a Llama tokenizer. Based on byte-level Byte-Pair-Encoding.\n\n This uses notably ByteFallback and no normalization.\n\n ```\n from transformers import LlamaTokenizerFast\n\n tokenizer = LlamaTokenizerFast.from_pretrained(\"hf-internal-testing/llama-tokenizer\")\n tokenizer.encode(\"Hello this is a test\")\n >>> [1, 15043, 445, 338, 263, 1243]\n ```\n\n If you want to change the `bos_token` or the `eos_token`, make sure to specify them when initializing the model, or\n call `tokenizer.update_post_processor()` to make sure that the post-processing is correctly done (otherwise the\n values of the first token and final token of an encoded sequence will not be correct). For more details, checkout\n [post-processors] (https://huggingface.co/docs/tokenizers/api/post-processors) documentation.\n\n\n This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should\n refer to this superclass for more information regarding those methods.\n\n Args:\n vocab_file (`str`):\n [SentencePiece](https://github.com/google/sentencepiece) file (generally has a .model extension) that\n contains the vocabulary necessary to instantiate a tokenizer.\n tokenizer_file (`str`):\n [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that\n contains everything needed to load the tokenizer.\n\n clean_up_tokenization_spaces (`str`, *optional*, defaults to `False`):\n Wether to cleanup spaces after decoding, cleanup consists in removing potential artifacts like extra\n spaces.\n\n bos_token (`str`, *optional*, defaults to `\"<s>\"`):\n The beginning of sequence token that was used during pretraining. 
Can be used a sequence classifier token.\n\n eos_token (`str`, *optional*, defaults to `\"</s>\"`):\n The end of sequence token.\n\n unk_token (`str`, *optional*, defaults to `\"<unk>\"`):\n The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this\n token instead.\n \"\"\"\n\n vocab_files_names = VOCAB_FILES_NAMES\n slow_tokenizer_class = LlamaTokenizer\n padding_side = \"left\"\n model_input_names = [\"input_ids\", \"attention_mask\"]\n\n def __init__(\n self,\n vocab_file=None,\n tokenizer_file=None,\n clean_up_tokenization_spaces=False,\n unk_token=\"<unk>\",\n bos_token=\"<s>\",\n eos_token=\"</s>\",\n add_bos_token=True,\n add_eos_token=False,\n **kwargs,\n ):\n super().__init__(\n vocab_file=vocab_file,\n tokenizer_file=tokenizer_file,\n clean_up_tokenization_spaces=clean_up_tokenization_spaces,\n unk_token=unk_token,\n bos_token=bos_token,\n eos_token=eos_token,\n **kwargs,\n )\n self._add_bos_token = add_bos_token\n self._add_eos_token = add_eos_token\n self.update_post_processor()\n\n self.vocab_file = vocab_file\n self.can_save_slow_tokenizer = False if not self.vocab_file else True\n\n def update_post_processor(self):\n \"\"\"\n Updates the underlying post processor with the current `bos_token` and `eos_token`.\n \"\"\"\n bos = self.bos_token\n bos_token_id = self.bos_token_id\n\n eos = self.eos_token\n eos_token_id = self.eos_token_id\n\n single = f\"{(bos+':0 ') * self.add_bos_token}$A:0{(' '+eos+':0') * self.add_eos_token}\"\n pair = f\"{single}{(' '+bos+':1') * self.add_bos_token} $B:1{(' '+eos+':1') * self.add_eos_token}\"\n\n special_tokens = []\n if self.add_bos_token:\n special_tokens.append((bos, bos_token_id))\n if self.add_eos_token:\n special_tokens.append((eos, eos_token_id))\n self._tokenizer.post_processor = processors.TemplateProcessing(\n single=single, pair=pair, special_tokens=special_tokens\n )\n\n @property\n def add_eos_token(self):\n return self._add_eos_token\n\n @property\n def add_bos_token(self):\n return self._add_bos_token\n\n @add_eos_token.setter\n def add_eos_token(self, value):\n self._add_eos_token = value\n self.update_post_processor()\n\n @add_bos_token.setter\n def add_bos_token(self, value):\n self._add_bos_token = value\n self.update_post_processor()\n\n def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:\n if not self.can_save_slow_tokenizer:\n raise ValueError(\n \"Your fast tokenizer does not have the necessary information to save the vocabulary for a slow \"\n \"tokenizer.\"\n )\n\n if not os.path.isdir(save_directory):\n logger.error(f\"Vocabulary path ({save_directory}) should be a directory\")\n return\n out_vocab_file = os.path.join(\n save_directory, (filename_prefix + \"-\" if filename_prefix else \"\") + VOCAB_FILES_NAMES[\"vocab_file\"]\n )\n\n if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):\n copyfile(self.vocab_file, out_vocab_file)\n\n return (out_vocab_file,)\n\n def _build_conversation_input_ids(self, conversation: \"Conversation\"):\n \"\"\"Builds the input ids for a conversation.\n This is the format used in the provided examples. System prompts should be manually added at the beginning of\n the conversation. 
If no system prompt is given, the `DEFAULT_SYSTEM_PROMPT` will be used.\n ```\n <bos>[INST] B_SYS SytemPrompt E_SYS Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST] Answer <eos>\n <bos>[INST] Prompt [/INST]\n ```\n\n If you want to use your own system prompt, make sure to use both `B_SYS` and `E_SYS` use the following:\n ```python\n >>> from transformers import Conversation\n\n >>> Conversation(\n ... \"<<SYS>>\\n Only answer with emojis, and charades\\n<</SYS>>\\n\\nHow can I build a house in 10 septs?\"\n ... )\n ```\n Args:\n conversation (`Conversation`):\n Conversation to build input ids for.\n Returns:\n `List[int]`:\n Input ids for the conversation.\n \"\"\"\n if len(conversation.past_user_inputs) > 0:\n if not conversation.past_user_inputs[0].startswith(B_SYS) or E_SYS not in conversation.past_user_inputs[0]:\n conversation.past_user_inputs[0] = (\n B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.past_user_inputs[0]\n )\n elif conversation.new_user_input:\n if not conversation.new_user_input.startswith(B_SYS) or E_SYS not in conversation.new_user_input:\n conversation.new_user_input = B_SYS + DEFAULT_SYSTEM_PROMPT + E_SYS + conversation.new_user_input\n else:\n raise ValueError(\"Last message must be from user\")\n\n dialogue = list(conversation.iter_texts())\n if not all([is_user for is_user, msg in dialogue[::2]]) or not all(\n [not is_user for is_user, msg in dialogue[1::2]]\n ):\n raise ValueError(\n \"The model only supports 'user' and 'assistant' roles, starting with user and alternating (u/a/u/a/u...)\"\n )\n\n dialog_tokens = []\n dialog_tokens += sum(\n [\n [self.bos_token_id]\n + self.encode(\n f\"{B_INST} {(prompt[1]).strip()} {E_INST} {(answer[1]).strip()} \", add_special_tokens=False\n )\n + [self.eos_token_id]\n for prompt, answer in zip(dialogue[::2], dialogue[1::2])\n ],\n [],\n )\n dialog_tokens += [self.bos_token_id] + self.encode(\n f\"{B_INST} {(dialogue[-1][1]).strip()} {E_INST}\", add_special_tokens=False\n )\n return dialog_tokens" }, { "identifier": "print_rank_0", "path": "utils/common_utils.py", "snippet": "def print_rank_0(*message):\n \"\"\"If distributed is initialized print only on rank 0.\"\"\"\n if torch.distributed.is_initialized():\n if torch.distributed.get_rank() == 0:\n print(*message, flush=True)\n else:\n print(*message, flush=True)" }, { "identifier": "is_old_version", "path": "utils/common_utils.py", "snippet": "def is_old_version(path):\n new_vocab_files = ['merge.model']\n new_vocab_file_exists = []\n for filename in new_vocab_files:\n if not os.path.exists(os.path.join(path, filename)):\n new_vocab_file_exists.append(False)\n else:\n new_vocab_file_exists.append(True)\n if all(new_vocab_file_exists):\n return False\n if any(new_vocab_file_exists):\n return 'new_version_file_absent'\n else:\n return True" }, { "identifier": "build_tokenizer", "path": "tokenizer/tokenizer.py", "snippet": "def build_tokenizer(args):\n \"\"\"Initialize tokenizer.\"\"\"\n print_rank_0(\"> building {} tokenizer ...\".format(args.tokenizer_type))\n # if args.rank == 0:\n # print(\"> building {} tokenizer ...\".format(args.tokenizer_type), flush=True)\n\n # Select and instantiate the tokenizer.\n if args.tokenizer_type.lower() == \"GPT2BPETokenizer\".lower():\n assert args.vocab_file is not None\n assert args.merge_file is not None\n tokenizer = _GPT2BPETokenizer(args.vocab_file, args.merge_file)\n elif args.tokenizer_type.lower() == \"SPMTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = SentencePieceTokenizer(args.vocab_file)\n 
elif args.tokenizer_type.lower() == \"HFTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = HFTokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"HFGPT2Tokenizer\".lower():\n if args.vocab_file is None:\n print(\n \"WARNING: No vocab file found, loading Huggingface's pretrained GPT2Tokenizer\"\n )\n tokenizer = HFGPT2Tokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"CharLevelTokenizer\".lower():\n tokenizer = CharLevelTokenizer(vocab_size=512)\n elif args.tokenizer_type.lower() == \"TiktokenTokenizer\".lower():\n assert args.vocab_file is not None\n tokenizer = TiktokenTokenizer(args.vocab_file)\n elif args.tokenizer_type.lower() == \"GLMTokenizer\".lower():\n if is_old_version(args.pretrained_model_path):\n print('is an old version')\n from model.glm.tokenization_glm_deprecated import GLMChineseTokenizer\n args.glm_mask = '[sMASK]'\n old_version_tokenizer = True\n tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)\n else:\n print('is not an old version')\n old_version_tokenizer = False\n tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path, trust_remote_code=True)\n else:\n raise NotImplementedError(\n \"{} tokenizer is not \" \"implemented.\".format(args.tokenizer_type)\n )\n\n # Add vocab size.\n args.padded_vocab_size = _vocab_size_with_padding(tokenizer.vocab_size, args)\n\n return tokenizer" }, { "identifier": "HFTokenizer", "path": "tokenizer/tokenizer.py", "snippet": "class HFTokenizer(AbstractTokenizer):\n \"\"\"Designed to Integrate HF's Tokenizer library.\"\"\"\n\n def __init__(self, vocab_file):\n name = \"HFTokenizer\"\n super().__init__(name)\n\n self.tokenizer = Tokenizer.from_file(vocab_file)\n # self.eod_id = self.tokenizer.token_to_id(\"<|endoftext|>\")\n self.eod_id = self.tokenizer.token_to_id(\"<|end|>\")\n # self.pad_id = self.tokenizer.token_to_id(\"<|padding|>\")\n \n # 新词表没有<|padding|>, 用<|extratoken_1|>代替,和tokenization一致\n # self.pad_id = self.tokenizer.token_to_id(\"<|extratoken_1|>\")\n self.pad_id = self.tokenizer.token_to_id(\"<|pad|>\")\n\n @property\n def vocab_size(self):\n return self.tokenizer.get_vocab_size()\n\n @property\n def vocab(self):\n return self.tokenizer.get_vocab()\n\n @property\n def inv_vocab(self):\n return self.tokenizer.decoder\n\n def tokenize(self, text: str):\n return self.tokenizer.encode(text).ids\n\n def tokenize_batch(self, text_batch: Union[List[str], str]):\n return self.tokenizer.encode_batch(text_batch)\n\n def detokenize(self, token_ids):\n return self.tokenizer.decode(token_ids)\n\n @property\n def eod(self):\n return self.eod_id" }, { "identifier": "prepare_model_for_kbit_training", "path": "model/peft/utils/others.py", "snippet": "def prepare_model_for_kbit_training(model, use_gradient_checkpointing=True):\n r\"\"\"\n This method wraps the entire protocol for preparing a model before running a training. 
This includes:\n 1- Cast the layernorm in fp32 2- making output embedding layer require grads 3- Add the upcasting of the lm\n head to fp32\n\n Args:\n model, (`transformers.PreTrainedModel`):\n The loaded model from `transformers`\n \"\"\"\n loaded_in_kbit = getattr(model, \"is_loaded_in_8bit\", False) or getattr(model, \"is_loaded_in_4bit\", False)\n\n for name, param in model.named_parameters():\n # freeze base model's layers\n param.requires_grad = False\n \n # cast all non INT8 parameters to fp32\n for param in model.parameters():\n if (param.dtype == torch.float16) or (param.dtype == torch.bfloat16):\n param.data = param.data.to(torch.float32)\n \n if loaded_in_kbit and use_gradient_checkpointing:\n # For backward compatibility\n if hasattr(model, \"enable_input_require_grads\"):\n model.enable_input_require_grads()\n else:\n \n def make_inputs_require_grad(module, input, output):\n output.requires_grad_(True)\n\n model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)\n\n # enable gradient checkpointing for memory efficiency\n model.gradient_checkpointing_enable()\n\n return model" }, { "identifier": "AdaLoraConfig", "path": "model/peft/tuner/adalora.py", "snippet": "class AdaLoraConfig(LoraConfig):\n \"\"\"\n This is the configuration class to store the configuration of a [`~peft.AdaLora`].\n\n Args:\n target_r (`int`): The target average rank of incremental matrix.\n init_r (`int`): The initial rank for each incremental matrix.\n tinit (`int`): The steps of initial fine-tuning warmup.\n tfinal (`int`): The step of final fine-tuning.\n deltaT (`int`): The time internval between two budget allocations.\n beta1 (`float`): The hyperparameter of EMA for sensitivity smoothing.\n beta2 (`float`): The hyperparameter of EMA for undertainty quantification.\n orth_reg_weight (`float`): The coefficient of orthogonal regularization.\n total_step (`int`): The total training steps that should be specified before training.\n rank_pattern (`list`): The allocated rank for each weight matrix by RankAllocator.\n \"\"\"\n\n target_r: int = field(default=8, metadata={\"help\": \"Target Lora matrix dimension.\"})\n init_r: int = field(default=12, metadata={\"help\": \"Intial Lora matrix dimension.\"})\n tinit: int = field(default=0, metadata={\"help\": \"The steps of initial warmup.\"})\n tfinal: int = field(default=0, metadata={\"help\": \"The steps of final warmup.\"})\n deltaT: int = field(default=1, metadata={\"help\": \"Step interval of rank allocation.\"})\n beta1: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n beta2: float = field(default=0.85, metadata={\"help\": \"Hyperparameter of EMA.\"})\n orth_reg_weight: float = field(default=0.5, metadata={\"help\": \"The orthogonal regularization coefficient.\"})\n total_step: Optional[int] = field(default=None, metadata={\"help\": \"The total training steps.\"})\n rank_pattern: Optional[dict] = field(default=None, metadata={\"help\": \"The saved rank pattern.\"})\n init_lora_weights: bool = field(\n default=True,\n metadata={\"help\": \"Whether to initialize the weights of the Lora layers.\"},\n )\n\n def __post_init__(self):\n self.peft_type = PeftType.ADALORA" } ]
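The two closing context entries fit together: `prepare_model_for_kbit_training` freezes the quantized base model and upcasts its floating-point parameters, while `AdaLoraConfig` describes how the adapter rank budget shrinks during training. Below is a minimal sketch of how they are typically chained through `get_peft_model`; the checkpoint path and every hyper-parameter value are illustrative assumptions, not values taken from this record:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import TaskType, get_peft_model
from model.peft.tuner import AdaLoraConfig
from model.peft.utils import prepare_model_for_kbit_training

# "path/to/llama" is a hypothetical checkpoint directory.
bnb_config = BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_compute_dtype=torch.bfloat16)
model = AutoModelForCausalLM.from_pretrained("path/to/llama", quantization_config=bnb_config)

# Freeze base weights, cast fp16/bf16 params to fp32, enable gradient checkpointing.
model = prepare_model_for_kbit_training(model, use_gradient_checkpointing=True)

# Rank starts at init_r and is pruned toward target_r between tinit and tfinal.
adalora_config = AdaLoraConfig(
    init_r=12,
    target_r=8,
    tinit=200,
    tfinal=1000,
    total_step=10_000,
    task_type=TaskType.CAUSAL_LM,
)
model = get_peft_model(model, adalora_config)
```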
import os
import torch
import sys
import peft
import model.peft.modeling_peft  # noqa
import bitsandbytes as bnb  # noqa
import accelerate  # noqa
from utils.common_utils import get_model_params_num
from transformers import (  # noqa: E402
    CONFIG_MAPPING,
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    PreTrainedTokenizerFast,
)
from .gpt_neox.configuration_gpt_neox import GPTNeoXConfig
from .gpt_neox.modeling_gpt_neox import GPTNeoXForCausalLM
from .gpt_neox.tokenization_gpt_neox_fast import GPTNeoXTokenizerFast
from .llama.configuration_llama import LlamaConfig
from .llama.modeling_llama import LlamaForCausalLM
from .llama.tokenization_llama import LlamaTokenizer
from .llama.tokenization_llama_fast import LlamaTokenizerFast
from torch.distributed.fsdp import (
    FullyShardedDataParallel as FSDP,
    StateDictType,
)
from utils.common_utils import print_rank_0, is_old_version
from tokenizer import build_tokenizer
from tokenizer.tokenizer import HFTokenizer
from peft.tuners.lora import LoraLayer
from model.peft.utils import prepare_model_for_kbit_training
from peft import (  # noqa
    LoraConfig,
    PrefixTuningConfig,
    PromptEncoderConfig,
    PromptEncoderReparameterizationType,
    PromptTuningConfig,
    PromptTuningInit,
    TaskType,
    get_peft_model
)
from model.peft.tuner import AdaLoraConfig
from transformers import BitsAndBytesConfig
from packaging import version
from .glm.tokenization_glm_deprecated import GLMChineseTokenizer
17,414
# coding=utf-8
# Copyright (c) 2023 Ant Group. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sys.path.append("..")
# from .glm.modeling_glm import GLMForConditionalGeneration
# from .glm.configuration_glm import GLMConfig
# from .glm.tokenization_glm import GLMTokenizer
try:
    from transformers import BitsAndBytesConfig
except ImportError:
    BitsAndBytesConfig = None
try:
    import bitsandbytes as bnb  # noqa
except ImportError:
    bnb = None


def find_all_linear_names(args, model):
    cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear)
    lora_module_names = set()
    for name, module in model.named_modules():
        if isinstance(module, cls):
            names = name.split('.')
            lora_module_names.add(names[0] if len(names) == 1 else names[-1])
    if 'lm_head' in lora_module_names:  # needed for 16-bit
        lora_module_names.remove('lm_head')
    return list(lora_module_names)


def setup_model(args, logger, use_cache=False):
    # Load pretrained model and tokenizer
    if args.pretrained_model_path:
        # TODO: implement reading the tokenizer via from_pretrained
        if args.model_type == 'gpt_neox':
            # if args.tokenizer_type:
            #     tokenizer = build_tokenizer(args)
            #     tokenizer.eod_token = "<|endoftext|>"
            #     tokenizer.pad_token = "<|pad|>"
            #     # tokenizer.sop_token = "<|endoftext|>"  # adapt to multi task dataset
            #     # tokenizer.eop_token = "<|endoftext|>"
            #     tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0]
            #     tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0]
            # else:
            tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path)
            # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file)
            tokenizer.eod_token = "<|endoftext|>"
            tokenizer.pad_token = "<|pad|>"
            tokenizer.sop_token = "<|endoftext|>"  # adapt to multi task dataset
            tokenizer.eop_token = "<|endoftext|>"
            tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
            tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
            print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}')
            print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}')
        elif args.model_type == 'llama':
            tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path)
            # tokenizer = AutoTokenizer.from_pretrained(
            #     args.pretrained_model_path,
            #     trust_remote_code=True,
            # )
            tokenizer.eod_token = "</s>"
            tokenizer.eos_token = "</s>"
            tokenizer.bos_token = "<s>"
            tokenizer.pad_token = "[PAD]"
            tokenizer.unk_token = "<unk>"
            tokenizer.sop_token = "</s>"  # adapt to multi task dataset
            tokenizer.eop_token = "</s>"
            tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
            tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token)
            tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token)
            tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
            tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token)
            print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}')
            print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}')
            print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}')
            print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}')
            print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}')
        elif args.model_type == 'glm':
            if is_old_version(args.pretrained_model_path):
                tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path)
            else:
                tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path)
    elif args.train_mode == 'sst':
        # tokenizer = build_tokenizer(args)
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file)
        tokenizer.eod_token = "<|endoftext|>"
        tokenizer.pad_token = "<|pad|>"
        tokenizer.sop_token = "<|endoftext|>"  # adapt to multi task dataset
        tokenizer.eop_token = "<|endoftext|>"
        tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
        tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
        print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}')
        print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}')
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_path."
        )

    if args.model_type == 'gpt_neox':
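The cropped code's `find_all_linear_names` walks `named_modules()`, keeps the last dotted path component of every matching linear layer's name, and drops `lm_head` so the output head is never wrapped by an adapter. A small sketch of that behavior on a toy module, assuming the function above is in scope; the `SimpleNamespace` args object is a stand-in, and `bits=16` makes the conditional expression select plain `torch.nn.Linear`, so `bitsandbytes` is never evaluated:

```python
import torch
from types import SimpleNamespace

toy = torch.nn.ModuleDict({
    "q_proj": torch.nn.Linear(8, 8),
    "mlp": torch.nn.Sequential(torch.nn.Linear(8, 16), torch.nn.ReLU()),
    "lm_head": torch.nn.Linear(8, 32),
})
args = SimpleNamespace(bits=16)

# 'mlp.0' collapses to '0', and 'lm_head' is removed before returning.
print(sorted(find_all_linear_names(args, toy)))  # ['0', 'q_proj']
```

The returned names are the kind of list a LoRA-style config takes as its `target_modules`.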
# coding=utf-8
# Copyright (c) 2023 Ant Group. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
sys.path.append("..")
# from .glm.modeling_glm import GLMForConditionalGeneration
# from .glm.configuration_glm import GLMConfig
# from .glm.tokenization_glm import GLMTokenizer
try:
    from transformers import BitsAndBytesConfig
except ImportError:
    BitsAndBytesConfig = None
try:
    import bitsandbytes as bnb  # noqa
except ImportError:
    bnb = None


def find_all_linear_names(args, model):
    cls = bnb.nn.Linear4bit if args.bits == 4 else (bnb.nn.Linear8bitLt if args.bits == 8 else torch.nn.Linear)
    lora_module_names = set()
    for name, module in model.named_modules():
        if isinstance(module, cls):
            names = name.split('.')
            lora_module_names.add(names[0] if len(names) == 1 else names[-1])
    if 'lm_head' in lora_module_names:  # needed for 16-bit
        lora_module_names.remove('lm_head')
    return list(lora_module_names)


def setup_model(args, logger, use_cache=False):
    # Load pretrained model and tokenizer
    if args.pretrained_model_path:
        # TODO: implement reading the tokenizer via from_pretrained
        if args.model_type == 'gpt_neox':
            # if args.tokenizer_type:
            #     tokenizer = build_tokenizer(args)
            #     tokenizer.eod_token = "<|endoftext|>"
            #     tokenizer.pad_token = "<|pad|>"
            #     # tokenizer.sop_token = "<|endoftext|>"  # adapt to multi task dataset
            #     # tokenizer.eop_token = "<|endoftext|>"
            #     tokenizer.eod_id = tokenizer.tokenize(tokenizer.eod_token)[0]
            #     tokenizer.pad_id = tokenizer.tokenize(tokenizer.pad_token)[0]
            # else:
            tokenizer = GPTNeoXTokenizerFast.from_pretrained(args.pretrained_model_path)
            # tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file)
            tokenizer.eod_token = "<|endoftext|>"
            tokenizer.pad_token = "<|pad|>"
            tokenizer.sop_token = "<|endoftext|>"  # adapt to multi task dataset
            tokenizer.eop_token = "<|endoftext|>"
            tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
            tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
            print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}')
            print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}')
        elif args.model_type == 'llama':
            tokenizer = LlamaTokenizerFast.from_pretrained(args.pretrained_model_path)
            # tokenizer = AutoTokenizer.from_pretrained(
            #     args.pretrained_model_path,
            #     trust_remote_code=True,
            # )
            tokenizer.eod_token = "</s>"
            tokenizer.eos_token = "</s>"
            tokenizer.bos_token = "<s>"
            tokenizer.pad_token = "[PAD]"
            tokenizer.unk_token = "<unk>"
            tokenizer.sop_token = "</s>"  # adapt to multi task dataset
            tokenizer.eop_token = "</s>"
            tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
            tokenizer.eos_id = tokenizer.convert_tokens_to_ids(tokenizer.eos_token)
            tokenizer.bos_id = tokenizer.convert_tokens_to_ids(tokenizer.bos_token)
            tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
            tokenizer.unk_id = tokenizer.convert_tokens_to_ids(tokenizer.unk_token)
            print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}')
            print_rank_0(f'tokenizer {tokenizer.eos_token} id: {tokenizer.eos_id}')
            print_rank_0(f'tokenizer {tokenizer.bos_token} id: {tokenizer.bos_id}')
            print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}')
            print_rank_0(f'tokenizer {tokenizer.unk_token} id: {tokenizer.unk_id}')
        elif args.model_type == 'glm':
            if is_old_version(args.pretrained_model_path):
                tokenizer = GLMChineseTokenizer.from_pretrained(args.pretrained_model_path)
            else:
                tokenizer = GLMTokenizer.from_pretrained(args.pretrained_model_path)
    elif args.train_mode == 'sst':
        # tokenizer = build_tokenizer(args)
        tokenizer = PreTrainedTokenizerFast(tokenizer_file=args.vocab_file)
        tokenizer.eod_token = "<|endoftext|>"
        tokenizer.pad_token = "<|pad|>"
        tokenizer.sop_token = "<|endoftext|>"  # adapt to multi task dataset
        tokenizer.eop_token = "<|endoftext|>"
        tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
        tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)
        print_rank_0(f'tokenizer {tokenizer.eod_token} id: {tokenizer.eod_id}')
        print_rank_0(f'tokenizer {tokenizer.pad_token} id: {tokenizer.pad_id}')
    else:
        raise ValueError(
            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
            "You can do it from another script, save it, and load it from here, using --tokenizer_path."
        )

    if args.model_type == 'gpt_neox':
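Every tokenizer branch in `setup_model` repeats one pattern: attach the sentinel tokens the data pipeline expects, then resolve each to an integer id once via `convert_tokens_to_ids`. A condensed sketch of that pattern, reusing `GPTNeoXTokenizerFast` from the import block above; the checkpoint directory is a hypothetical path:

```python
tokenizer = GPTNeoXTokenizerFast.from_pretrained("path/to/gpt-neox")
tokenizer.eod_token = "<|endoftext|>"
tokenizer.pad_token = "<|pad|>"

# Resolve ids once so collators can pad and terminate with integers.
tokenizer.eod_id = tokenizer.convert_tokens_to_ids(tokenizer.eod_token)
tokenizer.pad_id = tokenizer.convert_tokens_to_ids(tokenizer.pad_token)

# A fast tokenizer returns None for a token missing from its vocab, so this
# catches checkpoints whose vocab lacks the expected sentinel tokens.
assert tokenizer.eod_id is not None and tokenizer.pad_id is not None
```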
auto_config = GPTNeoXConfig
1
2023-11-02 01:37:01+00:00
24k
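Read end to end, this record defines one completion problem: the prompt is retrieved cross-file context plus `import_statement` plus `cropped_code`, and the target is `next_line` (`auto_config = GPTNeoXConfig`), with `gold_snippet_index` marking the context entry that contains the needed definition. A hedged sketch of how such a record could be assembled and scored; the `record` dict, the prompt layout, and the exact-match rule are assumptions about usage, not part of the data:

```python
def build_prompt(record: dict) -> str:
    # Hypothetical layout: gold cross-file snippet, then in-file imports,
    # then the cropped body whose next line the model must produce.
    gold = record["context"][record["gold_snippet_index"]]
    header = f"# {gold['path']}\n{gold['snippet']}\n\n"
    return header + record["import_statement"] + "\n" + record["cropped_code"]


def exact_match(prediction: str, record: dict) -> bool:
    # Compare only the first generated line against the gold next line.
    lines = prediction.strip().splitlines()
    return bool(lines) and lines[0].strip() == record["next_line"].strip()
```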
bytedance/cryostar
projects/star/train_atom.py
[ { "identifier": "SpatialGridTranslate", "path": "cryostar/utils/transforms.py", "snippet": "class SpatialGridTranslate(torch.nn.Module):\n\n def __init__(self, D, device=None) -> None:\n super().__init__()\n self.D = D\n # yapf: disable\n coords = torch.stack(torch.meshgrid([\n torch.linspace(-1.0, 1.0, self.D, device=device),\n torch.linspace(-1.0, 1.0, self.D, device=device)],\n indexing=\"ij\"), dim=-1).reshape(-1, 2)\n # yapf: enable\n self.register_buffer(\"coords\", coords)\n\n def transform(self, images: torch.Tensor, trans: torch.Tensor):\n \"\"\"\n The `images` are stored in `YX` mode, so the `trans` is also `YX`!\n\n Supposing that D is 96, a point is at 0.0:\n - adding 48 should move it to the right corner which is 1.0\n 1.0 = 0.0 + 48 / (96 / 2)\n - adding 96(>48) should leave it at 0.0\n 0.0 = 0.0 + 96 / (96 / 2) - 2.0\n - adding -96(<48) should leave it at 0.0\n 0.0 = 0.0 - 96 / (96 / 2) + 2.0\n\n Input:\n images: (B, NY, NX)\n trans: (B, T, 2)\n\n Returns:\n images: (B, T, NY, NX)\n \"\"\"\n B, NY, NX = images.shape\n assert self.D == NY == NX\n assert images.shape[0] == trans.shape[0]\n\n grid = einops.rearrange(self.coords, \"N C2 -> 1 1 N C2\") - \\\n einops.rearrange(trans, \"B T C2 -> B T 1 C2\") * 2 / self.D\n grid = grid.flip(-1) # convert the first axis from slow-axis to fast-axis\n grid[grid >= 1] -= 2\n grid[grid <= -1] += 2\n grid.clamp_(-1.0, 1.0)\n\n sampled = F.grid_sample(einops.rearrange(images, \"B NY NX -> B 1 NY NX\"), grid, align_corners=True)\n\n sampled = einops.rearrange(sampled, \"B 1 T (NY NX) -> B T NY NX\", NX=NX, NY=NY)\n return sampled" }, { "identifier": "StarfileDataSet", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDataSet(Dataset):\n\n def __init__(self, cfg: StarfileDatasetConfig):\n super().__init__()\n self.cfg = cfg\n self.df = starfile.read(Path(cfg.starfile_path))\n\n if \"optics\" in self.df:\n optics_df = self.df[\"optics\"]\n particles_df = self.df[\"particles\"]\n else:\n optics_df = None\n particles_df = self.df\n self.particles_df = particles_df\n\n if cfg.apix is None:\n if optics_df is not None and \"rlnImagePixelSize\" in optics_df:\n self.apix = float(optics_df[\"rlnImagePixelSize\"][0])\n print(f\"Infer dataset apix={self.apix} from first optic group.\")\n elif \"rlnDetectorPixelSize\" in particles_df and \"rlnMagnification\" in particles_df:\n self.apix = float(particles_df[\"rlnDetectorPixelSize\"][0] / particles_df[\"rlnMagnification\"][0] * 1e4)\n print(f\"Infer dataset apix={self.apix} from first particle meta data.\")\n else:\n raise AttributeError(\"Cannot parse apix from starfile, please set it in config by hand.\")\n else:\n self.apix = cfg.apix\n\n if cfg.side_shape is None:\n tmp_mrc_path = osp.join(cfg.dataset_dir, particles_df[\"rlnImageName\"][0].split('@')[-1])\n with mrcfile.mmap(tmp_mrc_path, mode=\"r\", permissive=True) as m:\n self.side_shape = m.data.shape[-1]\n print(f\"Infer dataset side_shape={self.side_shape} from the 1st particle.\")\n else:\n self.side_shape = cfg.side_shape\n\n self.num_proj = len(particles_df)\n\n self.down_side_shape = self.side_shape\n if cfg.down_side_shape is not None:\n self.down_side_shape = cfg.down_side_shape\n\n if cfg.mask_rad is not None:\n self.mask = Mask(self.down_side_shape, cfg.mask_rad)\n\n self.f_mu = None\n self.f_std = None\n\n def __len__(self):\n return self.num_proj\n\n def estimate_normalization(self):\n if self.f_mu is None and self.f_std is None:\n f_sub_data = []\n # I have checked that the standard deviation of 10/100/1000 particles is 
similar\n for i in range(0, len(self), len(self) // 100):\n f_sub_data.append(self[i][\"fproj\"])\n f_sub_data = torch.cat(f_sub_data, dim=0)\n # self.f_mu = torch.mean(f_sub_data)\n self.f_mu = 0.0 # just follow cryodrgn\n self.f_std = torch.std(f_sub_data).item()\n else:\n raise Exception(\"The normalization factor has been estimated!\")\n\n def __getitem__(self, idx):\n item_row = self.particles_df.iloc[idx]\n try:\n img_name_raw = item_row[\"rlnImageName\"]\n in_mrc_idx, img_name = item_row[\"rlnImageName\"].split(\"@\")\n in_mrc_idx = int(in_mrc_idx) - 1\n mrc_path = osp.join(self.cfg.dataset_dir, img_name)\n with mrcfile.mmap(mrc_path, mode=\"r\", permissive=True) as mrc:\n if mrc.data.ndim > 2:\n proj = torch.from_numpy(np.array(mrc.data[in_mrc_idx])).float() * self.cfg.scale_images\n else:\n # the mrcs file can contain only one particle\n proj = torch.from_numpy(np.array(mrc.data)).float() * self.cfg.scale_images\n\n # get (1, side_shape, side_shape) proj\n if len(proj.shape) == 2:\n proj = proj[None, :, :] # add a dummy channel (for consistency w/ img fmt)\n else:\n assert len(proj.shape) == 3 and proj.shape[0] == 1 # some starfile already have a dummy channel\n\n # down-sample\n if self.down_side_shape != self.side_shape:\n if self.cfg.down_method == \"interp\":\n proj = tvf.resize(proj, [self.down_side_shape, ] * 2, antialias=True)\n elif self.cfg.down_method == \"fft\":\n proj = downsample_2d(proj[0, :, :], self.down_side_shape)[None, :, :]\n else:\n raise NotImplementedError\n\n if self.cfg.mask_rad is not None:\n proj = self.mask(proj)\n\n except Exception as e:\n print(f\"WARNING: Particle image {img_name_raw} invalid! Setting to zeros.\")\n print(e)\n proj = torch.zeros(1, self.down_side_shape, self.down_side_shape)\n\n if self.cfg.power_images != 1.0:\n proj *= self.cfg.power_images\n\n # Generate CTF from CTF paramaters\n defocusU = torch.from_numpy(np.array(item_row[\"rlnDefocusU\"] / 1e4, ndmin=2)).float()\n defocusV = torch.from_numpy(np.array(item_row[\"rlnDefocusV\"] / 1e4, ndmin=2)).float()\n angleAstigmatism = torch.from_numpy(np.radians(np.array(item_row[\"rlnDefocusAngle\"], ndmin=2))).float()\n\n # Read \"GT\" orientations\n if self.cfg.ignore_rots:\n rotmat = torch.eye(3).float()\n else:\n # yapf: disable\n rotmat = torch.from_numpy(euler_angles2matrix(\n np.radians(-item_row[\"rlnAngleRot\"]),\n # np.radians(particle[\"rlnAngleTilt\"]) * (-1 if self.cfg.invert_hand else 1),\n np.radians(-item_row[\"rlnAngleTilt\"]),\n np.radians(-item_row[\"rlnAnglePsi\"]))\n ).float()\n # yapf: enable\n\n # Read \"GT\" shifts\n if self.cfg.ignore_trans:\n shiftX = torch.tensor([0.])\n shiftY = torch.tensor([0.])\n else:\n # support early starfile formats\n # Particle translations used to be in pixels (rlnOriginX and rlnOriginY) but this changed to Angstroms\n # (rlnOriginXAngstrom and rlnOriginYAngstrom) in relion 3.1.\n # https://relion.readthedocs.io/en/release-3.1/Reference/Conventions.html\n if \"rlnOriginXAngst\" in item_row:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginXAngst\"], dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginYAngst\"], dtype=np.float32))\n else:\n shiftX = torch.from_numpy(np.array(item_row[\"rlnOriginX\"] * self.apix, dtype=np.float32))\n shiftY = torch.from_numpy(np.array(item_row[\"rlnOriginY\"] * self.apix, dtype=np.float32))\n\n fproj = primal_to_fourier_2d(proj)\n\n if self.f_mu is not None:\n fproj = (fproj - self.f_mu) / self.f_std\n proj = fourier_to_primal_2d(fproj).real\n\n in_dict = {\n \"proj\": 
proj,\n \"rotmat\": rotmat,\n \"defocusU\": defocusU,\n \"defocusV\": defocusV,\n \"shiftX\": shiftX,\n \"shiftY\": shiftY,\n \"angleAstigmatism\": angleAstigmatism,\n \"idx\": torch.tensor(idx, dtype=torch.long),\n \"fproj\": fproj,\n \"imgname_raw\": img_name_raw\n }\n\n if \"rlnClassNumber\" in item_row:\n in_dict[\"class_id\"] = item_row[\"rlnClassNumber\"]\n\n return in_dict" }, { "identifier": "StarfileDatasetConfig", "path": "cryostar/utils/dataio.py", "snippet": "class StarfileDatasetConfig:\n dataset_dir: str\n starfile_path: str\n # if is not specified, the following apix, and side_shape will be inferred from starfile\n apix: float = None\n side_shape: int = None\n # down-sample the original image or not\n down_side_shape: int = None\n down_method: str = \"interp\"\n # apply a circular mask on input image or not\n mask_rad: float = None\n # change image values\n scale_images: float = 1.0\n power_images: float = field(\n default=1.0,\n metadata={\"help\": \"Change the power of the signal by multiplying a constant number.\"})\n # ignore pose from starfile or not\n ignore_trans: bool = False\n ignore_rots: bool = False\n # invert_hand: bool = field(\n # default=False,\n # metadata={\"help\": \"Invert handedness when reading relion data.\"})" }, { "identifier": "Mask", "path": "cryostar/utils/dataio.py", "snippet": "class Mask(torch.nn.Module):\n\n def __init__(self, im_size, rad):\n super(Mask, self).__init__()\n\n mask = torch.lt(torch.linspace(-1, 1, im_size)[None]**2 + torch.linspace(-1, 1, im_size)[:, None]**2, rad**2)\n # float for pl ddp broadcast compatible\n self.register_buffer('mask', mask.float())\n self.num_masked = torch.sum(mask).item()\n\n def forward(self, x):\n return x * self.mask" }, { "identifier": "CTFRelion", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFRelion(CTFBase):\n \"\"\"\n BUG: There are two bugs in this file:\n 1. `self.angleFrequency` has some error for even-sized grid.\n 2. `local_defocus` in `get_ctf()` has some error, `angleAstigmatism` should be\n replaced with `defocusU - defocusV`.\n\n The bugs will not affect real-world data too much. But you may encounter some issues\n on simulated datasets. Use CTFCryoDRGN instead.\n \"\"\"\n\n def __init__(self,\n size=257,\n resolution=0.8,\n kV=300.0,\n valueNyquist=1.,\n defocusU=1.,\n defocusV=1.,\n angleAstigmatism=0.,\n cs=2.7,\n phasePlate=0.,\n amplitudeContrast=.1,\n bFactor=0.,\n num_particles=500,\n requires_grad=False,\n precompute=False,\n flip_images=False):\n super(CTFRelion, self).__init__(resolution, num_particles, requires_grad)\n self.requires_grad = requires_grad\n self.flip_images = flip_images\n\n self.size = size # in pixel\n self.resolution = resolution # in angstrom\n self.kV = kV # in kilovolt\n\n self.valueNyquist = valueNyquist\n self.phasePlate = phasePlate / 180. * np.pi # in radians (converted from degrees)\n self.amplitudeContrast = amplitudeContrast\n self.bFactor = bFactor\n\n self.frequency = 1. / self.resolution\n\n self.wavelength = self._get_ewavelength(self.kV * 1e3) # input in V (so we convert kv*1e3)\n\n angleAstigmatism = angleAstigmatism / 180. 
* np.pi # input in degree converted in radian\n cs = cs * 1e7 # input in mm converted in angstrom\n # the angleAstigmatism, defocusU, defocusV and cs are nn.Parameter of size (N, 1, 1)\n self.angleAstigmatism = nn.Parameter(angleAstigmatism * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.cs = nn.Parameter(cs * torch.ones((num_particles, 1, 1), dtype=torch.float32), requires_grad=requires_grad)\n self.defocusU = nn.Parameter(defocusU * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n self.defocusV = nn.Parameter(defocusV * torch.ones((num_particles, 1, 1), dtype=torch.float32),\n requires_grad=requires_grad)\n\n self.precomputed_filters = precompute\n\n ax = torch.linspace(-1. / (2. * resolution), 1 / (2. * resolution), self.size)\n mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n self.register_buffer(\"r2\", mx**2 + my**2)\n self.register_buffer(\"r\", torch.sqrt(self.r2))\n self.register_buffer(\"angleFrequency\", torch.atan2(my, mx))\n\n if not self.requires_grad and self.precomputed_filters:\n print(\"Precomputing hFourier in CTF\")\n self.register_buffer('hFourier', self.get_ctf(torch.arange(num_particles), num_particles))\n\n def _get_ewavelength(self, U):\n # assumes V as input, returns wavelength in angstrom\n h = scipy.constants.h\n e = scipy.constants.e\n c = scipy.constants.c\n m0 = scipy.constants.m_e\n\n return h / math.sqrt(2. * m0 * e * U) / math.sqrt(1 + e * U / (2 * m0 * c**2)) * 1e10\n\n def get_ctf(self, idcs, B, cpu_params={}, frequency_marcher=None):\n defocusU = self.defocusU[idcs, :, :]\n defocusV = self.defocusV[idcs, :, :]\n angleAstigmatism = self.angleAstigmatism[idcs, :, :]\n cs = self.cs[idcs, :, :]\n\n ac = self.amplitudeContrast\n pc = math.sqrt(1. - ac**2)\n K1 = np.pi / 2. * cs * self.wavelength**3\n K2 = np.pi * self.wavelength\n\n # Cut-off from frequency marcher\n if frequency_marcher is not None:\n self.size_after_fm = 2 * frequency_marcher.f + 1\n if self.size_after_fm > self.size:\n self.size_after_fm = self.size\n angleFrequency = frequency_marcher.cut_coords_plane(self.angleFrequency.reshape(\n self.size, self.size, 1)).reshape(self.size_after_fm, self.size_after_fm)\n r2 = frequency_marcher.cut_coords_plane(self.r2.reshape(self.size, self.size,\n 1)).reshape(self.size_after_fm, self.size_after_fm)\n else:\n self.size_after_fm = self.size\n angleFrequency = self.angleFrequency\n r2 = self.r2\n\n angle = angleFrequency - angleAstigmatism\n local_defocus = 1e4 * (defocusU + defocusV) / 2. + angleAstigmatism * torch.cos(2. * angle)\n\n gamma = K1 * r2**2 - K2 * r2 * local_defocus - self.phasePlate\n hFourier = -pc * torch.sin(gamma) + ac * torch.cos(gamma)\n\n if self.valueNyquist != 1:\n decay = np.sqrt(-np.log(self.valueNyquist)) * 2. 
* self.resolution\n envelope = torch.exp(-self.frequency * decay**2 * r2)\n hFourier *= envelope\n\n return hFourier\n\n def oversample_multiply_crop(self, x_fourier, hFourier):\n # we assume that the shape of the CTF is always going to be bigger\n # than the size of the input image\n input_sz = x_fourier.shape[-1]\n if input_sz != self.size_after_fm:\n x_primal = fourier_to_primal_2d(x_fourier)\n\n pad_len = (self.size_after_fm - x_fourier.shape[-1]) // 2 # here we assume even lengths\n p2d = (pad_len, pad_len, pad_len, pad_len)\n x_primal_padded = F.pad(x_primal, p2d, 'constant', 0)\n\n x_fourier_padded = primal_to_fourier_2d(x_primal_padded)\n\n x_fourier_padded_filtered = x_fourier_padded * hFourier[:, None, :, :]\n return x_fourier_padded_filtered[..., pad_len:-pad_len, pad_len:-pad_len]\n else:\n return x_fourier * hFourier[:, None, :, :]\n\n def get_cpu_params(self, idcs, ctf_params, flip=False):\n batch_size = idcs.shape[0]\n self.defocusU[idcs, :, :] = ctf_params['defocusU'][:batch_size] if not flip else\\\n ctf_params['defocusU'][batch_size:]\n self.defocusV[idcs, :, :] = ctf_params['defocusV'][:batch_size] if not flip else\\\n ctf_params['defocusV'][batch_size:]\n self.angleAstigmatism[idcs, :, :] = ctf_params['angleAstigmatism'][:batch_size] if not flip else\\\n ctf_params['angleAstigmatism'][batch_size:]\n cpu_params = {}\n return cpu_params\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n # This is when we want to prescribe parameters for the CTF\n if x_fourier.dim() == 3:\n x_fourier = x_fourier[None, ...]\n # x_fourier: B, 1, S, S\n batch_size = len(idcs)\n cpu_params = {}\n if ctf_params:\n cpu_params = self.get_cpu_params(idcs, ctf_params, flip=False)\n\n # if new params for the CTF have been prescribed or we are optimizing it\n # then request the evaluation of the CTF\n if not ctf_params and self.precomputed_filters and not self.requires_grad:\n hFourier = self.hFourier[idcs, :, :]\n else:\n hFourier = self.get_ctf(idcs, batch_size, cpu_params=cpu_params, frequency_marcher=frequency_marcher)\n\n if self.flip_images:\n flipped_hFourier = torch.flip(hFourier, [1, 2])\n\n hFourier = torch.cat([hFourier, flipped_hFourier], dim=0)\n\n return self.oversample_multiply_crop(x_fourier, hFourier)" }, { "identifier": "CTFCryoDRGN", "path": "cryostar/utils/ctf_utils.py", "snippet": "class CTFCryoDRGN(CTFBase):\n\n def __init__(self,\n size,\n resolution,\n num_particles=None,\n kV=300,\n cs=2.0,\n amplitudeContrast=0.1,\n requires_grad=False):\n super(CTFBase, self).__init__()\n self.size = size\n self.resolution = resolution\n self.requires_grad = requires_grad\n self.kV = kV\n self.cs = cs\n self.ac = amplitudeContrast\n # ax = torch.linspace(-1. / (2. * resolution), 1 / (2. 
* resolution), self.size)\n # mx, my = torch.meshgrid(ax, ax, indexing=\"ij\")\n ax = torch.fft.fftshift(torch.fft.fftfreq(self.size, self.resolution))\n mx, my = torch.meshgrid(ax, ax, indexing=\"xy\")\n freqs = torch.stack([mx.flatten(), my.flatten()], 1)\n self.register_buffer(\"freqs\", freqs)\n\n def get_ctf(self, ctf_params={}):\n bsz = len(ctf_params[\"defocusU\"])\n device = self.freqs.device\n hFourier = compute_ctf(freqs=self.freqs.repeat(bsz, 1, 1),\n dfu=(ctf_params[\"defocusU\"] * 1e4).squeeze(1),\n dfv=(ctf_params[\"defocusV\"] * 1e4).squeeze(1),\n dfang=torch.rad2deg(ctf_params[\"angleAstigmatism\"]).squeeze(1),\n volt=torch.tensor(self.kV, device=device).repeat(bsz, 1),\n cs=torch.tensor(self.cs, device=device).repeat(bsz, 1),\n w=torch.tensor(self.ac, device=device).repeat(bsz,\n 1)).reshape(bsz, self.size, self.size)\n return hFourier\n\n def forward(self, x_fourier, idcs=0, ctf_params={}, mode='gt', frequency_marcher=None):\n hFourier = -self.get_ctf(ctf_params)\n return x_fourier * hFourier[:, None, :, :]" }, { "identifier": "calc_cor_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_cor_loss(pred_images, gt_images, mask=None):\n if mask is not None:\n pred_images = mask(pred_images)\n gt_images = mask(gt_images)\n pixel_num = mask.num_masked\n else:\n pixel_num = pred_images.shape[-2] * pred_images.shape[-1]\n\n # b, c, h, w -> b, c, num_pix\n pred_images = pred_images.flatten(start_dim=2)\n gt_images = gt_images.flatten(start_dim=2)\n\n # b, c\n dots = (pred_images * gt_images).sum(-1)\n # b, c -> b, c\n err = -dots / (gt_images.std(-1) + 1e-5) / (pred_images.std(-1) + 1e-5)\n # b, c -> b -> 1 value\n err = err.sum(-1).mean() / pixel_num\n return err" }, { "identifier": "calc_kl_loss", "path": "cryostar/utils/losses.py", "snippet": "def calc_kl_loss(mu, log_var, free_bits, reduction=\"mean\"):\n kld_loss = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())\n # free bits\n kld_loss = torch.clamp(kld_loss, free_bits) # (bsz, z-dim)\n kld_loss = torch.mean(kld_loss, dim=1) # (bsz, )\n if reduction == \"mean\":\n kld_loss = torch.mean(kld_loss) # averaged over bsz x z-dim\n elif reduction == \"none\":\n kld_loss = kld_loss\n else:\n raise NotImplementedError\n return kld_loss" }, { "identifier": "log_to_current", "path": "cryostar/utils/misc.py", "snippet": "def set_seed(seed: int = 42):\ndef chain(arg, *funcs):\ndef convert_to_numpy(*args):\ndef CHECK_SHAPE(tensor, expected_shape):\ndef ASSERT_SHAPE(tensor, expected_shape):\ndef parse_mmengine_args(override_mode=\"default\"):\ndef flatten_nested_dict(nested: Union[dict, Config]) -> dict:\ndef warmup(warmup_step, lower=0.0, upper=1.0):\n def run(cur_step):\ndef init_mmengine_config(args):\ndef init_mmengine_exp(args,\n exp_prefix='',\n backup_list=None,\n inplace=True,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\",\n tensorboard=False):\ndef _get_next_version(root_dir, dir_name_prefix):\ndef pl_init_exp(override_mode=\"default\",\n exp_prefix='',\n backup_list=None,\n inplace=False,\n work_dir_name=\"work_dirs\",\n project_name=\"cryostar\"):\ndef save_pdb(CAs, path, ref_pdb_path):\ndef load_CAs_from_pdb(file):\ndef load_NCaC_from_pdb(file):\ndef load_chain_A(pdb_path):\ndef points_to_pdb(path_to_save, points: np.ndarray):\ndef point_stack_to_pdb(path_to_save, point_stack: np.ndarray):\ndef find_rigid_alignment(A, B):\ndef batch_find_rigid_alignment(A, B):\ndef pretty_dict(x, precision=3):\ndef create_sphere_mask(d, h, w, center=None, radius=None) -> np.ndarray:\ndef create_circular_mask(h, w, 
center=None, radius=None) -> np.ndarray:\n H = A_c.T.mm(B_c)\n U, S, V = torch.svd(H)\n R = V.mm(U.T)\n H = einops.einsum(A_c, B_c, \"b n c1, b n c2 -> b c1 c2\")\n V = VmT.mT\n R = einops.einsum(V, U.transpose(2, 1), \"b c1 c2, b c2 c3 -> b c1 c3\")" }, { "identifier": "bt_save_pdb", "path": "cryostar/utils/pdb_tools.py", "snippet": "def bt_save_pdb(file_path: Union[str, Path], array: Union[AtomArray, AtomArrayStack], **kwargs):\n \"\"\"Save biotite AtomArray or AtomArrayStack to pdb file\n\n Parameters\n ----------\n file_path: save file path\n array: the structure to be saved\n kwargs: additional parameters to be passed, always empty\n\n \"\"\"\n bt_struc.io.save_structure(file_path, array, **kwargs)" }, { "identifier": "EMAN2Grid", "path": "cryostar/gmm/gmm.py", "snippet": "class EMAN2Grid(BaseGrid):\n \"\"\"EMAN2 style grid.\n origin set to -(side_shape // 2) * voxel_size\n\n \"\"\"\n\n def __init__(self, side_shape, voxel_size):\n origin = -side_shape // 2 * voxel_size\n super().__init__(side_shape=side_shape, voxel_size=voxel_size, origin=origin)" }, { "identifier": "batch_projection", "path": "cryostar/gmm/gmm.py", "snippet": "def batch_projection(gauss: Gaussian, rot_mats: torch.Tensor, line_grid: Grid) -> torch.Tensor:\n \"\"\"A quick version of e2gmm projection.\n\n Parameters\n ----------\n gauss: (b/1, num_centers, 3) mus, (b/1, num_centers) sigmas and amplitudes\n rot_mats: (b, 3, 3)\n line_grid: (num_pixels, 3) coords, (nx, ) shape\n\n Returns\n -------\n proj: (b, y, x) projections\n \"\"\"\n\n centers = einops.einsum(rot_mats, gauss.mus, \"b c31 c32, b nc c32 -> b nc c31\")\n\n sigmas = einops.rearrange(gauss.sigmas, 'b nc -> b 1 nc')\n sigmas = 2 * sigmas**2\n\n proj_x = einops.rearrange(line_grid.coords, \"nx -> 1 nx 1\") - einops.rearrange(centers[..., 0], \"b nc -> b 1 nc\")\n proj_x = torch.exp(-proj_x**2 / sigmas)\n\n proj_y = einops.rearrange(line_grid.coords, \"ny -> 1 ny 1\") - einops.rearrange(centers[..., 1], \"b nc -> b 1 nc\")\n proj_y = torch.exp(-proj_y**2 / sigmas)\n\n proj = einops.einsum(gauss.amplitudes, proj_x, proj_y, \"b nc, b nx nc, b ny nc -> b nx ny\")\n proj = einops.rearrange(proj, \"b nx ny -> b ny nx\")\n return proj" }, { "identifier": "Gaussian", "path": "cryostar/gmm/gmm.py", "snippet": "class Gaussian:\n mus: Union[torch.Tensor, np.ndarray]\n sigmas: Union[torch.Tensor, np.ndarray]\n amplitudes: Union[torch.Tensor, np.ndarray]" }, { "identifier": "E3Deformer", "path": "cryostar/gmm/deformer.py", "snippet": "class E3Deformer(torch.nn.Module, DeformerProtocol):\n\n def transform(self, deformation, coords):\n ASSERT_SHAPE(coords, (None, 3))\n ASSERT_SHAPE(deformation, (None, coords.shape[0] * 3))\n\n bsz = deformation.shape[0]\n shift = deformation.reshape(bsz, -1, 3)\n return shift + coords" }, { "identifier": "NMADeformer", "path": "cryostar/gmm/deformer.py", "snippet": "class NMADeformer(torch.nn.Module, DeformerProtocol):\n def __init__(self, modes: torch.FloatTensor) -> None:\n super().__init__()\n modes = einops.rearrange(\n modes, \"(num_coords c3) num_modes -> num_modes num_coords c3\", c3=3\n )\n self.register_buffer(\"modes\", modes)\n self.num_modes = modes.shape[0]\n self.num_coords = modes.shape[1]\n\n def transform(self, deformation, coords):\n ASSERT_SHAPE(coords, (self.num_coords, 3))\n ASSERT_SHAPE(deformation, (None, 6 + self.num_modes))\n\n axis_angle = deformation[..., :3]\n translation = deformation[..., 3:6] * 10\n nma_coeff = deformation[..., 6:]\n rotation_matrix = axis_angle_to_matrix(axis_angle)\n\n nma_deform_e3 = 
einops.einsum(\n nma_coeff, self.modes, \"bsz num_modes, num_modes num_coords c3 -> bsz num_coords c3\"\n )\n rotated_coords = einops.einsum(rotation_matrix, nma_deform_e3 + coords,\n \"bsz c31 c32, bsz num_coords c31 -> bsz num_coords c32\")\n deformed_coords = rotated_coords + einops.rearrange(translation, \"bsz c3 -> bsz 1 c3\")\n return deformed_coords" }, { "identifier": "primal_to_fourier_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "@torch.autocast(\"cuda\")\ndef primal_to_fourier_2d(r: torch.Tensor) -> torch.Tensor:\n with torch.autocast(\"cuda\", enabled=False):\n r = torch.fft.ifftshift(r.float(), dim=(-2, -1))\n f = torch.fft.fftshift(torch.fft.fftn(r, s=(r.shape[-2], r.shape[-1]), dim=(-2, -1)), dim=(-2, -1))\n return f" }, { "identifier": "fourier_to_primal_2d", "path": "cryostar/utils/fft_utils.py", "snippet": "def fourier_to_primal_2d(f: torch.Tensor) -> torch.Tensor:\n f = torch.fft.ifftshift(f, dim=(-2, -1))\n return torch.fft.fftshift(torch.fft.ifftn(f, s=(f.shape[-2], f.shape[-1]), dim=(-2, -1)), dim=(-2, -1))" }, { "identifier": "Polymer", "path": "cryostar/utils/polymer.py", "snippet": "class Polymer:\n chain_id: np.ndarray\n res_id: np.ndarray\n res_name: np.ndarray\n coord: np.ndarray\n atom_name: np.ndarray\n element: np.ndarray\n num_electron: np.ndarray\n\n def __init__(self, num):\n self.chain_id = np.empty(num, dtype=\"U4\")\n self.res_id = np.zeros(num, dtype=int)\n self.res_name = np.empty(num, dtype=\"U3\")\n self.coord = np.zeros((num, 3), dtype=np.float32)\n self.atom_name = np.empty(num, dtype=\"U6\")\n self.element = np.empty(num, dtype=\"U2\")\n self.num_electron = np.zeros(num, dtype=int)\n\n def __setitem__(self, index, kwargs):\n assert set(kwargs.keys()).issubset(f.name for f in dataclasses.fields(self))\n for k, v in kwargs.items():\n getattr(self, k)[index] = v\n\n def __getitem__(self, index):\n return {f.name: getattr(self, f.name)[index] for f in dataclasses.fields(self)}\n\n def __len__(self):\n return len(self.chain_id)\n\n @property\n def num_amino_acids(self):\n return np.sum(np.isin(self.atom_name, AA_ATOMS))\n\n @property\n def num_nucleotides(self):\n return np.sum(np.isin(self.atom_name, NT_ATOMS))\n\n @property\n def num_chains(self):\n return len(np.unique(self.chain_id))\n\n @classmethod\n def from_atom_arr(cls, atom_arr):\n assert isinstance(atom_arr, struc.AtomArray)\n\n nt_arr = atom_arr[struc.filter_nucleotides(atom_arr)]\n aa_arr = atom_arr[struc.filter_amino_acids(atom_arr)]\n\n num = 0\n if len(aa_arr) > 0:\n num += struc.get_residue_count(aa_arr)\n if len(nt_arr) > 0:\n for res in struc.residue_iter(nt_arr):\n valid_atoms = set(res.atom_name).intersection(NT_ATOMS)\n if len(valid_atoms) <= 0:\n raise UserWarning(f\"Nucleotides doesn't contain {' or '.join(NT_ATOMS)}.\")\n else:\n num += len(valid_atoms)\n meta = cls(num)\n\n def _update_res(tmp_res, kind=\"aa\"):\n nonlocal pos\n\n if kind == \"aa\":\n using_atom_names = AA_ATOMS\n filtered_res = tmp_res[struc.filter_peptide_backbone(tmp_res)]\n elif kind == \"nt\":\n using_atom_names = NT_ATOMS\n filtered_res = tmp_res\n else:\n raise NotImplemented\n\n valid_atom_names = set(tmp_res.atom_name).intersection(using_atom_names)\n\n for select_atom_name in valid_atom_names:\n meta[pos] = {\n \"chain_id\": tmp_res.chain_id[0],\n \"res_id\": tmp_res.res_id[0],\n \"res_name\": tmp_res.res_name[0],\n \"coord\": filtered_res[filtered_res.atom_name == select_atom_name].coord,\n \"atom_name\": select_atom_name,\n \"element\": filtered_res[filtered_res.atom_name == 
select_atom_name].element[0],\n \"num_electron\": get_num_electrons(tmp_res) // len(valid_atom_names)\n }\n pos += 1\n\n def _update(tmp_arr, kind=\"aa\"):\n nonlocal pos\n for chain in struc.chain_iter(tmp_arr):\n for tmp_res in struc.residue_iter(chain):\n _update_res(tmp_res, kind)\n\n pos = 0\n\n if len(aa_arr) > 0:\n _update(aa_arr, kind=\"aa\")\n if len(nt_arr) > 0:\n _update(nt_arr, kind=\"nt\")\n\n assert pos == num\n return meta\n\n @classmethod\n def from_pdb(cls, file_path):\n atom_arr = bt_read_pdb(file_path)\n if atom_arr.stack_depth() > 1:\n print(\"PDB file contains more than 1 models, select the 1st model\")\n atom_arr = atom_arr[0]\n return Polymer.from_atom_arr(atom_arr)\n\n def to_atom_arr(self):\n num = len(self)\n atom_arr = struc.AtomArray(num)\n atom_arr.coord = self.coord\n\n for f in dataclasses.fields(self):\n if f.name != \"coord\" and f.name in atom_arr.get_annotation_categories():\n atom_arr.set_annotation(f.name, getattr(self, f.name))\n # atom_arr.atom_name[atom_arr.atom_name == \"R\"] = \"CB\"\n return atom_arr" }, { "identifier": "NT_ATOMS", "path": "cryostar/utils/polymer.py", "snippet": "NT_ATOMS = (\"C1'\", )" }, { "identifier": "AA_ATOMS", "path": "cryostar/utils/polymer.py", "snippet": "AA_ATOMS = (\"CA\", )" }, { "identifier": "find_quaint_cutoff_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_quaint_cutoff_pairs(coord_arr,\n chain_id_arr,\n res_id_arr,\n intra_chain_cutoff=12.,\n inter_chain_cutoff=12.,\n intra_chain_res_bound=None):\n sel_indices = []\n dist_map = distance.cdist(coord_arr, coord_arr, metric='euclidean')\n # 1. intra chain\n sel_mask = dist_map <= intra_chain_cutoff\n sel_mask = np.triu(sel_mask, k=1)\n # get indices of valid pairs\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n indices_in_pdb = indices_in_pdb[chain_id_arr[indices_in_pdb[:, 0]] == chain_id_arr[indices_in_pdb[:, 1]]]\n # filter by res_id\n if intra_chain_res_bound is not None:\n assert res_id_arr is not None\n res_ids = res_id_arr[indices_in_pdb]\n res_id_dist = np.abs(np.diff(res_ids, axis=1)).flatten()\n indices_in_pdb = indices_in_pdb[res_id_dist <= intra_chain_res_bound]\n\n sel_indices.append(indices_in_pdb)\n\n # 2. 
inter chain\n if inter_chain_cutoff is not None:\n sel_mask = dist_map <= inter_chain_cutoff\n sel_mask = np.triu(sel_mask, k=1)\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n indices_in_pdb = indices_in_pdb[chain_id_arr[indices_in_pdb[:, 0]] != chain_id_arr[indices_in_pdb[:, 1]]]\n sel_indices.append(indices_in_pdb)\n\n sel_indices = np.vstack(sel_indices)\n return sel_indices" }, { "identifier": "find_range_cutoff_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_range_cutoff_pairs(coord_arr, min_cutoff=4., max_cutoff=10.):\n dist_map = distance.cdist(coord_arr, coord_arr, metric='euclidean')\n sel_mask = (dist_map <= max_cutoff) & (dist_map >= min_cutoff)\n indices_in_pdb = np.nonzero(sel_mask)\n indices_in_pdb = np.column_stack((indices_in_pdb[0], indices_in_pdb[1]))\n return indices_in_pdb" }, { "identifier": "find_continuous_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def find_continuous_pairs(chain_id_arr, res_id_arr, atom_name_arr):\n pairs = []\n\n # res_id in different chains are duplicated, so loop on chains\n u_chain_id = np.unique(chain_id_arr)\n\n for c_id in u_chain_id:\n tmp_mask = chain_id_arr == c_id\n tmp_indices_in_pdb = np.nonzero(tmp_mask)[0]\n\n tmp_res_id_arr = res_id_arr[tmp_mask]\n tmp_atom_name_arr = atom_name_arr[tmp_mask]\n\n # check is aa or nt\n tmp_atom_name_set = set(tmp_atom_name_arr)\n\n if len(tmp_atom_name_set.intersection(AA_ATOMS)) > len(tmp_atom_name_set.intersection(NT_ATOMS)):\n in_res_atom_names = AA_ATOMS\n elif len(tmp_atom_name_set.intersection(AA_ATOMS)) < len(tmp_atom_name_set.intersection(NT_ATOMS)):\n in_res_atom_names = NT_ATOMS\n else:\n raise NotImplemented(\"Cannot determine chain is amino acid or nucleotide.\")\n\n # find pairs\n if len(in_res_atom_names) == 1:\n u_res_id, indices_in_chain = np.unique(tmp_res_id_arr, return_index=True)\n if len(u_res_id) != np.sum(tmp_mask):\n raise ValueError(f\"Found duplicate residue id in single chain {c_id}.\")\n\n indices_in_chain_pair = np.column_stack((indices_in_chain[:-1], indices_in_chain[1:]))\n\n # must be adjacent on residue id\n valid_mask = np.abs(np.diff(u_res_id[indices_in_chain_pair], axis=1)) == 1\n\n indices_in_chain_pair = indices_in_chain_pair[valid_mask.flatten()]\n\n indices_in_pdb_pair = tmp_indices_in_pdb[indices_in_chain_pair]\n elif len(in_res_atom_names) > 1:\n\n def _cmp(a, b):\n # res_id compare\n if a[0] != b[0]:\n return a[0] - b[0]\n else:\n # atom_name in the same order of AA_ATOMS or NT_ATOMS\n return in_res_atom_names.index(a[1]) - in_res_atom_names.index(b[1])\n\n cache = list(zip(tmp_res_id_arr, tmp_atom_name_arr, tmp_indices_in_pdb))\n sorted_cache = list(sorted(cache, key=cmp_to_key(_cmp)))\n\n sorted_indices_in_pdb = [item[2] for item in sorted_cache]\n sorted_res_id = [item[0] for item in sorted_cache]\n\n indices_in_pdb_pair = np.column_stack((sorted_indices_in_pdb[:-1], sorted_indices_in_pdb[1:]))\n\n valid_mask = np.abs(np.diff(np.column_stack((sorted_res_id[:-1], sorted_res_id[1:])), axis=1)) <= 1\n\n indices_in_pdb_pair = indices_in_pdb_pair[valid_mask.flatten()]\n else:\n raise NotImplemented(\"No enough atoms to construct continuous pairs.\")\n\n pairs.append(indices_in_pdb_pair)\n\n pairs = np.vstack(pairs)\n return pairs" }, { "identifier": "calc_dist_by_pair_indices", "path": "cryostar/utils/dist_loss.py", "snippet": "def calc_dist_by_pair_indices(coord_arr, pair_indices):\n coord_pair_arr = coord_arr[pair_indices] # num_pair, 2, 3\n dist = 
np.linalg.norm(np.diff(coord_pair_arr, axis=1), ord=2, axis=-1)\n return dist.flatten()" }, { "identifier": "remove_duplicate_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def remove_duplicate_pairs(pairs_a, pairs_b, remove_flip=True):\n \"\"\"Remove pair b from a\"\"\"\n s = max(pairs_a.max(), pairs_b.max()) + 1\n # trick for fast comparison\n mask = np.zeros((s, s), dtype=bool)\n\n np.put(mask, np.ravel_multi_index(pairs_a.T, mask.shape), True)\n np.put(mask, np.ravel_multi_index(pairs_b.T, mask.shape), False)\n if remove_flip:\n np.put(mask, np.ravel_multi_index(np.flip(pairs_b, 1).T, mask.shape), False)\n return np.column_stack(np.nonzero(mask))" }, { "identifier": "filter_same_chain_pairs", "path": "cryostar/utils/dist_loss.py", "snippet": "def filter_same_chain_pairs(pair_ids, chain_id_arr):\n chain_ids = chain_id_arr[pair_ids]\n\n same_chain_mask = chain_ids[:, 0] == chain_ids[:, 1]\n\n pair_mask = []\n\n for u in np.unique(chain_ids):\n tmp = np.logical_and(chain_ids[:, 0] == u, same_chain_mask)\n if np.any(tmp):\n pair_mask.append(tmp)\n\n if len(pair_mask) > 0:\n return np.row_stack(pair_mask)\n else:\n return None" }, { "identifier": "DistLoss", "path": "cryostar/utils/dist_loss.py", "snippet": "class DistLoss(nn.Module):\n\n def __init__(self, pair_ids, gt_dists, reduction=\"mean\"):\n super().__init__()\n self.reduction = reduction\n\n self.register_buffer(\"pair_ids\", torch.from_numpy(pair_ids).long())\n self.register_buffer(\"gt_dists\", torch.from_numpy(gt_dists).float())\n\n # edge-wise weights\n # raw_weights = torch.ones(len(pair_ids), dtype=torch.float) * 3.\n #\n # self.register_parameter(\"raw_weights\", nn.Parameter(raw_weights))\n\n # RBF residue-wise weights\n # u_left_ids = np.unique(pair_ids[:, 0])\n #\n # std_idx = np.zeros(max(u_left_ids) + 1, dtype=int)\n # sparse_idx = np.arange(len(u_left_ids))\n #\n # std_idx[u_left_ids] = sparse_idx\n #\n # select_index = std_idx[pair_ids[:, 0]]\n\n # weight = 0.9 at dist_rescale\n # sigmas = torch.ones(max(u_left_ids) + 1, dtype=torch.float) * np.sqrt(-0.5 / np.log(0.9))\n #\n # self.dist_rescale = dist_rescale\n # self.register_buffer(\"select_index\", torch.from_numpy(select_index).long())\n # self.register_parameter(\"sigmas\", nn.Parameter(sigmas))\n\n # def get_weights(self):\n # return torch.sigmoid(self.raw_weights)\n # edge_sigmas = torch.index_select(self.sigmas, dim=0, index=self.select_index)\n # weights = torch.exp(-torch.pow(self.gt_dists / self.dist_rescale, 2) / (2 * torch.pow(edge_sigmas, 2)))\n # return weights\n\n def calc_pair_dists(self, batch_struc):\n batch_dist = batch_struc[:, self.pair_ids] # bsz, num_pair, 2, 3\n batch_dist = LA.vector_norm(torch.diff(batch_dist, dim=-2), axis=-1).squeeze(-1) # bsz, num_pair\n return batch_dist\n\n def forward(self, batch_struc):\n batch_dist = self.calc_pair_dists(batch_struc)\n # mse = torch.pow(batch_dist - self.gt_dists.unsqueeze(0), 2) * self.get_weights().unsqueeze(0)\n mse = torch.pow(batch_dist - self.gt_dists.unsqueeze(0), 2)\n if self.reduction is None:\n return mse\n elif self.reduction == \"mean\":\n return torch.mean(mse)\n else:\n raise NotImplementedError" }, { "identifier": "get_nearest_point", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_nearest_point(data: np.ndarray, query: np.ndarray) -> Tuple[npt.NDArray[np.float32], np.ndarray]:\n \"\"\"\n Find closest point in @data to @query\n Return datapoint, index\n \"\"\"\n ind = cdist(query, data).argmin(axis=1)\n return data[ind], ind" }, { "identifier": 
"cluster_kmeans", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def cluster_kmeans(z: np.ndarray, K: int, on_data: bool = True, reorder: bool = True) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n Cluster z by K means clustering\n Returns cluster labels, cluster centers\n If reorder=True, reorders clusters according to agglomerative clustering of cluster centers\n \"\"\"\n kmeans = KMeans(n_clusters=K, n_init=10, random_state=0, max_iter=10)\n labels = kmeans.fit_predict(z)\n centers = kmeans.cluster_centers_\n\n centers_ind = None\n if on_data:\n centers, centers_ind = get_nearest_point(z, centers)\n\n if reorder:\n # BUG from seaborn or scipy:\n # sns.clustermap only supports data with at least 2 dim\n if z.shape[1] == 1:\n centers = np.hstack([centers, np.zeros_like(centers)])\n g = sns.clustermap(centers)\n reordered = g.dendrogram_row.reordered_ind\n centers = centers[reordered]\n if centers_ind is not None:\n centers_ind = centers_ind[reordered]\n tmp = {k: i for i, k in enumerate(reordered)}\n labels = np.array([tmp[k] for k in labels])\n if z.shape[1] == 1:\n centers = centers[:, :1]\n return labels, centers" }, { "identifier": "run_pca", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def run_pca(z: np.ndarray) -> Tuple[np.ndarray, PCA]:\n pca = PCA(z.shape[1])\n pca.fit(z)\n # print(\"Explained variance ratio:\")\n # print(pca.explained_variance_ratio_)\n pc = pca.transform(z)\n return pc, pca" }, { "identifier": "get_pc_traj", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def get_pc_traj(\n pca: PCA,\n zdim: int,\n numpoints: int,\n dim: int,\n start: Optional[float] = 5,\n end: Optional[float] = 95,\n percentiles: Optional[np.ndarray] = None,\n) -> npt.NDArray[np.float32]:\n \"\"\"\n Create trajectory along specified principal component\n\n Inputs:\n pca: sklearn PCA object from run_pca\n zdim (int)\n numpoints (int): number of points between @start and @end\n dim (int): PC dimension for the trajectory (1-based index)\n start (float): Value of PC{dim} to start trajectory\n end (float): Value of PC{dim} to stop trajectory\n percentiles (np.array or None): Define percentile array instead of np.linspace(start,stop,numpoints)\n\n Returns:\n np.array (numpoints x zdim) of z values along PC\n \"\"\"\n if percentiles is not None:\n assert len(percentiles) == numpoints\n traj_pca = np.zeros((numpoints, zdim))\n if percentiles is not None:\n traj_pca[:, dim - 1] = percentiles\n else:\n assert start is not None\n assert end is not None\n traj_pca[:, dim - 1] = np.linspace(start, end, numpoints)\n ztraj_pca = pca.inverse_transform(traj_pca)\n return ztraj_pca" }, { "identifier": "run_umap", "path": "cryostar/utils/latent_space_utils.py", "snippet": "def run_umap(z: np.ndarray, **kwargs) -> Tuple[np.ndarray, umap.UMAP]:\n reducer = umap.UMAP(**kwargs)\n z_embedded = reducer.fit_transform(z)\n return z_embedded, reducer" }, { "identifier": "plot_z_dist", "path": "cryostar/utils/vis_utils.py", "snippet": "def plot_z_dist(z, extra_cluster=None, save_path=None):\n if z.shape[-1] == 1:\n fig = sns.displot(x=z[:, 0])\n fig.set_xlabels(\"z values\")\n if save_path is not None:\n fig.savefig(save_path)\n elif z.shape[-1] == 2:\n sns.set()\n fig = sns.jointplot(x=z[:, 0], y=z[:, 1], kind=\"kde\", fill=True)\n ax = fig.figure.axes\n if extra_cluster is not None:\n ax[0].scatter(extra_cluster[:, 0], extra_cluster[:, 1], marker='.', color='tab:orange')\n if save_path is not None:\n fig.savefig(save_path)\n else:\n raise ValueError(f\"input z with shape 
{z.shape}\")" }, { "identifier": "save_tensor_image", "path": "cryostar/utils/vis_utils.py", "snippet": "def save_tensor_image(tensors, save_path, mask=None):\n # normalize\n max_val = torch.max(tensors.flatten(start_dim=1), 1)[0][:, None, None, None]\n min_val = torch.min(tensors.flatten(start_dim=1), 1)[0][:, None, None, None]\n tensors = (tensors - min_val) / (max_val - min_val)\n\n show_img = ToPILImage()(make_grid(tensors, nrow=5))\n if mask is None:\n show_img.save(save_path)\n else:\n show_img = np.copy(np.asarray(show_img))\n # show_img = cv2.cvtColor(show_img, cv2.COLOR_GRAY2RGB)\n if mask.ndim == 2:\n mask = mask[None]\n mask = ToPILImage()(make_grid(mask.expand(tensors.shape[0], -1, -1, -1), nrow=5))\n mask = np.invert(np.asarray(mask).astype(bool))[..., 0]\n color_mask = np.array([[0, 0, 0], [31, 119, 180]], dtype=np.uint8)\n color_mask = color_mask[mask.astype(int)]\n show_img[mask] = cv2.addWeighted(show_img[mask], 0.5, color_mask[mask], 0.5, 0)\n show_img = Image.fromarray(show_img)\n show_img.save(save_path)" }, { "identifier": "merge_step_outputs", "path": "cryostar/utils/pl_utils.py", "snippet": "def merge_step_outputs(outputs):\n ks = outputs[0].keys()\n res = {}\n for k in ks:\n res[k] = torch.concat([out[k] for out in outputs], dim=0)\n return res" }, { "identifier": "squeeze_dict_outputs_1st_dim", "path": "cryostar/utils/pl_utils.py", "snippet": "def squeeze_dict_outputs_1st_dim(outputs):\n res = {}\n for k in outputs.keys():\n res[k] = outputs[k].flatten(start_dim=0, end_dim=1)\n return res" }, { "identifier": "filter_outputs_by_indices", "path": "cryostar/utils/pl_utils.py", "snippet": "def filter_outputs_by_indices(outputs, indices):\n res = {}\n for k in outputs.keys():\n res[k] = outputs[k][indices]\n return res" }, { "identifier": "get_1st_unique_indices", "path": "cryostar/utils/pl_utils.py", "snippet": "def get_1st_unique_indices(t):\n _, idx, counts = torch.unique(t, dim=None, sorted=True, return_inverse=True, return_counts=True)\n # ind_sorted: the index corresponding to same unique value will be grouped by these indices\n _, ind_sorted = torch.sort(idx, stable=True)\n cum_sum = counts.cumsum(0)\n cum_sum = torch.cat((cum_sum.new_tensor([\n 0,\n ]), cum_sum[:-1]))\n first_idx = ind_sorted[cum_sum]\n return first_idx" } ]
import os.path as osp import warnings import collections import einops import numpy as np import biotite.structure as struc import torch import lightning.pytorch as pl from pathlib import Path from copy import deepcopy from torch import nn from torch import optim from torch.utils.data import DataLoader from torchinfo import summary from lightning.fabric.utilities.warnings import PossibleUserWarning from lightning.pytorch.utilities import rank_zero_only from lightning.pytorch.strategies import DDPStrategy from mmengine import mkdir_or_exist from cryostar.utils.transforms import SpatialGridTranslate from cryostar.utils.dataio import StarfileDataSet, StarfileDatasetConfig, Mask from cryostar.utils.ctf_utils import CTFRelion, CTFCryoDRGN from cryostar.utils.losses import calc_cor_loss, calc_kl_loss from cryostar.utils.misc import log_to_current, \ pl_init_exp, pretty_dict, set_seed, warmup from cryostar.utils.pdb_tools import bt_save_pdb from cryostar.gmm.gmm import EMAN2Grid, batch_projection, Gaussian from cryostar.gmm.deformer import E3Deformer, NMADeformer from cryostar.utils.fft_utils import primal_to_fourier_2d, fourier_to_primal_2d from cryostar.utils.polymer import Polymer, NT_ATOMS, AA_ATOMS from cryostar.utils.dist_loss import (find_quaint_cutoff_pairs, find_range_cutoff_pairs, find_continuous_pairs, calc_dist_by_pair_indices, remove_duplicate_pairs, filter_same_chain_pairs, DistLoss) from cryostar.utils.latent_space_utils import get_nearest_point, cluster_kmeans, run_pca, get_pc_traj, run_umap from cryostar.utils.vis_utils import plot_z_dist, save_tensor_image from cryostar.utils.pl_utils import merge_step_outputs, squeeze_dict_outputs_1st_dim, \ filter_outputs_by_indices, get_1st_unique_indices from miscs import calc_pair_dist_loss, calc_clash_loss, low_pass_mask2d, VAE, infer_ctf_params_from_config
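The primal_to_fourier_2d / fourier_to_primal_2d pair imported above keeps the zero frequency centered by wrapping the FFT in ifftshift/fftshift. A minimal round-trip sketch (batch shape and side length are made up; an even side is assumed):

import torch

imgs = torch.randn(2, 1, 8, 8)
f = torch.fft.fftshift(
    torch.fft.fftn(torch.fft.ifftshift(imgs, dim=(-2, -1)), dim=(-2, -1)),
    dim=(-2, -1))
rec = torch.fft.fftshift(
    torch.fft.ifftn(torch.fft.ifftshift(f, dim=(-2, -1)), dim=(-2, -1)),
    dim=(-2, -1))
assert torch.allclose(imgs, rec.real, atol=1e-5)  # the shifts cancel exactly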
14716
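prepare_images in the code below flattens Fourier inputs with torch.view_as_real, which is also why in_dim doubles in fourier mode. A small shape check (sizes made up):

import torch

f = torch.fft.fftn(torch.randn(2, 1, 4, 4), dim=(-2, -1))  # complex tensor
flat = torch.view_as_real(f).flatten(start_dim=1)          # trailing real/imag axis appended
assert flat.shape == (2, 4 * 4 * 2)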
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None:
# other # avoid num_workers set as cpu_count warning warnings.simplefilter("ignore", PossibleUserWarning) # only log to rank_zero, comment this for debugging log_to_current = rank_zero_only(log_to_current) TASK_NAME = "atom" def prepare_images(images: torch.FloatTensor, space: str): assert space in ("real", "fourier") if space == "real": model_input = einops.rearrange(images, "b 1 ny nx -> b (1 ny nx)") else: fimages = primal_to_fourier_2d(images) model_input = einops.rearrange(torch.view_as_real(fimages), "b 1 ny nx c2 -> b (1 ny nx c2)", c2=2) return model_input class InitTask(pl.LightningModule): def __init__(self, em_module): super().__init__() self.cfg = em_module.cfg self.em_module = em_module self.loss_deque = collections.deque([ 10, ], maxlen=20) def on_train_batch_end(self, outputs, batch, batch_idx): self.loss_deque.append(outputs['loss'].item()) if np.mean(self.loss_deque) < 1e-3: self.trainer.should_stop = True # update all process status self.trainer.should_stop = self.trainer.strategy.broadcast(self.trainer.should_stop) def training_step(self, batch, batch_idx): images = batch["proj"] idxes = batch["idx"] rot_mats, trans_mats = self.em_module.get_batch_pose(batch) pred_deformation, mu, log_var = self.em_module.model(prepare_images(images, self.cfg.model.input_space), idxes, rot_mats) shift_loss = torch.mean(torch.pow(pred_deformation.flatten(start_dim=-2), 2)) loss = shift_loss if self.global_step % self.cfg.runner.log_every_n_step == 0: log_to_current(f"loss {loss.item()}") return loss def configure_optimizers(self): return optim.AdamW(self.em_module.model.parameters(), lr=1e-4) def on_fit_end(self): log_to_current(f"Init finished with loss {np.mean(self.loss_deque)}") class CryoEMTask(pl.LightningModule): def __init__(self, cfg, dataset): super().__init__() cfg = deepcopy(cfg) self.cfg = cfg # Define GMM meta = Polymer.from_pdb(cfg.dataset_attr.ref_pdb_path) log_to_current(f"Load reference structure from {cfg.dataset_attr.ref_pdb_path}") # for save self.template_pdb = meta.to_atom_arr() log_to_current(f"Protein contains {len(meta)} atoms, " f"{meta.num_amino_acids} amino acids, " f"{meta.num_nucleotides} nucleotides, " f"{meta.num_chains} chains.") # ref ref_centers = torch.from_numpy(meta.coord).float() ref_amps = torch.from_numpy(meta.num_electron).float() ref_sigmas = torch.ones_like(ref_amps) ref_sigmas.fill_(2.) 
log_to_current(f"1st GMM blob amplitude {ref_amps[0].item()}, sigma {ref_sigmas[0].item()}") num_pts = len(meta) log_to_current(f"Reference structure has {num_pts} atom coordinates") # tunable params # gmm self.register_buffer("gmm_centers", ref_centers) if cfg.gmm.tunable: log_to_current("Set GMM sigmas, amplitudes tunable") self.register_parameter("gmm_sigmas", nn.Parameter(ref_sigmas)) self.register_parameter("gmm_amps", nn.Parameter(ref_amps)) else: self.register_buffer("gmm_sigmas", ref_sigmas) self.register_buffer("gmm_amps", ref_amps) nma_modes = None if (hasattr(self.cfg.extra_input_data_attr, "nma_path") and self.cfg.extra_input_data_attr.nma_path not in ["", None]): nma_modes = torch.tensor(np.load(self.cfg.extra_input_data_attr.nma_path), dtype=torch.float32) log_to_current(f"Load NMA coefficients from {self.cfg.extra_input_data_attr.nma_path}, " f"whose shape is {nma_modes.shape}") # model if cfg.model.input_space == "fourier": in_dim = 2 * cfg.data_process.down_side_shape ** 2 elif cfg.model.input_space == "real": in_dim = cfg.data_process.down_side_shape ** 2 else: raise NotImplementedError self.model = VAE(in_dim=in_dim, out_dim=num_pts * 3 if nma_modes is None else 6 + nma_modes.shape[1], **cfg.model.model_cfg) log_to_current('Model summary:\n' + str(summary(self.model, input_size=[(1, in_dim), (1,)], verbose=0))) if nma_modes is None:
self.deformer = E3Deformer()
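calc_kl_loss from the context snippets above clamps the per-dimension KL at a free-bits floor before averaging, so dimensions already close to the prior stop contributing. A worked example with made-up numbers:

import torch

mu, log_var, free_bits = torch.tensor([[0.0, 2.0]]), torch.zeros(1, 2), 0.1
kld = -0.5 * (1 + log_var - mu.pow(2) - log_var.exp())  # per-dim KL: [[0.0, 2.0]]
kld = torch.clamp(kld, free_bits)                       # floor applied: [[0.1, 2.0]]
assert torch.isclose(kld.mean(dim=1).mean(), torch.tensor(1.05))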
13
2023-11-06 07:15:26+00:00
24k
KAIST-AILab/palr
train.py
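Every trainer snippet below (BC, RAP, FCA, MINE_BC, PALR) logs estimate_hscic(X, Y, Z, ridge_lambda), whose implementation is not included in this excerpt. For intuition only, here is a rough self-contained sketch of a standard kernel HSCIC estimator (Gaussian kernels with a fixed made-up bandwidth, ridge-regressed conditional mean embeddings); the repository's estimate_hscic may differ in kernel choice and normalization:

import torch

def _gaussian_gram(x, sigma=1.0):
    # Gram matrix of a Gaussian kernel; the bandwidth is an assumption.
    return torch.exp(-torch.cdist(x, x) ** 2 / (2 * sigma ** 2))

def hscic_sketch(X, Y, Z, ridge_lambda=1e-5):
    # HSCIC(X, Y | Z): for each z_i, compare the conditional mean embedding
    # of the joint (X, Y) against the product of the marginals, with kernel
    # ridge regression weights W over the batch (column i serves z_i).
    n = Z.shape[0]
    Kx, Ky, Kz = _gaussian_gram(X), _gaussian_gram(Y), _gaussian_gram(Z)
    W = torch.linalg.solve(Kz + ridge_lambda * n * torch.eye(n), Kz)
    BX, BY = Kx @ W, Ky @ W                      # column i: Kx w_i, Ky w_i
    term1 = torch.diagonal(W.T @ (Kx * Ky) @ W)  # w_i^T (Kx * Ky) w_i
    term2 = (W * (BX * BY)).sum(dim=0)           # w_i^T ((Kx w_i) * (Ky w_i))
    term3 = (W * BX).sum(dim=0) * (W * BY).sum(dim=0)
    return (term1 - 2 * term2 + term3).mean()

# e.g. hscic_sketch(torch.randn(64, 16), torch.randn(64, 3), torch.randn(64, 3))

A larger value indicates stronger dependence between X and Y given Z, which is why the PALR snippet below adds reg_coef * hscic_estimate to the negative log-likelihood.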
[ { "identifier": "BC", "path": "imitation/bc.py", "snippet": "class BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, envname=None, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(BC, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim \n self.stacksize = stacksize\n \n self.policy_optimizer = optim.Adam(policy.parameters(), lr=lr)\n \n self.num_eval_iteration = 50\n self.envname = envname\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path \n \n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.random_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:] \n prev_expert_action_valid = batch_valid['actions'][:, :-self.action_dim] # For debugging\n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_expert_action_valid = torch.tensor(prev_expert_action_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device) \n\n neg_likelihood = -self.policy.log_prob(obs, actions).mean()\n train_loss = neg_likelihood\n \n self.policy_optimizer.zero_grad()\n train_loss.backward()\n self.policy_optimizer.step()\n\n if (num+1) % eval_freq == 0:\n policy_action = self.policy(obs).sample()\n policy_action_valid = self.policy(obs_valid).sample()\n prev_expert_action = batch['actions'][:, :-self.action_dim] \n prev_expert_action = torch.tensor(prev_expert_action, dtype=torch.float32, device=self.device) \n \n # Train data HSCIC (for debugging) \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n p_std = (policy_action - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n 
p_std = p_std.to(torch.float32)\n else:\n Y_std = prev_expert_action\n Z_std = actions\n p_std = policy_action\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)\n \n policy_embedding_valid = self.policy.forward_embedding(obs_valid)\n if self.standardize:\n Y_std = (prev_expert_action_valid - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions_valid - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action_valid\n Z_std = actions_valid\n p_std = policy_action\n \n valid_hscic_estimate = estimate_hscic(X=policy_embedding_valid, Y=Y_std, Z=Z_std, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_expert_action_valid, Z=actions_valid, ridge_lambda=1e-5)\n\n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n valid_loss = valid_neg_likelihood\n\n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: train_policy_loss={train_loss.item():.2f}, val_policy_loss={valid_loss.item():.2f}, eval_ret={eval_ret_mean:.2f}+-{eval_ret_std:.2f} ({obs_valid.shape[0]})',)\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(),\n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "RAP", "path": "imitation/rap.py", "snippet": "class RAP(nn.Module):\n # Implementation of Residual Action Prediction (ECCV 2022)\n # - https://arxiv.org/pdf/2207.09705.pdf\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, embedding_dim=1, stacksize=1, standardize=False\n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(RAP, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n \n self.device = device\n \n self.m_embedding_optimizer = optim.Adam(policy.history_embedding_params, lr=lr)\n self.h_embedding_optimizer = optim.Adam(policy.single_embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.residual_optimizer = optim.Adam(policy.residual_params, lr=lr)\n\n self.num_eval_iteration = 50 \n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize\n \n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n max_score = -100000. \n min_loss = 100000. 
\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device) \n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n self.m_embedding_optimizer.zero_grad()\n self.residual_optimizer.zero_grad() \n \n # m : history embedding, h : single observation embedding\n m, _ = self.policy.forward_embedding(obs) \n action_residuals = actions - prev_actions\n action_residual_pred = self.policy.forward_residual_from_m(m)\n \n train_residual_loss = torch.mean((action_residual_pred - action_residuals) ** 2)\n train_residual_loss.backward()\n \n self.m_embedding_optimizer.step()\n self.residual_optimizer.step() \n \n self.policy_optimizer.zero_grad() \n self.h_embedding_optimizer.zero_grad() \n \n m, h = self.policy.forward_embedding(obs)\n \n # we follow the original implementation that stop-gradient layer on m ; \n # see `forward_policy_from_embedding` method for detail. 
(m.detach() in input)\n train_neg_likelihood = -self.policy.log_prob_policy_from_m_h(m, h, actions).mean()\n train_neg_likelihood.backward()\n \n self.policy_optimizer.step()\n self.h_embedding_optimizer.step()\n \n if (num+1) % eval_freq == 0: \n valid_m, valid_h = self.policy.forward_embedding(obs_valid) \n valid_action_residuals = actions_valid - prev_actions_valid\n valid_action_residual_pred = self.policy.forward_residual_from_m(valid_m)\n \n valid_policy_neg_likelihood = -self.policy.log_prob_policy_from_m_h(valid_m, valid_h, actions_valid).mean()\n valid_residual_loss = torch.mean((valid_action_residual_pred - valid_action_residuals) ** 2) \n \n valid_loss = valid_policy_neg_likelihood + valid_residual_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n \n train_mh = torch.cat([m,h], dim=-1)\n valid_mh = torch.cat([valid_m, valid_h], dim=-1)\n \n hscic_estimate = estimate_hscic(X=train_mh, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=valid_mh, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5) \n train_hscic_m_a_given_aprev = estimate_hscic(X=m, Y=actions, Z=prev_actions, ridge_lambda=1e-5)\n valid_hscic_m_a_given_aprev = estimate_hscic(X=valid_m, Y=actions_valid, Z=prev_actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n train_loss = train_neg_likelihood + train_residual_loss\n \n print(f'** iter{num+1}: train_loss={train_loss.item()}, nll={train_neg_likelihood}, residual_loss={train_residual_loss}, eval_ret={eval_ret_mean}+-{eval_ret_std}')\n print(f' valid_loss={valid_loss.item()}, valid_nll={valid_policy_neg_likelihood}, valid_residual_loss={valid_residual_loss}')\n \n print(f'** HSCIC(mh, a_prev | a_current) : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n print(f'** HSCIC(m, a_current | a_prev) : (train){train_hscic_m_a_given_aprev:.6f} (valid){valid_hscic_m_a_given_aprev:.6f} ')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.item(),\n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': train_neg_likelihood.item(),\n 'valid_neg_likelihood': valid_policy_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'train_residual_loss': train_residual_loss,\n 'valid_residual_loss': valid_residual_loss,\n 'train_mean_hscic(m,target|prev)': train_hscic_m_a_given_aprev,\n 'valid_mean_hscic(m,target|prev)': valid_hscic_m_a_given_aprev,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score ')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in 
range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n # obs = obs[:true_obs_dim]\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()[0]\n next_obs, rew, done, env_info = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "FCA", "path": "imitation/fca.py", "snippet": "class FCA(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n embedding_dim=1, entropy_hidden_size=300, entropy_lr=1e-4, reg_coef=1e-5, info_bottleneck_loss_coef=0.001, \n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(FCA, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid \n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize \n\n # Additional Network for Conditional Entropy (FCA)\n self.entropy_input_size = embedding_dim + action_dim\n self.entropy_hidden_size = entropy_hidden_size\n self.entropy_net = nn.Sequential(\n nn.Linear(self.entropy_input_size, self.entropy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(self.entropy_hidden_size, action_dim, device=self.device)\n )\n \n # FCA Hyperparameters\n self.entropy_coef = reg_coef \n self.info_bottleneck_loss_coef = info_bottleneck_loss_coef \n \n self.embedding_optimizer = optim.Adam(policy.embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.entropy_optimizer = optim.Adam(self.entropy_net.parameters(), lr=entropy_lr)\n\n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000, inner_steps=1):\n \n max_score = -100000. \n min_loss = 100000. 
\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device) \n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n # conditional entropy input : H(a_{t-1}| a_{t}, varphi_t)\n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1) \n \n self.policy_optimizer.zero_grad()\n self.embedding_optimizer.zero_grad()\n self.entropy_optimizer.zero_grad()\n\n if self.entropy_coef > 0.:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n\n # prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n pred_prev_actions = self.entropy_net(expert_action_and_h) \n entropy_loss = torch.mean((pred_prev_actions - prev_actions) ** 2) \n\n train_loss = neg_likelihood \\\n - self.entropy_coef * entropy_loss \\\n + self.info_bottleneck_loss_coef * info_bottleneck_loss\n \n train_loss.backward() # backprop embedding\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n\n # conditional entropy training\n for _ in range(inner_steps):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device) \n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n \n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1) \n\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device) \n pred_prev_actions = self.entropy_net(expert_action_and_h.detach())\n\n entropy_loss = torch.mean((pred_prev_actions - prev_actions) ** 2)\n \n self.entropy_optimizer.zero_grad()\n entropy_loss.backward()\n self.entropy_optimizer.step()\n\n else:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n \n train_loss = neg_likelihood + self.info_bottleneck_loss_coef * info_bottleneck_loss \n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step() \n \n\n if (num+1) % eval_freq == 0: \n h_valid = self.policy.forward_embedding(obs_valid)\n valid_info_bottleneck_loss = 0.5 * (h_valid ** 2).sum()\n \n if self.entropy_coef > 0:\n expert_action_and_h_valid = torch.cat([actions_valid, h_valid], dim=-1) \n pred_prev_actions_valid = self.entropy_net(expert_action_and_h_valid)\n \n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim]\n prev_actions_valid = 
torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device)\n \n valid_entropy_loss = torch.mean((pred_prev_actions_valid - prev_actions_valid) ** 2)\n else:\n valid_entropy_loss = 0.\n \n valid_neg_likelihood = - self.policy.log_prob(obs_valid, actions_valid).mean()\n \n valid_loss = valid_neg_likelihood \\\n - self.entropy_coef * valid_entropy_loss \\\n + self.info_bottleneck_loss_coef * valid_info_bottleneck_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n h_train = self.policy.forward_embedding(obs)\n \n hscic_estimate = estimate_hscic(X=h_train, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=h_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: entropy_loss={entropy_loss}, train_loss={train_loss.item()}, eval_ret={eval_ret_mean}+-{eval_ret_std} ')\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(), \n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'valid_entropy_loss': entropy_loss, \n 'valid_IB_loss': info_bottleneck_loss.item(),\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep: \n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "MINE_BC", "path": "imitation/mine.py", "snippet": "class MINE_BC(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n embedding_dim=1, mine_lr=1e-4, reg_coef=1e-5, info_bottleneck_loss_coef=0.001, \n ):\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(MINE_BC, self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n \n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.embedding_dim = embedding_dim\n self.stacksize = stacksize\n \n # Additional Network for MINE Neural Estimator\n self.mine = MINE_DV(action_dim, action_dim + embedding_dim, device=device)\n \n # MINE-BC Hyperparameters\n self.reg_coef = reg_coef\n self.info_bottleneck_loss_coef = info_bottleneck_loss_coef\n\n self.embedding_optimizer = optim.Adam(policy.embedding_params, lr=lr)\n self.policy_optimizer = optim.Adam(policy.policy_params, lr=lr)\n self.mine_optimizer = optim.Adam(self.mine.parameters(), lr=mine_lr)\n \n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n\n # For standardization \n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n def 
train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000, inner_steps=1):\n \n min_loss = 100000.\n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations']\n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_actions_valid = batch_valid['actions'][:, :-self.action_dim] \n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_actions_valid = torch.tensor(prev_actions_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n \n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n\n # MINE : I (a_{t-1}; a_{t}, varphi_t)\n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1)\n \n self.policy_optimizer.zero_grad()\n self.embedding_optimizer.zero_grad()\n self.mine_optimizer.zero_grad()\n\n if self.reg_coef > 0:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n mi_estimate = self.mine.get_mi_bound(prev_actions, expert_action_and_h, update_ema=False)\n\n train_loss = neg_likelihood \\\n + self.reg_coef * mi_estimate \\\n + self.info_bottleneck_loss_coef * info_bottleneck_loss\n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n\n # MINE training\n for _ in range(inner_steps):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n\n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_actions = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n \n h = self.policy.forward_embedding(obs)\n expert_action_and_h = torch.cat([actions, h], dim=-1)\n \n prev_actions = torch.tensor(prev_actions, dtype=torch.float32, device=self.device)\n \n mine_loss = -self.mine.get_mi_bound(prev_actions, expert_action_and_h.detach(), update_ema=True)\n\n self.mine_optimizer.zero_grad()\n mine_loss.backward()\n self.mine_optimizer.step()\n\n else:\n neg_likelihood = -self.policy.log_prob_policy_from_embedding(h, actions).mean()\n info_bottleneck_loss = 0.5 * (h ** 2).sum()\n \n train_loss = neg_likelihood + self.info_bottleneck_loss_coef * info_bottleneck_loss \n \n train_loss.backward()\n \n self.policy_optimizer.step()\n self.embedding_optimizer.step()\n \n\n if (num+1) % eval_freq == 0:\n h_valid = self.policy.forward_embedding(obs_valid)\n valid_info_bottleneck_loss = 0.5 * (h_valid ** 2).sum()\n \n if self.reg_coef > 0:\n expert_action_and_h_valid = torch.cat([actions_valid, h_valid], dim=-1) \n valid_mi_estimate = self.mine.get_mi_bound(prev_actions_valid, expert_action_and_h_valid, update_ema=False)\n else:\n valid_mi_estimate = 0.\n \n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n\n valid_loss = valid_neg_likelihood \\\n + 
self.reg_coef * valid_mi_estimate \\\n + self.info_bottleneck_loss_coef * valid_info_bottleneck_loss\n \n policy_action_valid = self.policy(obs_valid).sample() \n h_train = self.policy.forward_embedding(obs)\n \n hscic_estimate = estimate_hscic(X=h_train, Y=prev_actions, Z=actions, ridge_lambda=1e-5)\n valid_hscic_estimate = estimate_hscic(X=h_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_actions_valid, Z=actions_valid, ridge_lambda=1e-5)\n \n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: mine_loss={-mi_estimate.cpu().item()}, train_loss={train_loss.item()}, eval_ret={eval_ret_mean}+-{eval_ret_std} ')\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({\n 'train_total_loss': train_loss.cpu().item(),\n 'valid_total_loss': valid_loss.cpu().item(),\n 'train_neg_likelihood': neg_likelihood.cpu().item(),\n 'valid_neg_likelihood': valid_neg_likelihood.cpu().item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'valid_mine_loss': -mi_estimate.cpu().item(),\n 'valid_IB_loss': info_bottleneck_loss.cpu().item(),\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! ')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep: \n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "PALR", "path": "imitation/palr.py", "snippet": "class PALR(nn.Module):\n def __init__(self, policy, env, best_policy=None,\n replay_buffer=None, replay_buffer_valid=None, seed=0, \n device='cpu', lr=3e-4, wandb=None, save_policy_path=None, \n obs_dim=1, action_dim=1, stacksize=1, standardize=True,\n reg_coef=0.01, ridge_lambda=1e-3):\n \n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n \n super(PALR, 
self).__init__()\n\n self.env = env\n self.policy = policy\n self.best_policy = best_policy\n self.replay_buffer = replay_buffer\n self.replay_buffer_valid = replay_buffer_valid\n self.device = device\n\n self.obs_dim = obs_dim\n self.action_dim = action_dim\n self.stacksize = stacksize\n \n self.policy_optimizer = optim.Adam(self.policy.parameters(), lr=lr) \n \n self.num_eval_iteration = 50\n \n self.wandb = None\n if wandb:\n self.wandb = wandb\n self.wandb.init()\n\n self.save_policy_path = save_policy_path\n \n # HSCIC Hyperparameters\n self.reg_coef = reg_coef\n self.ridge_lambda = ridge_lambda\n \n # For standardization\n self.standardize = standardize\n\n self.obs_mean_tt = torch.tensor(self.replay_buffer.obs_mean, device=device)\n self.obs_std_tt = torch.tensor(self.replay_buffer.obs_std, device=device)\n self.act_mean_tt = torch.tensor(self.replay_buffer.act_mean, device=device)\n self.act_std_tt = torch.tensor(self.replay_buffer.act_std, device=device)\n\n self.obs_mean = self.replay_buffer.obs_mean\n self.obs_std = self.replay_buffer.obs_std\n self.act_mean = self.replay_buffer.act_mean\n self.act_std = self.replay_buffer.act_std\n \n\n def train(self, total_iteration=1e6, eval_freq=1000, batch_size=1024, num_valid=2000):\n \n min_loss = 100000.\n max_score = -100000.\n \n batch_valid = self.replay_buffer_valid.get_batch(num_valid, standardize=self.standardize)\n \n obs_valid = batch_valid['observations'] \n actions_valid = batch_valid['actions'][:, -self.action_dim:]\n prev_expert_action_valid = batch_valid['actions'][:, :-self.action_dim]\n \n obs_valid = torch.tensor(obs_valid, dtype=torch.float32, device=self.device)\n actions_valid = torch.tensor(actions_valid, dtype=torch.float32, device=self.device)\n prev_expert_action_valid = torch.tensor(prev_expert_action_valid, dtype=torch.float32, device=self.device)\n \n for num in range(0, int(total_iteration)):\n batch = self.replay_buffer.random_batch(batch_size, standardize=self.standardize)\n\n obs = batch['observations']\n actions = batch['actions'][:, -self.action_dim:]\n prev_expert_action = batch['actions'][:, :-self.action_dim]\n \n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n actions = torch.tensor(actions, dtype=torch.float32, device=self.device)\n prev_expert_action = torch.tensor(prev_expert_action, dtype=torch.float32, device=self.device)\n\n neg_likelihood = - self.policy.log_prob(obs, actions).mean() \n policy_action = self.policy(obs).rsample()\n \n if self.reg_coef != 0: \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action\n Z_std = actions\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda)\n \n else:\n hscic_estimate = 0.\n \n train_loss = neg_likelihood + self.reg_coef * hscic_estimate \n\n self.policy_optimizer.zero_grad()\n train_loss.backward()\n self.policy_optimizer.step()\n\n if (num+1) % eval_freq == 0:\n policy_action = self.policy(obs).sample()\n policy_action_valid = self.policy(obs_valid).sample()\n \n # Train data HSCIC (for debugging) \n policy_embedding = self.policy.forward_embedding(obs)\n if self.standardize:\n Y_std = (prev_expert_action - self.act_mean_tt[0, 
:-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n p_std = (policy_action - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n p_std = p_std.to(torch.float32)\n \n else:\n Y_std = prev_expert_action\n Z_std = actions\n p_std = policy_action\n \n hscic_estimate = estimate_hscic(X=policy_embedding, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda)\n \n policy_embedding_valid = self.policy.forward_embedding(obs_valid)\n if self.standardize:\n Y_std = (prev_expert_action_valid - self.act_mean_tt[0, :-self.action_dim])/ self.act_std_tt[0, :-self.action_dim]\n Z_std = (actions_valid - self.act_mean_tt[0, -self.action_dim:])/ self.act_std_tt[0, -self.action_dim:]\n\n Y_std = Y_std.to(torch.float32)\n Z_std = Z_std.to(torch.float32)\n else:\n Y_std = prev_expert_action_valid\n Z_std = actions_valid\n p_std = policy_action\n \n valid_hscic_estimate = estimate_hscic(X=policy_embedding_valid, Y=Y_std, Z=Z_std, ridge_lambda=self.ridge_lambda) \n valid_hscic_estimate_action = estimate_hscic(X=policy_action_valid, Y=prev_expert_action_valid, Z=actions_valid, ridge_lambda=self.ridge_lambda)\n\n valid_neg_likelihood = -self.policy.log_prob(obs_valid, actions_valid).mean()\n valid_loss = valid_neg_likelihood + self.reg_coef * valid_hscic_estimate\n\n eval_ret_mean, eval_ret_std = self.evaluate(num_iteration=self.num_eval_iteration)\n \n print(f'** iter{num+1}: train_policy_loss={train_loss.item():.2f}, val_policy_loss={valid_loss.item():.2f}, eval_ret={eval_ret_mean:.2f}+-{eval_ret_std:.2f}',)\n print(f'** HSCIC : (train){hscic_estimate:.6f} (valid){valid_hscic_estimate:.6f} (valid,action){valid_hscic_estimate_action:.6f}')\n \n if self.wandb:\n self.wandb.log({'train_total_loss': train_loss.item(), \n 'valid_total_loss': valid_loss.item(),\n 'train_neg_likelihood': neg_likelihood.item(),\n 'valid_neg_likelihood': valid_neg_likelihood.item(),\n 'train_mean_hscic(rep,prev|target)': hscic_estimate,\n 'valid_mean_hscic(rep,prev|target)': valid_hscic_estimate,\n 'valid_mean_hscic(act,prev|target)': valid_hscic_estimate_action,\n 'eval_episode_return': eval_ret_mean\n }, step=num+1)\n\n if eval_ret_mean > max_score:\n print(f'** max score record! 
')\n max_score = eval_ret_mean\n copy_nn_module(self.policy, self.best_policy)\n \n if self.save_policy_path:\n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_best.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.best_policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_best.pt')\n \n print(f'** save model to ', f'{self.save_policy_path}/bc_actor_last.pt')\n os.makedirs(self.save_policy_path, exist_ok=True)\n torch.save(self.policy.state_dict(), \n f'{self.save_policy_path}/bc_actor_last.pt')\n \n def evaluate(self, num_iteration=5):\n rets = []\n maxtimestep = 1000\n for num in range(0, num_iteration):\n obs_list = []\n obs = np.zeros(self.obs_dim * self.stacksize)\n \n obs_ = self.env.reset()\n obs_list.append(obs_)\n\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[- self.obs_dim:] = obs_\n\n done = False\n t = 0\n ret = 0.\n \n while not done and t < maxtimestep:\n if self.standardize:\n obs = (obs - self.obs_mean[0]) / self.obs_std[0]\n obs = torch.tensor(obs, dtype=torch.float32, device=self.device)\n action = self.policy(obs).mean.cpu().detach().numpy()\n \n next_obs, rew, done, _ = self.env.step(action)\n ret += rew\n \n obs_ = next_obs \n obs_list.append(obs_)\n\n if len(obs_list) < self.stacksize:\n obs_ = np.concatenate(obs_list)\n obs = np.zeros(self.obs_dim * self.stacksize)\n obs[-(len(obs_list)) * self.obs_dim:] = obs_\n \n else:\n obs = np.concatenate(obs_list[-self.stacksize:])\n \n t += 1\n \n rets.append(ret)\n \n return np.mean(rets), np.std(rets)" }, { "identifier": "TanhGaussianPolicyWithEmbedding", "path": "core/policy.py", "snippet": "class TanhGaussianPolicyWithEmbedding(TorchStochasticPolicy):\n \"\"\"\n Reference : \n https://github.com/AlvinWen428/fighting-copycat-agents/blob/52dabfd8b1c42e50f31d84bd431915aad62e09cb/imitation_learning/models/gan_model/__init__.py#L9\n \n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n action_dim,\n embedding_dim,\n embedding_hidden_size,\n policy_hidden_size, \n policy_std=None,\n disc_std=None,\n init_w=1e-3,\n device='cpu',\n hidden_activation=F.leaky_relu, \n layer_norm=False,\n **kwargs\n ):\n if device =='cuda':\n ptu.set_gpu_mode(True)\n self.device = device\n \n super(TanhGaussianPolicyWithEmbedding, self).__init__()\n # hidden_sizes,\n # input_size=obs_dim,\n # output_size=action_dim,\n # init_w=init_w,\n # device=device,\n # **kwargs\n # )\n\n self.input_size = obs_dim\n self.output_size = action_dim\n self.hidden_activation = hidden_activation\n self.layer_norm = layer_norm\n\n self.embedding_params = []\n self.disc_params = []\n self.policy_params = []\n\n self.embed_fcs = []\n # self.embed_layer_norms = []\n\n self.policy_fcs = []\n # self.policy_layer_norms = []\n\n self.disc_fcs = []\n # self.disc_layer_norms = []\n \n self.device = device\n in_size = self.input_size\n\n self.embed_fcs = nn.Sequential(\n nn.Linear(self.input_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device), \n )\n self.embedding_params = self.embed_fcs.parameters()\n\n self.policy_fcs = nn.Sequential(\n nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim, policy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n # self.policy_params.append({'params': self.policy_fcs.parameters()})\n self.policy_mean = nn.Linear(policy_hidden_size, action_dim, 
device=self.device)\n self.policy_params.append({'params': self.policy_mean.parameters()}) \n \n # self.policy_fc1 = nn.Linear(embedding_dim, policy_hidden_size, device=self.device)\n # self.policy_fc1.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc1.bias.data.fill_(0)\n # self.policy_params.append({'params': self.policy_fc1.parameters()}) \n # self.policy_fc2 = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc2.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc2.bias.data.fill_(0)\n # self.policy_params.append({'params': self.policy_fc2.parameters()}) \n\n self.policy_log_std = None\n self.policy_std = policy_std\n \n if policy_std is None:\n self.policy_fc_log_std = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc_log_std.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc_log_std.bias.data.uniform_(-init_w, init_w)\n self.policy_params.append({'params': self.policy_fc_log_std.parameters()})\n else:\n self.policy_log_std = np.log(policy_std)\n assert LOG_SIG_MIN <= self.policy_log_std <= LOG_SIG_MAX\n\n def forward(self, obs):\n # h = obs\n\n # h = self.hidden_activation(self.embed_fc1(h))\n # h = self.embed_fc2(h)\n\n # h = self.hidden_activation(self.policy_fc1(h))\n # policy_mean = self.policy_fc2(h)\n\n h = self.embed_fcs(obs)\n h = self.policy_fcs(h)\n policy_mean = self.policy_mean(h)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(h)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def forward_embedding(self, obs):\n # h = obs\n \n # h = self.hidden_activation(self.embed_fc1(h))\n # h = self.embed_fc2(h)\n h = self.embed_fcs(obs)\n\n return h\n\n def forward_policy_from_embedding(self, h):\n # h = self.hidden_activation(h)\n # h = self.hidden_activation(self.policy_fc1(h))\n h = self.policy_fcs(h)\n policy_mean = self.policy_mean(h)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(h)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def logprob(self, action, mean, std):\n tanh_normal = TanhNormal(mean, std)\n log_prob = tanh_normal.log_prob(\n action,\n )\n log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def log_prob(self, obs, action):\n tanh_normal = self.forward(obs)\n log_prob = tanh_normal.log_prob(\n action,\n )\n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def log_prob_policy_from_embedding(self, h, action):\n tanh_normal = self.forward_policy_from_embedding(h)\n log_prob = tanh_normal.log_prob(\n action,\n )\n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n\n def predict_action_from_embedding(self, h):\n tanh_normal = self.forward_policy_from_embedding(h)\n pred_action = tanh_normal.mean \n # log_prob = log_prob.sum(dim=1, keepdim=True)\n return pred_action" }, { "identifier": "TanhGaussianRAPPolicy", "path": "core/policy.py", "snippet": "class TanhGaussianRAPPolicy(TorchStochasticPolicy):\n \"\"\"\n Reference : \n \n Usage:\n\n ```\n policy = TanhGaussianPolicy(...)\n \"\"\"\n\n def __init__(\n self,\n obs_dim,\n stack_size,\n action_dim,\n embedding_dim,\n 
embedding_hidden_size,\n policy_hidden_size,\n residual_hidden_size,\n policy_std=None,\n residual_std=0.1,\n device='cpu',\n hidden_activation=F.leaky_relu, \n layer_norm=False,\n **kwargs\n ):\n if device =='cuda':\n ptu.set_gpu_mode(True)\n self.device = device\n \n super(TanhGaussianRAPPolicy, self).__init__()\n \n self.input_size = obs_dim\n self.stack_size = stack_size\n self.output_size = action_dim\n self.hidden_activation = hidden_activation\n self.layer_norm = layer_norm\n\n self.embedding_params = []\n self.residual_params = []\n self.policy_params = []\n\n self.history_embed_fcs = []\n self.single_embed_fcs = []\n # self.embed_layer_norms = []\n\n self.policy_fcs = []\n self.residual_fcs = []\n \n self.device = device\n in_size = self.input_size\n\n self.history_embed_fcs = nn.Sequential(\n nn.Linear(self.input_size * self.stack_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device)\n )\n self.history_embedding_params = self.history_embed_fcs.parameters()\n \n self.single_embed_fcs = nn.Sequential(\n nn.Linear(self.input_size, embedding_hidden_size, bias=False, device=self.device),\n # nn.BatchNorm1d(embedding_hidden_size),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Linear(embedding_hidden_size, embedding_dim, device=self.device)\n )\n self.single_embedding_params = self.single_embed_fcs.parameters()\n\n self.policy_fcs = nn.Sequential(\n nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim*2, policy_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.policy_params.append({'params': self.policy_fcs.parameters()})\n self.policy_mean = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n self.policy_params.append({'params': self.policy_mean.parameters()}) \n\n self.policy_log_std = None\n self.policy_std = policy_std\n \n if policy_std is None:\n self.policy_fc_log_std = nn.Linear(policy_hidden_size, action_dim, device=self.device)\n # self.policy_fc_log_std.weight.data.uniform_(-init_w, init_w)\n # self.policy_fc_log_std.bias.data.uniform_(-init_w, init_w)\n self.policy_params.append({'params': self.policy_fc_log_std.parameters()})\n else:\n self.policy_log_std = np.log(policy_std)\n assert LOG_SIG_MIN <= self.policy_log_std <= LOG_SIG_MAX\n\n self.residual_fcs = nn.Sequential(\n # nn.LeakyReLU(0.2, inplace=False),\n nn.Linear(embedding_dim, residual_hidden_size, device=self.device),\n nn.LeakyReLU(0.2, inplace=True),\n )\n self.residual_params.append({'params': self.residual_fcs.parameters()})\n self.residual_mean = nn.Linear(residual_hidden_size, action_dim, device=self.device) \n self.residual_params.append({'params': self.residual_mean.parameters()})\n\n def forward(self, obs):\n if len(obs.shape) < 2:\n obs = obs[None]\n \n obs_total = obs\n obs_current = obs[:, -self.input_size:]\n\n m = self.history_embed_fcs(obs_total)\n h = self.single_embed_fcs(obs_current) \n \n policy_input = torch.cat([m.detach(), h], dim=-1)\n \n policy_input = self.policy_fcs(policy_input)\n policy_mean = self.policy_mean(policy_input)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(policy_input)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n policy_dist = TanhNormal(policy_mean, policy_std) \n \n return policy_dist 
#, residual_dist\n\n def forward_embedding(self, obs):\n obs_total = obs\n obs_current = obs[:, -self.input_size:]\n\n m = self.history_embed_fcs(obs_total)\n h = self.single_embed_fcs(obs_current)\n\n return m, h\n\n def forward_residual_from_m(self, m):\n residual_m = self.residual_fcs(m)\n residual_mean = self.residual_mean(residual_m) \n \n return residual_mean\n\n def forward_policy_from_embedding(self, m, h):\n policy_input = torch.cat([m.detach(), h], dim=-1)\n \n policy_input = self.policy_fcs(policy_input)\n policy_mean = self.policy_mean(policy_input)\n\n if self.policy_std is None:\n policy_log_std = self.policy_fc_log_std(policy_input)\n policy_log_std = torch.clamp(policy_log_std, LOG_SIG_MIN, LOG_SIG_MAX)\n policy_std = torch.exp(policy_log_std)\n else:\n policy_std = torch.from_numpy(np.array([self.policy_std, ])).float().to(ptu.device)\n\n return TanhNormal(policy_mean, policy_std)\n\n def logprob(self, action, mean, std):\n tanh_normal = TanhNormal(mean, std)\n log_prob = tanh_normal.log_prob(action)\n log_prob = log_prob.sum(dim=1, keepdim=True)\n return log_prob\n \n def log_prob(self, obs, action):\n tanh_normal = self.forward(obs)\n log_prob = tanh_normal.log_prob(action) \n return log_prob\n \n def log_prob_policy_from_m_h(self, m, h, action): \n tanh_normal = self.forward_policy_from_embedding(m, h)\n log_prob = tanh_normal.log_prob(action)\n return log_prob\n\n def predict_action_from_m_h(self, m, h):\n tanh_normal = self.forward_policy_from_embedding(m, h)\n pred_action = tanh_normal.mean \n return pred_action" }, { "identifier": "EnvReplayBuffer", "path": "core/replay_buffer.py", "snippet": "class EnvReplayBuffer(SimpleReplayBuffer):\n def __init__(\n self,\n max_replay_buffer_size,\n env,\n stack_size=1,\n action_history_len=0,\n env_info_sizes=None,\n train_with_action_history=False\n ):\n \"\"\"\n :param max_replay_buffer_size:\n :param env:\n \"\"\"\n self.env = env\n self._ob_space = env.observation_space #.shape[0] * stack_size\n self._action_space = env.action_space\n\n if train_with_action_history:\n obs_dim = get_dim(self._ob_space) * stack_size + get_dim(self._action_space) * max(stack_size - 1, 1)\n else:\n obs_dim = get_dim(self._ob_space) * stack_size\n\n act_dim = get_dim(self._action_space) * (action_history_len)\n\n if env_info_sizes is None:\n if hasattr(env, 'info_sizes'):\n env_info_sizes = env.info_sizes\n else:\n env_info_sizes = dict()\n\n super().__init__(\n max_replay_buffer_size=max_replay_buffer_size,\n observation_dim=obs_dim,\n action_dim=act_dim,\n env_info_sizes=env_info_sizes\n )\n\n self.obs_mean = None\n self.obs_std = None\n\n self.act_mean = None\n self.act_std = None\n\n # def add_sample(self, observation, action, prev_action, reward, terminal,\n # next_observation, **kwargs):\n # if isinstance(self._action_space, Discrete):\n # new_action = np.zeros(self._action_dim)\n # new_action[action] = 1\n # else:\n # new_action = action\n\n # return super().add_sample(\n # observation=observation,\n # action=new_action,\n # prev_action=prev_action,\n # reward=reward,\n # next_observation=next_observation,\n # terminal=terminal,\n # # **kwargs\n # )\n\n def calculate_statistics(self):\n self.obs_mean = np.mean(self._observations[:self._top], axis=0, keepdims=True)\n self.obs_std = np.std(self._observations[:self._top], axis=0, keepdims=True)\n\n self.act_mean = np.mean(self._actions[:self._top], axis=0, keepdims=True)\n self.act_std = np.std(self._actions[:self._top], axis=0, keepdims=True)\n\n return self.obs_mean, self.obs_std, 
self.act_mean, self.act_std\n\n def set_statistics(self, obs_mean, obs_std, act_mean, act_std):\n self.obs_mean, self.obs_std, self.act_mean, self.act_std = obs_mean, obs_std, act_mean, act_std\n \n def get_statistics(self):\n return self.obs_mean, self.obs_std, self.act_mean, self.act_std\n\n def random_batch(self, batch_size, standardize=False):\n indices = np.random.choice(self._size, size=batch_size, replace=self._replace or self._size < batch_size)\n if not self._replace and self._size < batch_size:\n warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')\n\n if standardize and self.obs_mean is not None:\n obss = (self._observations[indices] - self.obs_mean) / self.obs_std\n # actions = (self._actions[indices] - self.act_mean) / self.act_std\n next_obss = (self._next_obs[indices] - self.obs_mean) / self.obs_std\n else:\n obss = self._observations[indices] \n # actions = self._actions[indices] \n next_obss = self._next_obs[indices]\n\n actions = self._actions[indices]\n \n batch = dict(\n observations=obss,\n actions=actions,\n # prev_actions=self._prev_actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=next_obss,\n )\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n\n return batch\n \n def get_batch(self, batch_size, standardize=False):\n datasize = min(batch_size, self._top) \n indices = np.arange(datasize)\n # if not self._replace and self._size < batch_size:\n # warnings.warn('Replace was set to false, but is temporarily set to true because batch size is larger than current size of replay.')\n\n if standardize and self.obs_mean is not None:\n obss = (self._observations[indices] - self.obs_mean) / self.obs_std\n # actions = (self._actions[indices] - self.act_mean) / self.act_std\n next_obss = (self._next_obs[indices] - self.obs_mean) / self.obs_std\n else:\n obss = self._observations[indices] \n # actions = self._actions[indices] \n next_obss = self._next_obs[indices]\n\n actions = self._actions[indices]\n \n batch = dict(\n observations=obss,\n actions=actions,\n # prev_actions=self._prev_actions[indices],\n rewards=self._rewards[indices],\n terminals=self._terminals[indices],\n next_observations=next_obss,\n )\n for key in self._env_info_keys:\n assert key not in batch.keys()\n batch[key] = self._env_infos[key][indices]\n\n return batch\n\n def add_sample(self, observation, action, reward, terminal,\n next_observation, **kwargs):\n if isinstance(self._action_space, Discrete):\n new_action = np.zeros(self._action_dim)\n new_action[action] = 1\n else:\n new_action = action\n\n return super().add_sample(\n observation=observation,\n action=new_action,\n reward=reward,\n next_observation=next_observation,\n terminal=terminal,\n # **kwargs\n )" }, { "identifier": "preprocess_dataset_with_prev_actions", "path": "core/preprocess.py", "snippet": "def preprocess_dataset_with_prev_actions(mdpfile, envtype, stacksize=1, partially_observable=False, action_history_len=2):\n \n indx = list(np.arange(20))\n # Indices of position information observations\n if partially_observable:\n envtype_to_idx = {\n 'hopper': indx[:5], \n 'ant': indx[:13], \n 'walker2d': indx[:8], \n 'halfcheetah': indx[:4] + indx[8:13]\n }\n obs_idx = envtype_to_idx[envtype]\n observations = np.array(mdpfile['observations'])[:, obs_idx]\n next_observations = np.array(mdpfile['next_observations'])[:, obs_idx]\n else:\n 
observations = np.array(mdpfile['observations'])\n next_observations = np.array(mdpfile['next_observations'])\n \n terminals = np.array(mdpfile['terminals'])\n timeouts = np.array(mdpfile['timeouts'])\n rewards = np.array(mdpfile['rewards'])\n actions = np.array(mdpfile['actions'])\n\n obs_dim = observations.shape[-1]\n action_dim = actions.shape[-1]\n\n n_data = observations.shape[0]\n new_observations_list = []\n new_next_observations_list = []\n prev_action_list = []\n action_history_list = []\n \n idx_from_initial_state = 0\n num_trajs = 0\n\n for i in range(n_data):\n if idx_from_initial_state == 0:\n prev_action = np.zeros(action_dim)\n else:\n prev_action = actions[i-1]\n prev_action_list.append(prev_action)\n\n if idx_from_initial_state < stacksize:\n if idx_from_initial_state == 0:\n initial_obs = observations[i]\n \n new_observation = np.zeros(obs_dim * stacksize)\n new_observation_ = np.concatenate(observations[i-idx_from_initial_state: i+1])\n new_observation[-(idx_from_initial_state+1) * obs_dim:] = new_observation_\n \n new_next_observation = np.zeros(obs_dim * stacksize)\n new_next_observation_ = np.concatenate(next_observations[i-idx_from_initial_state: i+1])\n new_next_observation[-(idx_from_initial_state+1) * obs_dim:] = new_next_observation_\n \n if idx_from_initial_state + 1 != stacksize:\n new_next_observation[-(idx_from_initial_state+2) * obs_dim:-(idx_from_initial_state+1) * obs_dim] \\\n = initial_obs\n \n else:\n new_observation = np.concatenate(observations[i+1-stacksize:i+1])\n new_next_observation = np.concatenate(next_observations[i+1-stacksize:i+1])\n\n if idx_from_initial_state < action_history_len:\n action_history = np.zeros(action_dim * action_history_len)\n action_history_ = np.concatenate(actions[i-idx_from_initial_state: i+1])\n action_history[-(idx_from_initial_state+1) * action_dim:] = action_history_\n \n else:\n action_history = np.concatenate(actions[i+1-action_history_len:i+1])\n\n\n new_observations_list.append(new_observation)\n new_next_observations_list.append(new_next_observation)\n action_history_list.append(action_history)\n\n idx_from_initial_state += 1\n if terminals[i] or timeouts[i]:\n idx_from_initial_state = 0\n num_trajs += 1 \n\n new_observations = np.array(new_observations_list)\n new_next_observations = np.array(new_next_observations_list)\n new_actions = np.array(action_history_list)\n\n new_paths = {\n 'observations': new_observations,\n 'next_observations': new_next_observations,\n 'rewards': rewards,\n 'actions': new_actions,\n 'terminals': terminals,\n 'timeouts': timeouts \n }\n \n return new_paths" }, { "identifier": "data_select_num_transitions", "path": "core/preprocess.py", "snippet": "def data_select_num_transitions(path, num_transitions=1000, start_idx=0, random=False):\n new_path = {}\n \n if random:\n num_full_trajs = len(path['observations'])\n choice_idx = np.random.choice(num_full_trajs, num_transitions)\n \n else:\n choice_idx = np.arange(start_idx, start_idx + num_transitions)\n \n for key in path.keys():\n new_path[key] = np.array(path[key])[choice_idx]\n \n return new_path" }, { "identifier": "NormalizedBoxEnv", "path": "rlkit/envs/wrappers.py", "snippet": "class NormalizedBoxEnv(ProxyEnv):\n \"\"\"\n Normalize action to in [-1, 1].\n\n Optionally normalize observations and scale reward.\n \"\"\"\n\n def __init__(\n self,\n env,\n reward_scale=1.,\n obs_mean=None,\n obs_std=None,\n ):\n ProxyEnv.__init__(self, env)\n self._should_normalize = not (obs_mean is None and obs_std is None)\n if 
self._should_normalize:\n if obs_mean is None:\n obs_mean = np.zeros_like(env.observation_space.low)\n else:\n obs_mean = np.array(obs_mean)\n if obs_std is None:\n obs_std = np.ones_like(env.observation_space.low)\n else:\n obs_std = np.array(obs_std)\n self._reward_scale = reward_scale\n self._obs_mean = obs_mean\n self._obs_std = obs_std\n ub = np.ones(self._wrapped_env.action_space.shape)\n self.action_space = Box(-1 * ub, ub)\n\n def estimate_obs_stats(self, obs_batch, override_values=False):\n if self._obs_mean is not None and not override_values:\n raise Exception(\"Observation mean and std already set. To \"\n \"override, set override_values to True.\")\n self._obs_mean = np.mean(obs_batch, axis=0)\n self._obs_std = np.std(obs_batch, axis=0)\n\n def _apply_normalize_obs(self, obs):\n return (obs - self._obs_mean) / (self._obs_std + 1e-8)\n\n def step(self, action):\n lb = self._wrapped_env.action_space.low\n ub = self._wrapped_env.action_space.high\n scaled_action = lb + (action + 1.) * 0.5 * (ub - lb)\n scaled_action = np.clip(scaled_action, lb, ub)\n\n wrapped_step = self._wrapped_env.step(scaled_action)\n next_obs, reward, done, info = wrapped_step\n if self._should_normalize:\n next_obs = self._apply_normalize_obs(next_obs)\n return next_obs, reward * self._reward_scale, done, info\n\n def __str__(self):\n return \"Normalized: %s\" % self._wrapped_env" } ]
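Each entry in the context list that closes above follows the same three-field shape: an identifier, the path of the file defining it, and the snippet holding that definition's source. A minimal Python sketch of the entry schema (the TypedDict names are ours for illustration; only the three field names come from the data itself):

from typing import List, TypedDict

class ContextEntry(TypedDict):
    identifier: str  # e.g. "NormalizedBoxEnv"
    path: str        # e.g. "rlkit/envs/wrappers.py"
    snippet: str     # source code of the definition, stored as one string

# A record's context field is then a list of such entries:
Context = List[ContextEntry]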
import_statement:
import os
import wandb
import envs
import d4rl
import gym
import torch
from imitation.bc import BC
from imitation.rap import RAP
from imitation.fca import FCA
from imitation.mine import MINE_BC
from imitation.palr import PALR
from argparse import ArgumentParser
from itertools import product
from core.policy import TanhGaussianPolicyWithEmbedding, TanhGaussianRAPPolicy
from core.replay_buffer import EnvReplayBuffer
from core.preprocess import preprocess_dataset_with_prev_actions, data_select_num_transitions
from rlkit.envs.wrappers import NormalizedBoxEnv
token_num: 18875
cropped_code:
wandb_dir = '.'

os.environ['WANDB_DIR'] = wandb_dir
os.environ['D4RL_DATASET_DIR'] = './dataset/'


def train(configs):

all_code:
wandb_dir = '.'

os.environ['WANDB_DIR'] = wandb_dir
os.environ['D4RL_DATASET_DIR'] = './dataset/'


def train(configs):
next_line:
env = NormalizedBoxEnv(gym.make(configs['envname']))
gold_snippet_index: 10
created_at: 2023-11-06 08:35:34+00:00
level: 24k
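Read together, the fields above make up one next-line-prediction example: import_statement plus cropped_code form the model-visible prefix, next_line is the completion target, and gold_snippet_index appears to select the context entry whose definition the target line relies on (index 10 here lines up with the NormalizedBoxEnv entry used in next_line). A minimal consumption sketch follows, assuming each record is available as a JSON object; the check_record helper and the records.jsonl file name are hypothetical, not part of the dataset:

import json

def check_record(record: dict) -> bool:
    # The gold snippet should define the identifier that next_line uses,
    # e.g. "NormalizedBoxEnv" for env = NormalizedBoxEnv(...).
    gold = record["context"][record["gold_snippet_index"]]
    uses_gold = gold["identifier"] in record["next_line"]
    # The model-visible prompt is the imports followed by the cropped prefix;
    # the target line itself should not leak into it.
    prompt = record["import_statement"] + "\n" + record["cropped_code"]
    return uses_gold and record["next_line"].strip() not in prompt

# Hypothetical usage, one JSON record per line:
# with open("records.jsonl") as f:
#     for line in f:
#         assert check_record(json.loads(line))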
repo_name: tylerlight071/Project-Cipher
file_path: main.py
[ { "identifier": "clear_terminal", "path": "components/common_functions.py", "snippet": "def clear_terminal():\n os.system('cls' if os.name == 'nt' else 'clear')" }, { "identifier": "print_slow", "path": "components/common_functions.py", "snippet": "def print_slow(text, delay=0.00): # change to 0.01\n for char in text:\n print(char, end='', flush=True)\n time.sleep(delay)\n print()" }, { "identifier": "shop_help", "path": "components/common_functions.py", "snippet": "def shop_help():\n print_slow(Fore.YELLOW + \"Shop Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[buy] - Use the 'buy [upgrade]' command to purchase the upgrade in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "help_user", "path": "components/common_functions.py", "snippet": "def help_user():\n print_slow(Fore.MAGENTA + \"Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[connect] - Use the 'connect' command to hack into Enigma Corps network.\")\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to view and respond to emails from your client and other characters.\")\n print_slow(\"\")\n print_slow(\"[balance] - Use the 'balance' command to view your current earnings which you can spend on upgrades. \")\n print_slow(\"\")\n print_slow(\"[shop] - Use the 'shop' command to view upgrades available in the shop. \")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[help] - Use the 'help' command if you need assistance at any time.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the Main Menu.\")\n print_slow(\"\")" }, { "identifier": "connect_help", "path": "components/common_functions.py", "snippet": "def connect_help():\n print_slow(Fore.MAGENTA + \"Connect Help:\" + Style.RESET_ALL)\n print_slow(\n \"[scan] - Use the 'scan' command to scan the network and search for available systems and vulnerabilities.\")\n print_slow(\"\")\n print_slow(\"[hack] - Use the 'hack [system/vulnerability]' to hack into different systems.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[disconnect] - Use the 'disconnect' command to disconnect from the current system or vulnerability.\")\n print_slow(\"\")" }, { "identifier": "mail_help", "path": "components/common_functions.py", "snippet": "def mail_help():\n print_slow(Fore.LIGHTBLUE_EX + \"Mail Help:\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list all emails.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r [subject]' command to read an email with the specified subject.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[exit] - Use the 'exit' command to return to the main terminal.\")\n print_slow(\"\")" }, { "identifier": "system_help", "path": "components/common_functions.py", "snippet": "def system_help():\n print_slow(\"\")\n print_slow(\"[mail] - Use the 'mail' command to log into the users emails.\")\n print_slow(\"\")\n print_slow(\"[l] - Use the 'l' command to list files in a users system.\")\n print_slow(\"\")\n print_slow(\"[clear] - Use the 'clear' command to clear the terminal.\")\n print_slow(\"\")\n print_slow(\"[r] - Use the 'r 
[file]' command to read files in a users system\")\n print_slow(\"\")" }, { "identifier": "intro_call", "path": "conversations/calls.py", "snippet": "def intro_call():\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Welcome, Cipher. Operation Enigma is our covert mission against Enigma Corp, a powerful and secretive entity.\")\n print_slow(\n \"Your skills and secrecy have brought you to our attention. Your mission is to dig through their systems and servers looking for valuable data.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Got it, Anonymous. Exposing secrets and bringing justice. I'm in.\")\n print_slow(\"What's my first move? Talk to me about this 'EnigmaLink'.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Excellent, Cipher. EnigmaLink is a specialized tool available on the Hacker's Market. It contains a hidden backdoor, allowing access to Enigma Corps servers.\")\n print_slow(\n \"Your task is to acquire EnigmaLink and initiate your infiltration. Use the 'connect' command to navigate the network and gather crucial intelligence.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"EnigmaLink, got it. I'll secure it and initiate the infiltration. What about this employee, Amy?\")\n print_slow(\"You mentioned her password is 'sexinthecity.' What's my objective with her?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Good question, Cipher. Amy is a key target. Use her password to access her computer and gather any pertinent information.\")\n print_slow(\n \"This data is vital to our cause. Be thorough and meticulous in your investigation. The success of our operation depends on it.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"Understood, Anonymous. I'll focus on Amy, gather intel, and proceed with precision.\")\n print_slow(\"Consider it done. Anything else I should know before I dive in?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender (Anonymous)\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"One last thing, Cipher. All collected data is highly confidential. This contract is binding, and your success is paramount.\")\n print_slow(\"Execute with diligence, and may the odds be in your favor. 
Good luck, Cipher.\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "first_call", "path": "conversations/calls.py", "snippet": "def first_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"That's a good start, but we already have that information.\")\n print_slow(\"Regardless, I've transferred £20 into the account for your troubles.\")\n print_slow(\"Keep digging Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "second_call", "path": "conversations/calls.py", "snippet": "def second_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Hey Cipher, you nailed it! 'Billy' just spilled the beans about wanting to climb the corporate ladder into management.\")\n print_slow(\n \"This is gold for us. We can guide 'Billy' toward training and workshops that align with our interests, nudging things in our favor.\")\n print_slow(\n \"Picture it – we're pulling the strings, helping 'Billy' grow, and steering the ship where we want it to go.\")\n print_slow(\"Keep the ball rolling, Cipher!\" + Style.RESET_ALL)\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "third_call", "path": "conversations/calls.py", "snippet": "def third_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\"\n \"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've stumbled upon a perplexing development regarding Enigma's interest in a mysterious 'compound.'\")\n print_slow(\n \"I'm cross-referencing our existing intel to unveil more details. Stay vigilant and be prepared for the unknown.\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A compound, huh? Any hints on whether we're talking metal, chemicals, or something else entirely?\")\n print_slow(\"This feels like navigating in the dark. What exactly am I dealing with?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Response\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"Cipher, we're in the dark too. Initial reports are unclear—could be metal, chemical, or something beyond our comprehension.\")\n print_slow(\n \"Your mission is to identify the nature of this compound. Exercise extreme caution; this goes deeper than we anticipated.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Inquiry\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"So, we're playing 'guess the compound.' 
Any leads, any connections I should explore?\")\n print_slow(\"This is starting to sound like one of those high-stakes puzzles.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Clarification\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW +\n \"I wish I had more details, Cipher. This is uncharted territory for us. Investigate discreetly, and trust no one.\")\n print_slow(\n \"I'll attempt to gather more intel. Stay on the line, and keep me updated on any findings.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fourth_call", "path": "conversations/calls.py", "snippet": "def fourth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've got our hands on an intriguing document – an Employee Performance Review for 'Billy Constantine'.\")\n print_slow(\n \"This could be a goldmine of information. Let's dig in and see if there's anything we can leverage to our advantage.\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"An Employee Performance Review? Interesting choice. What's the scoop on 'Billy Constantine'?\")\n print_slow(\"Give me the details, and we'll figure out our next move.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, 'Billy Constantine' is making waves. The review highlights exceptional performance as a sales representative.\")\n print_slow(\n \"He's exceeding sales targets, mentoring new team members, and earning a solid 4.5/5 rating. A rising star, it seems.\")\n print_slow(\"We might use this to our advantage. Let's explore how we can align his ambitions with our agenda.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Strategy\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"A high-performing sales rep, huh? We could steer 'Billy' towards projects that align with our goals.\")\n print_slow(\"Let's use this performance review to our advantage. Maybe mentorship programs, leadership initiatives?\")\n print_slow(\"I'm ready to play this card strategically. What's the next move?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Great thinking, Cipher. Let's work on a plan to subtly guide 'Billy' toward initiatives that benefit us.\")\n print_slow(\"We'll need to dig deeper into 'Billy's' aspirations and weave our influence seamlessly.\")\n print_slow(\"Stay vigilant, Cipher. 
This could be a game-changer.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "fifth_call", "path": "conversations/calls.py", "snippet": "def fifth_call():\n clear_terminal()\n\n # Anonymous Sender\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n Fore.YELLOW + \"Cipher, we've intercepted some Meeting Minutes dated 24/06/2025. It's related to 'Project X' and involves key players.\")\n print_slow(\n \"This could be our chance to uncover more about Enigma's activities. Let's dive into the details and see what we can extract.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Response\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(\n Fore.BLUE + \"Meeting Minutes, huh? 'Project X' sounds intriguing. Who were the players involved, and what's the agenda?\")\n print_slow(\"I'm ready to dissect this information and uncover any hidden gems.\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Briefing\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Cipher, the meeting involved key personnel—Amy, Billy, Kyle, and others. 'Project X' is on the agenda, and there's mention of sensitive materials.\")\n print_slow(\n \"This could be a crucial insight into Enigma's plans. Let's analyze the action items and plan our next move.\")\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Cipher Analysis\n print_slow(cipher_art)\n print_slow(\"\")\n print_box(\"Cipher\")\n print_slow(\"\")\n print_slow(Fore.BLUE + \"'Project X,' sensitive materials, and action items. This is a goldmine of information.\")\n print_slow(\n \"Let's focus on dissecting the action items and see if we can connect the dots. What's our strategy, Anonymous?\" + Style.RESET_ALL)\n print_slow(\"\")\n input(\"Press [Enter] to continue: \")\n clear_terminal()\n\n # Anonymous Sender Next Steps\n print_slow(sender_art)\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(\n \"Agreed, Cipher. Let's delve into the action items, especially the data compilation and safety protocol training.\")\n print_slow(\"We might uncover more about 'Project X' and gain insights into Enigma's plans.\")\n print_slow(\"Stay sharp, Cipher. 
This could be a pivotal moment in our mission.\")\n print_slow(\"\")\n print_slow(Fore.RED + \"Line Disconnected...\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "sixth_call", "path": "conversations/calls.py", "snippet": "def sixth_call():\n print_slow(\"ADD CALL STUFF HERE\")" }, { "identifier": "markus_seen_call", "path": "conversations/calls.py", "snippet": "def markus_seen_call():\n print_slow(\"Something goes here\")" }, { "identifier": "code_shatter_call", "path": "conversations/minigame_calls.py", "snippet": "def code_shatter_call():\n clear_terminal()\n print_slow(sender_art)\n print_slow(\"\")\n print_slow(\"\")\n print_box(\"Anonymous\")\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"I see you have bought CodeShatter!\")\n print_slow(\"This item is a one time use upgrade so once you get the password, it is gone so use wisely!\")\n print_slow(\"But don't threat, if you fail, you get a chance to retry. The item is only used when you get the password, so be sure to write it down!\" + Style.RESET_ALL)\n input(\"Press [Enter] to continue: \")\n clear_terminal()" }, { "identifier": "code_shatter_minigame", "path": "minigames/code_shatter_minigame.py", "snippet": "def code_shatter_minigame():\n # Generate a random 5-digit number\n target = [str(random.randint(1, 9)) for _ in range(5)]\n\n print_slow(\"Welcome to CodeShatter!\")\n print_slow(\"\")\n print_slow(\"Guess the 5-digit number.\")\n print_slow(\"\")\n print_slow(\"The sequence can contain multiple same numbers\")\n print_slow(\"\")\n print_slow(Fore.GREEN + \"Green: Correct digit in correct position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.YELLOW + \"Orange: Correct digit in incorrect position.\" + Style.RESET_ALL)\n print_slow(\"\")\n print_slow(Fore.RED + \"Red: Incorrect digit.\" + Style.RESET_ALL)\n print_slow(\"\")\n\n attempts = 0\n while attempts < 7:\n # Get the user's guess\n guess = input(\"Enter your guess: \")\n\n if len(guess) != 5 or not guess.isdigit():\n print_slow(\"Invalid input. Please enter a 5-digit number.\")\n continue\n\n attempts += 1\n\n # Check the guess against the target\n feedback = []\n for i in range(5):\n if guess[i] == target[i]:\n feedback.append(Fore.GREEN + guess[i] + Style.RESET_ALL)\n elif guess[i] in target:\n feedback.append(Fore.YELLOW + guess[i] + Style.RESET_ALL)\n else:\n feedback.append(Fore.RED + guess[i] + Style.RESET_ALL)\n\n print_slow(\"Feedback: \" + \" \".join(feedback))\n\n # Check if the guess is correct\n if guess == \"\".join(target):\n print_slow(Fore.GREEN + \"Access granted.\" + Style.RESET_ALL)\n break\n else:\n print_slow(Fore.RED + \"Access denied. 
Too many attempts.\" + Style.RESET_ALL)\n time.sleep(1)\n print_slow(\"\")\n print_slow(Fore.RED + \"Rebooting CodeShatter with new proxy...\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n code_shatter_minigame()" }, { "identifier": "port_scanning", "path": "minigames/eye_spy_minigame.py", "snippet": "def port_scanning():\n num_ports = 10\n open_ports, closed_ports = generate_ports(num_ports)\n attempts = 5\n correct_guesses = 0\n scan_attempts = 2\n\n print_slow(\"Welcome to the Port Scanning minigame!\")\n print_slow(\"\")\n print_slow(f\"Find the open ports in the range 1-{num_ports}.\")\n print_slow(\"\")\n print_slow(f\"You have {attempts} attempts.\")\n print_slow(\"\")\n\n while scan_attempts > 0:\n print_slow(\"\")\n print_slow(f\"\\nYou have {scan_attempts} scan attempts left.\")\n print_slow(\"\")\n start = int(input(\"Enter the start of the range to scan: \"))\n print_slow(\"\")\n end = int(input(\"Enter the end of the range to scan: \"))\n print_slow(\"\")\n\n num_open_ports_in_range = len(open_ports.intersection(range(start, end + 1)))\n print_slow(\"\")\n print_slow(f\"There are {num_open_ports_in_range} open ports in the range {start}-{end}.\")\n\n scan_attempts -= 1\n\n while attempts > 0 and len(open_ports) > 0:\n port = int(input(\"\\nEnter a port number to guess: \"))\n\n if port in open_ports:\n print_slow(Fore.GREEN + \"Port is open!\" + Style.RESET_ALL)\n open_ports.remove(port)\n correct_guesses += 1\n elif port in closed_ports:\n print_slow(Fore.RED + \"Port is closed.\" + Style.RESET_ALL)\n closed_ports.remove(port)\n else:\n print_slow(\"Invalid port number. Please enter a number between 1 and\", num_ports)\n\n attempts -= 1\n\n if len(open_ports) == 0:\n print_slow(\n Fore.GREEN + \"\\nCongratulations! You have successfully found all the open ports and gained access to the camera.\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n else:\n print_slow(\n Fore.RED + f\"\\nHack Failed! You found {correct_guesses} out of {len(open_ports) + correct_guesses} open ports.\" + Style.RESET_ALL)\n time.sleep(1)\n clear_terminal()\n port_scanning()" }, { "identifier": "AmySystem", "path": "systems/level_1/amy/amy_system.py", "snippet": "class AmySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"return_to_work_form.txt\",\n \"content\": (\n \"Employee Name: _______________\\n\"\n \"Employee ID: ____________\\n\"\n \"Department: _______________\\n\"\n \"Date of Return: ______\\n\\n\"\n \"I, [Employee Name], certify that I have followed the company's \"\n \"guidelines for returning to work after an absence. \"\n \"I understand that it is my responsibility to adhere to all safety \"\n \"protocols and procedures to ensure the health and well-being of my \"\n \"colleagues and myself.\\n\\n\"\n \"I acknowledge that I have completed any necessary training and have \"\n \"been briefed on any updates to the company's policies and procedures. \"\n \"I am aware that I must report any symptoms or exposure to COVID-19 to \"\n \"my supervisor immediately.\\n\\n\"\n \"I am committed to doing my part to maintain a safe and healthy work \"\n \"environment for everyone. I will continue to follow all guidelines \"\n \"and protocols and will cooperate with any additional measures that \"\n \"may be implemented in the future.\\n\\n\"\n \"Signature: [Employee Signature]\\n\"\n \"Date: [Date]\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. 
This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"benefits_summary.txt\",\n \"content\": (\n \"At Enigma Corps, we believe in taking care of our employees and \"\n \"offer a comprehensive benefits package to support your health, well-being, \"\n \"and financial security. Below is a summary of the benefits available to \"\n \"you as an employee of Enigma Corps.\\n\\n\"\n \"Health Insurance: We offer a choice of medical, dental, and vision \"\n \"plans to meet your needs. Our plans provide coverage for preventive care, \"\n \"hospitalization, prescription drugs, and more.\\n\\n\"\n \"Retirement Savings: We offer a 401(k) plan with a generous company \"\n \"match to help you save for your future. You can choose from a variety of \"\n \"investment options to suit your needs.\\n\\n\"\n \"Paid Time Off: We provide a generous amount of paid time off, \"\n \"including vacation, sick leave, and holiday pay. We also offer paid \"\n \"parental leave for new parents.\\n\\n\"\n \"Flexible Work Arrangements: We understand the importance of work-life \"\n \"balance and offer flexible work arrangements, such as remote work and \"\n \"flexible schedules, where possible.\\n\\n\"\n \"Wellness Programs: We offer a variety of wellness programs and \"\n \"resources to support your physical and mental health, including fitness \"\n \"classes, stress management programs, and counseling services.\\n\\n\"\n \"Professional Development: We are committed to supporting your growth \"\n \"and development and offer a variety of training and development \"\n \"opportunities, including tuition reimbursement, workshops, and seminars.\"\n \"\\n\\n\"\n \"We encourage you to review this summary carefully and take advantage of \"\n \"the benefits available to you. If you have any questions or need further \"\n \"information, please contact the HR department.\"\n )\n },\n ]\n self.emails = [\n {\n \"sender\": \"Amy\",\n \"subject\": \"Can't Stop Thinking About You\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I hope this message finds you in good spirits. I've been meaning to write to you for a while now, but I couldn't find the right words to express what I've been feeling.\\n\\n\"\n \"Ever since that night we spent together, I can't seem to get you out of my mind. There's something about the way you make me feel that I've never experienced before. 
\"\n \"\\nIt's exhilarating, yet terrifying all at the same time.\\n\\n\"\n \"I know we both have a lot on our plates right now, and I don't want to add any more stress to your life. But I can't help but wonder what could happen if we gave this a real shot. \"\n \"I know it's complicated, and there are a lot of factors to consider, but I think we owe it to ourselves to explore this connection we have.\\n\\n\"\n \"I understand if you're not ready to take that step, and I don't want to pressure you into anything you're not comfortable with. \"\n \"\\nBut I can't shake the feeling that we could have something truly special together.\\n\\n\"\n \"I'd love to hear your thoughts on this, and I'm more than willing to take things slow if that's what you need. Maybe we could meet up for dinner and talk about it in person?\"\n \" I think it would be easier to have this conversation face-to-face.\\n\\n\"\n \"I hope you're doing well, and I look forward to hearing from you soon.\\n\\n\"\n \"Take care,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and ask for your help on the Smith project. I've been having some trouble with the data analysis portion,\"\n \"\\nand I know you have a lot of experience in that area.\\n\\n\"\n \"The project involves analyzing customer feedback data to identify trends and areas for improvement. I've been working on it for a few weeks now, but I'm finding it challenging to make sense of the data and\"\n \"\\ndraw meaningful conclusions.\\n\\n\"\n \"Would you be available for a quick meeting later this week to go over some of the data with me? I would really appreciate your input and guidance on this. \"\n \"\\nI think your expertise could really help me make progress and ensure the success of the project.\\n\\n\"\n \"If you're available, please let me know your preferred date and time, and I'll send out a calendar invite. I'm flexible and can work around your schedule.\\n\\n\"\n \"Thank you in advance for your help, and I look forward to hearing from you soon.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Request for Time Off\",\n \"body\": (\n \"Good Afternoon Katie,\\n\\n\"\n \"I hope this email finds you well. I wanted to request some time off next month for a family vacation. I am planning to be out of the office from 10/09/2024 to 18/09/2024\\n\\n\"\n \"I have been working hard on the Johnson project and have made significant progress. I will make sure to finish up any outstanding work and hand off any ongoing projects to my colleagues before I leave. I will also be available by email in case of any urgent matters.\\n\\n\"\n \"I understand that this is a busy time for the team, and I want to ensure that my absence doesn't cause any disruptions. I have already spoken to Markus and he has kindly agreed to cover for me while I'm away.\\n\\n\"\n \"Thank you for considering my request. I look forward to spending some quality time with my family and coming back to work refreshed and recharged.\"\n \"\\nI am confident that the time off will help me come back with renewed energy and focus.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Apology for the Mistake\",\n \"body\": (\n \"Good Morning Kyle,\\n\\n\"\n \"I hope this email finds you well. I wanted to reach out and apologize for the mistake I made on the Johnson report. 
I realize now that I overlooked some important data, and I take full responsibility for it.\\n\\n\"\n \"I have gone back and corrected the report, and I will make sure to double-check my work in the future to avoid any similar mistakes. I have also attached the updated report for your reference.\\n\\n\"\n \"I understand if you are disappointed or frustrated, and I am more than willing to do whatever it takes to make it right. Please let me know if there's anything else I can do to fix this,\"\n \"\\nor if you would like to discuss this further.\\n\\n\"\n \"Once again, I am truly sorry for the mistake, and I appreciate your understanding. I value our working relationship and hope that this incident doesn't tarnish it. I am committed to making amends and ensuring that this doesn't happen again in the future.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n },\n {\n \"sender\": \"Amy\",\n \"subject\": \"Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Billy,\\n\\n\"\n \"I wanted to take a moment to express my gratitude for allowing me to use your computer while mine was being serviced by IT. \"\n \"It was a huge help and allowed me to stay productive during that time.\\n\\n\"\n \"I also noticed that your password is 'football'. While I understand it's easy to remember, it's important to choose a more secure password to protect your accounts.\"\n \"\\nI would recommend changing it to something more complex and unique. You never know who's watching after all.\\n\\n\"\n \"Thanks again for your generosity and understanding.\\n\\n\"\n \"Best,\\n\"\n \"Amy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "BillySystem", "path": "systems/level_1/billy/billy_system.py", "snippet": "class BillySystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"cover_letter.txt\",\n \"content\": (\n \"Dear Hiring Manager,\\n\\n\"\n \"I am writing to express my interest in the management position at Enigma Corps. \"\n \"I have been with the company for over 7 years and have consistently demonstrated my commitment to driving excellence and fostering collaboration within the team.\\n\\n\"\n \"During my tenure at Enigma Corps, I have been involved in various projects, including the successful completion of the Q3 deliverables project, where I played a key role in the planning and execution stages. \"\n \"My dedication to achieving project milestones and my ability to work under pressure make me a strong candidate for a management role.\\n\\n\"\n \"I possess strong leadership skills, which I have honed through my experiences in leading teams and coordinating cross-functional efforts. 
\"\n \"My ability to communicate effectively and build positive relationships with team members and stakeholders has resulted in successful project outcomes and increased productivity.\\n\\n\"\n \"In addition to my technical and leadership skills, I am also committed to continuous learning and professional development. \"\n \"I have participated in various training programs and workshops to enhance my management skills and stay up-to-date with industry trends and best practices.\\n\\n\"\n \"I am excited about the opportunity to contribute to the growth and success of Enigma Corps as a member of the management team. \"\n \"I am confident that my skills and experience will be valuable assets to the company, and I look forward to the opportunity to work closely with the team to drive innovation and excellence.\\n\\n\"\n \"Thank you for considering my application. I am looking forward to the opportunity to discuss my qualifications further and explore how I can contribute to the success of Enigma Corps.\\n\\n\"\n \"Sincerely,\\n\"\n \"Billy Constantine\\n\"\n )\n },\n {\n \"name\": \"employee_handbook.txt\",\n \"content\": (\n \"Welcome to Enigma Corps We are thrilled to have you as part of our \"\n \"team. This employee handbook has been designed to help you understand \"\n \"our company's policies, procedures, and expectations.\\n\\n\"\n \"Our company is committed to fostering a positive and inclusive work \"\n \"environment where all employees feel valued and supported. We believe \"\n \"in treating everyone with respect and dignity and expect all employees \"\n \"to do the same.\\n\\n\"\n \"In this handbook, you will find information on topics such as:\\n\\n\"\n \"- Code of Conduct\\n\"\n \"- Dress Code\\n\"\n \"- Attendance and Punctuality\\n\"\n \"- Time Off and Leave Policies\\n\"\n \"- Performance Evaluations\\n\"\n \"- Health and Safety\\n\"\n \"- Equal Employment Opportunity\\n\"\n \"- Harassment and Discrimination\\n\\n\"\n \"Please take the time to read through this handbook carefully and \"\n \"familiarize yourself with our policies and procedures. If you have any \"\n \"questions or concerns, do not hesitate to reach out to your supervisor \"\n \"or the HR department.\\n\\n\"\n \"We look forward to working with you and hope you have a long and \"\n \"successful career with Enigma Corps!\"\n )\n },\n {\n \"name\": \"meeting_minutes.txt\",\n \"content\": (\n \"Meeting Minutes\\n\\n\"\n \"Date: 24/06/2025\\n\"\n \"Location: REDACTED\\n\"\n \"Attendees: Amy, REDACTED, Billy, Kyle, REDACTED, REDACTED, REDACTED\\n\\n\"\n \"Agenda:\\n\"\n \"- Discuss progress on Project REDACTED\\n\"\n \"- Review safety protocols for handling sensitive materials\\n\"\n \"- Plan next steps for research and development\\n\\n\"\n \"Action Items:\\n\"\n \"- Compile data from recent experiments and share with team\\n\"\n \"- Schedule training session on updated safety protocols\\n\"\n \"- Develop timeline for next phase of Project X\\n\\n\"\n \"Next Meeting: 05/08/24, 12:00pm\\n\"\n )\n },\n {\n \"name\": \"employee_performance_review.txt\",\n \"content\": (\n \"Employee Performance Review\\n\\n\"\n \"Employee Name: Billy Constantine\\n\"\n \"Employee ID: 035854\\n\"\n \"Review Date: 28/06/2024\\n\\n\"\n \"Performance Summary:\\n\"\n \"Billy has demonstrated exceptional performance in his role as a sales representative. 
He has consistently exceeded sales targets, built strong relationships with clients, and demonstrated leadership qualities in team meetings and projects.\\n\\n\"\n \"Strengths:\\n\"\n \"- Exceeded quarterly sales targets by 15%.\\n\"\n \"- Successfully onboarded and mentored two new team members.\\n\"\n \"- Demonstrated excellent communication and negotiation skills.\\n\\n\"\n \"Areas for Improvement:\\n\"\n \"- Time management skills can be further developed to ensure all tasks are completed in a timely manner.\\n\"\n \"- Continued development of technical knowledge to stay up-to-date with industry trends.\\n\"\n \"- Strengthen collaboration with cross-functional teams to drive more integrated solutions.\\n\\n\"\n \"Goals for Next Review Period:\\n\"\n \"- Increase sales targets by 20%.\\n\"\n \"- Complete a management training program.\\n\"\n \"- Improve time management skills through prioritization and delegation.\\n\\n\"\n \"Overall Rating: 4.5/5\\n\"\n \"Reviewer Name: Katie Thompson\\n\"\n \"Reviewer Signature: Katie Thompson\\n\"\n \"Date: 28/06/2024\\n\"\n )\n }\n ]\n self.emails = [\n\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Need Your Help on the Smith Project\",\n \"body\": (\n \"Hi Amy,\\n\\n\"\n \"I hope this message finds you in great spirits! I'm more than happy to lend a helping hand with the Smith project. After all, two heads are better than one, especially when it comes to data analysis, right?\\n\\n\"\n \"How about we grab a coffee and chat about the project in person? I think it would be nice to catch up and discuss the data over a cup of joe. I'm sure we can brainstorm some ideas and come up with a game plan together.\\n\\n\"\n \"I'm free [date] at [time], does that work for you? If not, just let me know your availability, and we can find a time that suits us both. I'm really looking forward to our coffee date and tackling the project together.\\n\\n\"\n \"Can't wait to see you and dive into the data!\\n\\n\"\n \"Best,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Project Update\",\n \"body\": (\n \"Hello Team,\\n\\n\"\n \"I wanted to provide everyone with a quick update on our progress with the Q3 deliverables project. We've successfully completed the initial research phase and are now moving into the planning stage.\\n\\n\"\n \"In our last meeting, we discussed the following key points:\\n\"\n \"- Compound Analysis: We've identified a unique compound with potential applications in various industries. Further testing and analysis are required to unlock its full potential.\\n\"\n \"- Resource Management: We've allocated a special team and dedicated resources to handle the delicate nature of this project, ensuring utmost confidentiality and security.\\n\"\n \"- Safety Protocols: We've developed strict safety protocols to handle the compound, and we're conducting regular training sessions to ensure compliance.\\n\\n\"\n \"Our next steps include finalizing the project plan, assigning tasks to team members, and setting deadlines. I would appreciate input and feedback from all team members to ensure we're on the right track. Please review the attached project plan document for more details.\\n\\n\"\n \"Additionally, I want to remind everyone of the confidential nature of this project. It's imperative that we maintain discretion and follow all security protocols to safeguard our work. 
Let's work together to make this project a success and uphold the company's reputation for innovation and excellence.\\n\\n\"\n \"If you have any questions or concerns, please don't hesitate to reach out. Your cooperation and commitment to this project are greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Can't Stop Thinking About You\",\n \"body\": (\n \"Hey there, Amy,\\n\\n\"\n \"Wow, your message really caught me by surprise! But in the best way possible, of course. I've been trying to play it cool, but I have to admit, I've been thinking about that night a lot too. There was just something electric in the air, wasn't there?\\n\\n\"\n \"I've been tossing and turning, wondering if I should reach out to you or if I should wait for you to make the first move. I guess you beat me to it, and I'm glad you did. It's like you read my mind.\\n\\n\"\n \"I can't deny that there's a certain chemistry between us, and I'm intrigued to see where it could lead. I agree that our lives are complicated, and we don't want to add more stress to each other's plates. But sometimes, taking a risk is what makes life exciting, don't you think?\\n\\n\"\n \"I don't want to rush things or make you feel pressured in any way. I'm more than happy to take things slow and let them unfold naturally. But I can't help but imagine the possibilities if we give this a real shot. We could have something truly special, and I don't want to let that pass us by.\\n\\n\"\n \"How about we meet up for dinner and drinks next week? We can talk about it more and see where the night takes us. I think it would be a fun and relaxed way to get to know each other better and explore this connection we have. What do you say?\\n\\n\"\n \"I hope you're doing well, and I'm eagerly awaiting your reply. Until then, I'll be daydreaming about our next encounter.\\n\\n\"\n \"Take care, and talk to you soon.\\n\\n\"\n \"Yours truly,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Re: Thank You for Letting Me Use Your Computer\",\n \"body\": (\n \"Hey Amy,\\n\\n\"\n \"No problem at all! I'm always here to help out when I can. It's what teammates do, right?\\n\\n\"\n \"Oh, and about the password thing – haha, I know it's not the most secure choice. I've been meaning to change it, but I guess old habits die hard, right? \"\n \"Thanks for looking out for me though! I'll try to come up with something a bit more creative next time.\\n\\n\"\n \"If you ever need anything else, just give me a shout. Happy to help!\\n\\n\"\n \"Take care,\\n\"\n \"Billy\"\n )\n },\n {\n \"sender\": \"Billy\",\n \"subject\": \"Professional Development\",\n \"body\": (\n \"Good Evening Katie,\\n\\n\"\n \"I hope this email finds you well. I'm reaching out to express my interest in professional development opportunities within the company, particularly in the area of management and leadership.\\n\\n\"\n \"I've been with the company for several years now, and I've had the chance to work on various projects and collaborate with different teams. I'm keen to build on this experience and take on more responsibility, and I believe that acquiring the necessary skills for a management role would be a great next step in my career.\\n\\n\"\n \"Could you please provide information on available training programs, workshops, or seminars that focus on leadership development and management skills? 
I'm particularly interested in areas such as team leadership, strategic planning, conflict resolution, and decision-making.\\n\\n\"\n \"Additionally, if there are any tuition reimbursement programs or resources for management training and certification, I'd like to learn more about them. I'm committed to investing time and effort in my professional growth and believe that these opportunities would greatly benefit both myself and the company.\\n\\n\"\n \"Your guidance and assistance in exploring these options would be greatly appreciated. I look forward to your response and any recommendations you may have.\\n\\n\"\n \"Thank you for your support, and I'm excited about the prospect of contributing to the company's success in a management role.\\n\\n\"\n \"Best regards,\\n\"\n \"Billy\"\n )\n }\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" }, { "identifier": "camera_first", "path": "systems/level_1/cameras/camera_1.py", "snippet": "def camera_first():\n print(camera_1)\n print()\n print()\n move = input(Fore.GREEN + \"> \" + Style.RESET_ALL)\n\n if move.lower() == \"forward\":\n clear_terminal()\n camera_second()\n elif move.lower() == \"back\":\n print(Fore.RED + \"There is nothing to go back to...\" + Style.RESET_ALL)\n time.sleep(2)\n clear_terminal()\n camera_first()" }, { "identifier": "MarkusSystem", "path": "systems/level_1/markus/markus_system.py", "snippet": "class MarkusSystem:\n def __init__(self):\n self.files = [\n {\n \"name\": \"system_log.txt\",\n \"content\": (\n \"Enigma Corps System Log\\n\\n\"\n \"Date: 2023-11-16 08:00 AM\\n\"\n \"Event Type: System Startup\\n\"\n \"Description: The Enigma Corps systems smoothly initiated startup procedures, ensuring a seamless beginning to the workday.\\n\\n\"\n \"Date: 2023-11-16 10:30 AM\\n\"\n \"Event Type: Network Upgrade\\n\"\n \"Description: Implemented a network upgrade to enhance data transfer speeds, providing improved efficiency across departments.\\n\\n\"\n \"Date: 2023-11-16 01:45 PM\\n\"\n \"Event Type: Security Patch Applied\\n\"\n \"Description: Critical security patch successfully applied to safeguard against potential vulnerabilities, ensuring system integrity.\\n\\n\"\n \"Date: 2023-11-16 04:20 PM\\n\"\n \"Event Type: Server Maintenance\\n\"\n \"Description: Conducted routine maintenance on Enigma Corps servers, optimizing performance and minimizing downtime.\\n\\n\"\n \"This dynamic system log captures key events, from the smooth startup of the day to network upgrades, security enhancements, and routine maintenance. 
It serves as a valuable record for troubleshooting and analysis, ensuring the optimal functionality of Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"technical_documentation.docx\",\n \"content\": (\n \"Enigma Corps System Technical Documentation\\n\\n\"\n \"1. System Architecture:\\n\"\n \" - Overview of the system's structural design and components.\\n\\n\"\n \"2. Network Configuration:\\n\"\n \" - Details on the configuration of Enigma Corps' network setup for efficient communication.\\n\\n\"\n \"3. Security Protocols:\\n\"\n \" - Comprehensive overview of security measures and protocols implemented to safeguard sensitive data.\\n\\n\"\n \"4. Troubleshooting Guide:\\n\"\n \" - Step-by-step guide for identifying and resolving common issues to ensure seamless system functionality.\\n\\n\"\n \"5. Software Installation Procedures:\\n\"\n \" - Instructions for installing and updating software components within the Enigma Corps system.\\n\\n\"\n \"6. Hardware Specifications:\\n\"\n \" - Detailed specifications of the hardware components utilized in the Enigma Corps infrastructure.\\n\\n\"\n \"This meticulously crafted technical documentation serves as a go-to resource for understanding the Enigma Corps system, covering everything from its architecture and network configuration to security protocols, troubleshooting, and hardware specifications. It's an invaluable reference for maintaining optimal system performance.\"\n )\n },\n {\n \"name\": \"passwords.txt\",\n \"content\": (\n \"Sensitive Password Information for Enigma Corps\\n\\n\"\n \"Admin Password: *********\\n\"\n \"Database Password: *********\\n\"\n \"Router Password: *********\\n\"\n \"WiFi Password: *********\\n\"\n \"Encryption Key: *********\\n\\n\"\n \"Warning: This file contains confidential information. Keep it secure, and refrain from sharing passwords without explicit authorization. Safeguarding this information is crucial to maintaining the security and integrity of the Enigma Corps systems.\"\n )\n },\n {\n \"name\": \"software_inventory.csv\",\n \"content\": (\n \"Software Inventory for Enigma Corps\\n\\n\"\n \"Software Name, Version, License Key\\n\"\n \"1. Enigma Security Suite, v2.0, X1Y2Z3A4-B5C6D7E8-F9G0H1I2\\n\"\n \"2. DataGuard Backup, v1.5, Y3X2W1V0-U9T8S7R6-Q5P4O3N2\\n\"\n \"3. Office Suite, v2022, Z9Z8Z7Z6-Z5Z4Z3Z2-Z1Z0Z9Z8-Z7Z6Z5\\n\"\n \"4. VPN Client, v3.1, W6W5W4W3-W2W1W0-W9W8W7-W6W5W4\\n\"\n \"5. Project Management Tool, v4.2, VV8V7V6V5-V4V3V2V1-V0V9V8V7-V6V5V4\\n\\n\"\n \"Important: This inventory is crucial for tracking and managing software across Enigma Corps systems. The provided license keys are randomized for security reasons. Handle this information responsibly, and ensure it is only accessible to authorized personnel to maintain the security and compliance of our software assets.\"\n )\n }\n ]\n self.emails = [\n # Email to Management\n {\n \"sender\": \"Markus\",\n \"subject\": \"System Maintenance Scheduled\",\n \"body\": (\n \"Dear Michael,\\n\\n\"\n \"I hope this email finds you well. We wanted to inform you that we have scheduled a system maintenance session for the upcoming weekend to ensure the optimal performance and security of our systems.\\n\\n\"\n \"Maintenance Details:\\n\"\n \"- Date: 16/12/23 - 17/12/23\\n\"\n \"- Time: 3:00pm\\n\"\n \"- Duration: 1 Hour\\n\"\n \"- Impact: No impact expected\\n\\n\"\n \"During this period, there might be temporary disruptions in certain services. Our team will be working diligently to minimize any inconvenience. 
If you have any concerns or specific considerations, please feel free to reach out to us.\\n\\n\"\n \"Thank you for your understanding and cooperation.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Department\"\n )\n },\n {\n # Email to Employees\n \"sender\": \"Markus\",\n \"subject\": \"Upcoming Software Update\",\n \"body\": (\n \"Good afternoon, Kyle,\\n\\n\"\n \"We hope you're doing well. Our IT team is excited to inform you about an upcoming software update that will enhance the functionality and security of our systems. The update is scheduled for [Date] at [Time]. Please take note of the following details:\\n\\n\"\n \"- Expected Duration: Two Days\\n\"\n \"- Action Required: As this will be processed during the weekend, no action is required.\\n\"\n \"- Impact: While we anticipate minimal impact on your day-to-day activities, it's essential to be aware of any potential changes. These include: New UI to navigate, logging in or logging out issues.\\n\\n\"\n \"We recommend saving your work and logging out of your system before the update. If you encounter any issues post-update, don't hesitate to contact our IT support team for assistance.\\n\\n\"\n \"Thank you for your cooperation and understanding.\\n\\n\"\n \"Best regards,\\n\"\n \"IT Support Team\"\n )\n },\n # Email from Markus to Billy\n {\n \"sender\": \"Markus\",\n \"subject\": \"Urgent: Password Security Update Required\",\n \"body\": (\n \"Billy,\\n\\n\"\n \"I hope this email finds you well. I wanted to bring to your attention the importance of updating your current password. This is not the first time I've raised this concern, and I want to emphasize its critical nature.\\n\\n\"\n \"In recent security assessments, it has been flagged that your current password might not meet the latest security standards. To ensure the safety of your account and our overall cybersecurity, it is imperative that you change your password promptly.\\n\\n\"\n \"I understand that these reminders may seem repetitive, but they stem from a genuine concern for the security of your account and our collective responsibility in maintaining a robust cybersecurity posture.\\n\\n\"\n \"Please take a moment at your earliest convenience to update your password. If you encounter any issues or have questions, feel free to reach out. Your cooperation is greatly appreciated.\\n\\n\"\n \"Best regards,\\n\"\n \"Markus, Security Team\"\n )\n }\n\n ]\n\n def list_files(self):\n print_slow(\"\\nFiles:\")\n for file in self.files:\n print_slow(f\"\\n{file['name']}\")\n\n def read_file(self, file_name):\n file_found = False\n for file in self.files:\n if file['name'] == file_name:\n file_found = True\n return file['content']\n\n if not file_found:\n print_slow(\"\\nNo file found with that name, please try again.\")\n return None\n\n def list_emails(self):\n print_slow(\"\\nEmails:\")\n for i, email in enumerate(self.emails):\n print_slow(f\"\\n{email['subject']} - From: {email['sender']}\")\n\n def read_email(self, subject):\n for email in self.emails:\n if email['subject'].lower() == subject.lower():\n print_slow(f\"\\nFrom: {email['sender']}\\nSubject: {email['subject']}\\n\\n{email['body']}\")\n return\n print_slow(\"\\nNo email found with that subject, please try again.\")" } ]
import msvcrt import os import pickle import sys import time import colorama import pygame from colorama import Fore, Style from components.common_functions import clear_terminal, print_slow, shop_help, help_user, connect_help, mail_help, \ system_help from conversations.calls import intro_call, first_call, second_call, third_call, fourth_call, fifth_call, sixth_call, \ markus_seen_call from conversations.minigame_calls import code_shatter_call from minigames.code_shatter_minigame import code_shatter_minigame from minigames.eye_spy_minigame import port_scanning from systems.level_1.amy.amy_system import AmySystem from systems.level_1.billy.billy_system import BillySystem from systems.level_1.cameras.camera_1 import camera_first from systems.level_1.markus.markus_system import MarkusSystem
15,883
print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) first_call() if email['subject'].lower() == "upcoming software update" and email['sender'].lower() == 'markus': evidence_item = 6 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) sixth_call() # Add money to balance based on the email subject if email['subject'].lower() == "professional development": balance += 30 elif email['subject'].lower() == "project update": balance += 50 elif email['subject'].lower() == "can't stop thinking about you": balance += 20 elif email['subject'].lower() == "upcoming software update": balance += 50 if not email_found: print_slow(Fore.RED + "\nNo email found with that subject, please try again." + Style.RESET_ALL) def connect(): if has_item("EnigmaLink"): print_slow("") print_slow(Fore.GREEN + "Connecting to Enigma Corps network using EnigmaLink..." 
+ Style.RESET_ALL) time.sleep(0.5) print_slow("") print_slow(Fore.GREEN + "Establishing connection...") time.sleep(1) print_slow("") print_slow(Fore.GREEN + "Linking EnigmaLink to remote server...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Decrypting server security protocols...") time.sleep(3) print_slow("") print_slow(Fore.GREEN + "Bypassing firewall...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Connection established!") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "You are now connected to Enigma Corps network.") print_slow("") # Network command loop while True: command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Scan the network for systems and vulnerabilities if command.lower() == "scan": scan() # Hack into a system or vulnerability elif command.lower().startswith("hack "): target = command[5:] hack(target) # Display connect help message elif command.lower() == "help":
# Set the PYGAME_HIDE_SUPPORT_PROMPT environment variable os.environ['PYGAME_HIDE_SUPPORT_PROMPT'] = "1" # Initialize pygame mixer pygame.mixer.init() # Load the bg music file and loop it pygame.mixer.music.load('bg_music.mp3') pygame.mixer.music.play(-1) # sets the volume to 20% (change value to adjust) pygame.mixer.music.set_volume(0.2) # Define the global variables at the module level inventory = [] balance = 300 emails = [] has_read_email = False has_read_file = False has_intro_call = False seen_markus = False evidence = [] amy_system = AmySystem() billy_system = BillySystem() markus_system = MarkusSystem() bg_music_enabled = True player_level = 1 has_started_game = False # Save the game state to a file def save_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus with open('savegame.pkl', 'wb') as f: pickle.dump( (inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus), f) # Load the game state from a file def load_game(): global inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus if os.path.exists('savegame.pkl'): with open('savegame.pkl', 'rb') as f: inventory, balance, emails, has_read_email, evidence, player_level, has_intro_call, has_started_game, seen_markus = pickle.load( f) else: # If the savegame file doesn't exist, set the default values inventory = [] player_level = 1 evidence = [] has_intro_call = False has_started_game = False seen_markus = False balance = 30000 emails = [ { "sender": "Hacker's Digest", "subject": "Weekly Hacker's Digest", "body": ( "Issue #143\n\n" "Cipher,\n\n" "Welcome to the latest edition of Hacker's Digest! In this issue: \n\n" "- Unveiling the Latest Exploits\n" "- Spotlight on Cryptocurrency Security\n" "- Interview with a Grey Hat Hacker\n" "- Tool of the Week: EnigmaLink\n\n" "Don't miss out on the latest in the world of hacking and cybersecurity. Stay informed and stay secure!\n\n" "Best regards,\n" "Hacker's Digest Team" ) }, { "sender": "The Cyber Mythbuster", "subject": "Busting Cybersecurity Myths", "body": ( "Cipher,\n\n" "Heard any wild cybersecurity myths lately? This week, we're busting the craziest ones, including:\n\n" "- Using 'Password123' for Maximum Security\n" "- Cyber Ninjas and Their Stealthy VPNs\n" "- USB Drives: The Fountain of Eternal Data\n\n" "Stay myth-free and keep on hacking (responsibly)!\n\n" "Mythbustingly,\n" "The Cyber Mythbuster" ) }, { "sender": "CyberSilliness", "subject": "Where Cyber Meets Comedy", "body": ( "Welcome to the CyberSilliness Gazette\n" "Where we believe that a good laugh is the ultimate antivirus! In this week's hilarity-packed issue:\n\n" "- Cyber Jokes to Crack You Up (Without Cracking Your Passwords)\n" "- Tech Support Horror Stories: A Comedy of Errors\n" "- Chuckle Challenge: Share Your Funniest Cybersecurity Anecdote\n" "- Meet the Cyber Clowns: Our Team's Silly Security Habits Revealed\n\n" "Laughter is contagious, and so is good cybersecurity. Dive into the giggles and stay safe!\n\n" "Silly Regards,\n" "The CyberSilliness Team" ) }, { "sender": "Security Insight Weekly", "subject": "Navigating the Cybersecurity Landscape", "body": ( "Hello Cipher,\n\n" "Welcome to Security Insight Weekly, your reliable source for navigating the ever-evolving cybersecurity landscape. 
In this week's issue:\n\n" "- Threat Analysis: Understanding Recent Cybersecurity Incidents\n" "- Best Practices for Endpoint Security\n" "- Industry Spotlight: Healthcare Cybersecurity Challenges\n" "- Security Compliance Update: Staying Aligned with Regulations\n\n" "Stay informed and empowered as we delve into the serious aspects of cybersecurity. Your security is our priority.\n\n" "Best regards,\n" "The Security Insight Team" ) }, ] # New function for game settings def game_settings(): global bg_music_enabled print_slow(Fore.GREEN + "░██████╗███████╗████████╗████████╗██╗███╗░░██╗░██████╗░░██████╗") print_slow(Fore.GREEN + "██╔════╝██╔════╝╚══██╔══╝╚══██╔══╝██║████╗░██║██╔════╝░██╔════╝") print_slow(Fore.GREEN + "╚█████╗░█████╗░░░░░██║░░░░░░██║░░░██║██╔██╗██║██║░░██╗░╚█████╗░") print_slow(Fore.GREEN + "░╚═══██╗██╔══╝░░░░░██║░░░░░░██║░░░██║██║╚████║██║░░╚██╗░╚═══██╗") print_slow(Fore.GREEN + "██████╔╝███████╗░░░██║░░░░░░██║░░░██║██║░╚███║╚██████╔╝██████╔╝") print_slow(Fore.GREEN + "╚═════╝░╚══════╝░░░╚═╝░░░░░░╚═╝░░░╚═╝╚═╝░░╚══╝░╚═════╝░╚═════╝░" + Style.RESET_ALL) print_slow("") print_slow("") print_slow("") print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) print_slow( Fore.GREEN + f"| [Background Music] {'Enabled |' if bg_music_enabled else 'Disabled |'}" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Delete Savegame] |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| |" + Style.RESET_ALL) print_slow(Fore.GREEN + "| [Back to Main Menu] |" + Style.RESET_ALL) print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL) choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL) if choice.lower() == "background music": # Toggle background music bg_music_enabled = not bg_music_enabled if bg_music_enabled: pygame.mixer.music.play(-1) print_slow(Fore.GREEN + "\nBackground Music Enabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() else: pygame.mixer.music.stop() print_slow(Fore.RED + "\nBackground Music Disabled" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "delete savegame": # Delete savegame confirm = input(Fore.RED + "\nAre you sure you want to delete the savegame? (yes/no): " + Style.RESET_ALL) if confirm.lower() == "yes": try: os.remove("savegame.pkl") print_slow(Fore.GREEN + "\nSavegame Deleted" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() except FileNotFoundError: print_slow(Fore.RED + "\nSavegame not found" + Style.RESET_ALL) time.sleep(1) clear_terminal() game_settings() elif choice.lower() == "back" or choice.lower() == "back to main menu": # Return to Main Menu print_slow(Fore.GREEN + "\nReturning to Main Menu..." + Style.RESET_ALL) time.sleep(1) clear_terminal() else: print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL)
        time.sleep(1)
        clear_terminal()
        game_settings()


# Function to add an item to the inventory
def add_to_inventory(item):
    inventory.append(item)


def remove_from_inventory(item):
    if item in inventory:
        inventory.remove(item)


def add_evidence(evidence_item):
    evidence.append(evidence_item)


def has_evidence(evidence_item):
    return evidence_item in evidence


# Prints the game's title
def main():
    clear_terminal()
    colorama.init()
    print_slow(Fore.GREEN + "██████╗░██╗░░░░░░█████╗░░█████╗░██╗░░██╗██╗░░██╗░█████╗░████████╗" + Style.RESET_ALL)
    print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██╗██╔══██╗██║░██╔╝██║░░██║██╔══██╗╚══██╔══╝" + Style.RESET_ALL)
    print_slow(Fore.GREEN + "██████╦╝██║░░░░░███████║██║░░╚═╝█████═╝░███████║███████║░░░██║░░░" + Style.RESET_ALL)
    print_slow(Fore.GREEN + "██╔══██╗██║░░░░░██╔══██║██║░░██╗██╔═██╗░██╔══██║██╔══██║░░░██║░░░" + Style.RESET_ALL)
    print_slow(Fore.GREEN + "██████╦╝███████╗██║░░██║╚█████╔╝██║░╚██╗██║░░██║██║░░██║░░░██║░░░" + Style.RESET_ALL)
    print_slow(Fore.GREEN + "╚═════╝░╚══════╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝░░░╚═╝░░░" + Style.RESET_ALL)

    # Pause for 5 seconds before clearing the console
    time.sleep(5)

    # Clear the console
    clear_terminal()

    # Main menu loop
    while True:
        print_slow(Fore.GREEN + "███╗░░░███╗░█████╗░██╗███╗░░██╗  ███╗░░░███╗███████╗███╗░░██╗██╗░░░██╗")
        print_slow(Fore.GREEN + "████╗░████║██╔══██╗██║████╗░██║  ████╗░████║██╔════╝████╗░██║██║░░░██║")
        print_slow(Fore.GREEN + "██╔████╔██║███████║██║██╔██╗██║  ██╔████╔██║█████╗░░██╔██╗██║██║░░░██║")
        print_slow(Fore.GREEN + "██║╚██╔╝██║██╔══██║██║██║╚████║  ██║╚██╔╝██║██╔══╝░░██║╚████║██║░░░██║")
        print_slow(Fore.GREEN + "██║░╚═╝░██║██║░░██║██║██║░╚███║  ██║░╚═╝░██║███████╗██║░╚███║╚██████╔╝")
        print_slow(
            Fore.GREEN + "╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝╚═╝░░╚══╝  ╚═╝░░░░░╚═╝╚══════╝╚═╝░░╚══╝░╚═════╝░" + Style.RESET_ALL)
        print_slow("")
        print_slow("")
        print_slow("")
        print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL)
        print_slow(Fore.GREEN + "| [Start] Start the game |" + Style.RESET_ALL)
        print_slow(Fore.GREEN + "| |" + Style.RESET_ALL)
        print_slow(Fore.GREEN + "| [Options] Change the settings |" + Style.RESET_ALL)
        print_slow(Fore.GREEN + "| |" + Style.RESET_ALL)
        print_slow(Fore.GREEN + "| [Exit] Exit the game |" + Style.RESET_ALL)
        print_slow(Fore.GREEN + " --------------------------------------------" + Style.RESET_ALL)
        choice = input(Fore.GREEN + "\n> " + Style.RESET_ALL)

        # Start the game
        if choice.lower() == "start":
            load_game()
            start_game()

        # Open game settings
        elif choice.lower() == "options":
            clear_terminal()
            game_settings()

        # Exit the game
        elif choice.lower() == "exit":
            print_slow(Fore.GREEN + "\nExiting..." + Style.RESET_ALL)
            pygame.mixer.music.stop()
            sys.exit()

        else:
            print_slow(Fore.RED + "\nInvalid choice, please try again." 
+ Style.RESET_ALL) time.sleep(2) clear_terminal() # Function to get the user's balance def get_balance(): return balance # Function to add money to the user's balance def add_money(amount): global balance balance += amount # Function to subtract money from the user's balance def subtract_money(amount): global balance balance -= amount def add_level(level): global player_level player_level += level # Function to print the user's balance def print_balance(): print_slow(f"Your current balance is: £{get_balance()}") # Function to read files and marks files as evidence def read_file(file_content, file_name): global has_read_file, evidence global balance # Print the file content print_slow(Fore.LIGHTBLUE_EX + f"\n{file_name}:\n\n{file_content}" + Style.RESET_ALL) print_slow("") # Check if the file is one of the specific files that increases evidence count if file_name.lower() in ["employee_performance_review.txt"]: evidence_item = 4 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fourth_call() if file_name.lower() in ["meeting_minutes.txt"]: evidence_item = 5 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) fifth_call() # Add more file names here as needed # Add money to balance based on the file name if file_name.lower() == "employee_performance_review.txt": balance += 30 elif file_name.lower() == "meeting_minutes.txt": balance += 50 # List of available upgrades upgrades = [ {"name": "EnigmaLink", "description": "Application required to connect to Enigma Corps network.", "price": 100}, {"name": "CodeShatter", "description": "A powerful password breaker that can crack even the strongest passwords.", "price": 250}, {"name": "EyeSpy", "description": "A privacy breaker to gain access to the smallest of cameras.", "price": 500}, {"name": "Rift", "description": "Break the barrier between the Server and Network.", "price": 800} ] # Function to display the shop def shop(): clear_terminal() print_slow(Fore.YELLOW + r''' ██╗░░██╗░█████╗░░█████╗░██╗░░██╗███████╗██████╗░  ███╗░░░███╗░█████╗░██████╗░██╗░░██╗███████╗████████╗ ██║░░██║██╔══██╗██╔══██╗██║░██╔╝██╔════╝██╔══██╗  ████╗░████║██╔══██╗██╔══██╗██║░██╔╝██╔════╝╚══██╔══╝ ███████║███████║██║░░╚═╝█████═╝░█████╗░░██████╔╝  ██╔████╔██║███████║██████╔╝█████═╝░█████╗░░░░░██║░░░ ██╔══██║██╔══██║██║░░██╗██╔═██╗░██╔══╝░░██╔══██╗  ██║╚██╔╝██║██╔══██║██╔══██╗██╔═██╗░██╔══╝░░░░░██║░░░ ██║░░██║██║░░██║╚█████╔╝██║░╚██╗███████╗██║░░██║  ██║░╚═╝░██║██║░░██║██║░░██║██║░╚██╗███████╗░░░██║░░░ ╚═╝░░╚═╝╚═╝░░╚═╝░╚════╝░╚═╝░░╚═╝╚══════╝╚═╝░░╚═╝  ╚═╝░░░░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚═╝░░╚═╝╚══════╝░░░╚═╝░░░''' + Style.RESET_ALL) print_slow(Fore.YELLOW + "\nWelcome to the Hacker's Market!" 
+ Style.RESET_ALL)
    print_slow("")
    print_slow(Fore.YELLOW + "Here you can buy upgrades to improve your hacking abilities.\n" + Style.RESET_ALL)

    while True:
        # Display the list of available upgrades
        for i, upgrade in enumerate(upgrades):
            print_slow(
                Fore.YELLOW + f"\n{upgrade['name']} - {upgrade['description']} - £{upgrade['price']}" + Style.RESET_ALL)

        # Get the user's choice
        command = input(Fore.YELLOW + "\n> " + Style.RESET_ALL)

        # Buy the chosen upgrade
        if command.lower() == 'exit':
            print_slow(Fore.YELLOW + "\nExiting Hacker's Market" + Style.RESET_ALL)
            time.sleep(1)
            clear_terminal()
            start_game()
        elif command.lower() == 'help':
            shop_help()
        elif command.lower().startswith('buy '):
            upgrade_name = command[4:]  # [4:] removes first 4 characters
            if has_item('EnigmaLink'):
                if upgrade_name.lower() == 'enigmalink':
                    print_slow("")
                    print_slow(Fore.RED + "Sold Out" + Style.RESET_ALL)
                    time.sleep(1)
                    clear_terminal()
                    shop()
                else:
                    for upgrade in upgrades:
                        if upgrade_name.lower() == upgrade['name'].lower():
                            if get_balance() >= upgrade['price']:
                                print_slow("")
                                print_slow(
                                    Fore.GREEN + f"You have successfully purchased {upgrade['name']} for £{upgrade['price']}!" + Style.RESET_ALL)
                                subtract_money(upgrade['price'])
                                print_slow("")
                                print_balance()
                                add_to_inventory(upgrade['name'])
                                time.sleep(2)
                                clear_terminal()

                                # Check if the purchased upgrade is CodeShatter
                                if upgrade_name.lower() == 'codeshatter':
                                    print_slow("")
                                    print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL)
                                    input(Fore.GREEN + "> " + Style.RESET_ALL)
                                    code_shatter_call()
                                    shop()
                                else:
                                    clear_terminal()
                                    shop()
                            else:
                                print_slow(
                                    Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL)
                                time.sleep(1)
                                clear_terminal()
                                shop()
                        else:
                            print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL)
                            time.sleep(1)
                            clear_terminal()
                            shop()
            else:
                for upgrade in upgrades:
                    if upgrade_name.lower() == upgrade['name'].lower():
                        if get_balance() >= upgrade['price']:
                            print_slow("")
                            print_slow(
                                Fore.GREEN + f"You have successfully purchased {upgrade['name']} for £{upgrade['price']}!" + Style.RESET_ALL)
                            subtract_money(upgrade['price'])
                            print_slow("")
                            print_balance()
                            add_to_inventory(upgrade['name'])
                            time.sleep(2)
                            clear_terminal()
                            shop()
                        else:
                            print_slow(
                                Fore.RED + "You don't have enough money to buy this upgrade." + Style.RESET_ALL)
                            shop()
                    else:
                        print_slow(Fore.RED + "Invalid choice, please try again." + Style.RESET_ALL)
                        time.sleep(1)
                        clear_terminal()
                        shop()


# Function to start the game
def start_game():
    global has_intro_call, has_started_game, seen_markus
    if has_intro_call:
        clear_terminal()
        pass
    else:
        print_slow("\nStarting game...")
        time.sleep(1)
        print_slow("\nLoading assets...")
        time.sleep(1)
        clear_terminal()
        print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL)
        input(Fore.GREEN + "> " + Style.RESET_ALL)
        intro_call()
        has_intro_call = True
        has_started_game = True
        print_slow(Fore.MAGENTA + "\nHint: Type 'help' to get a list of available commands." + Style.RESET_ALL)
        pass
    if seen_markus:
        print_slow(Fore.GREEN + "Incoming Call..." 
+ Style.RESET_ALL)
        input(Fore.GREEN + "> " + Style.RESET_ALL)
        markus_seen_call()
    else:
        pass

    # Game command loop
    command = input(Fore.GREEN + "> " + Style.RESET_ALL)

    # Connect to the network
    if command.lower() == "connect":
        connect()
    # Access the mail system
    elif command.lower() == "mail":
        mail()
    # Display help message
    elif command.lower() == "help":
        help_user()
    # Check balance
    elif command.lower() == "balance":
        print_balance()
    # Enter shop
    elif command.lower() == "shop":
        shop()
    # Clear terminal
    elif command.lower() == "clear":
        clear_terminal()
    # Return to the main menu
    elif command.lower() == "exit":
        print_slow("Returning to Main Menu...")
        time.sleep(1)
        main()
    else:
        print_slow("Invalid command, please try again.")
        time.sleep(1)
        clear_terminal()
        start_game()

    # Save the game state
    save_game()


# Function to check if an item is in the inventory
def has_item(item):
    return item in inventory


# Lists the systems that are reachable at the player's current level
def scan():
    print_slow("")
    print_slow(Fore.YELLOW + "Scanning network..." + Style.RESET_ALL)
    time.sleep(2)
    print_slow("")
    print_slow(Fore.YELLOW + "\nAvailable Systems:" + Style.RESET_ALL)
    print_slow("")
    for system in all_systems:
        if system['level'] == player_level:
            print_slow("")
            print_slow(f"{system['name']} ({system['type']})")
            print_slow("")


# Reads a password from the keyboard, echoing '*' per keystroke (Windows-only via msvcrt)
def getpass_star(prompt="Password: "):
    print(prompt, end='', flush=True)
    password = []
    while True:
        char = msvcrt.getch().decode('utf-8')
        if char == '\r' or char == '\n':
            break
        elif char == '\b':  # Backspace
            if password:
                password.pop()
                print('\b \b', end='', flush=True)
        else:
            password.append(char)
            print('*', end='', flush=True)
    print()  # Move to the next line
    return ''.join(password)


def hack(system_name):
    global seen_markus
    # Find the system in the all_systems list
    system = next((s for s in all_systems if s['name'].lower() == system_name.lower()), None)
    if system:
        if system['level'] == player_level:
            # Check for CodeShatter before prompting for password
            if system['name'] == 'Markus' and has_item("CodeShatter"):
                clear_terminal()
                code_shatter_minigame()
                print_slow("Password Cracked: 735@&!//")
                input("Press [Enter] to continue")
                clear_terminal()
                markus_system_command_loop(markus_system)
                add_level(player_level)
                remove_from_inventory(item="CodeShatter")
                seen_markus = True
            elif system['name'] == 'Lobby Camera' and has_item("EyeSpy"):
                port_scanning()
                add_level(player_level)
                camera_first()
            else:
                # Prompt the user for the password
                print_slow("")
                password = getpass_star("Enter password: ")
                print_slow("")
                if password == system['password']:
                    print_slow("")
                    print_slow(Fore.GREEN + "Access granted!" + Style.RESET_ALL)
                    if system['name'] == 'Amy':
                        amy_system_command_loop(amy_system)
                    elif system['name'] == 'Billy':
                        billy_system_command_loop(billy_system)
                    elif system['name'] == 'Markus':
                        markus_system_command_loop(markus_system)
                        add_level(player_level)
                        seen_markus = True
                    elif system['name'] == 'Lobby Camera':
                        camera_first()
                    elif system['name'] == 'Kyle':
                        # Implement Kyle System
                        pass
                    else:
                        # Add more conditions for other systems
                        pass
                else:
                    print_slow("")
                    print_slow(Fore.RED + "Access denied! Incorrect password." + Style.RESET_ALL)
        else:
            print_slow("")
            print_slow(Fore.RED + "System not found! Please try again." + Style.RESET_ALL)
    else:
        print_slow("")
        print_slow(Fore.RED + "System not found! Please try again." 
+ Style.RESET_ALL) def list_emails(emails): print_slow(Fore.LIGHTBLUE_EX + "\nEmails:" + Style.RESET_ALL) for i, email in enumerate(emails): print_slow(Fore.LIGHTBLUE_EX + f"\n{email['subject']} - From: {email['sender']}" + Style.RESET_ALL) def read_email(emails, subject): global has_read_email, evidence global balance email_found = False for email in emails: if email['subject'].lower() == subject.lower(): email_found = True print_slow( Fore.LIGHTBLUE_EX + f"\nFrom: {email['sender']}\nSubject: {email['subject']}\n\n{email['body']}" + Style.RESET_ALL) # Check if the email is one of the specific emails that increases evidence count if email['subject'].lower() in ["project update"]: evidence_item = 3 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) third_call() if email['subject'].lower() in ["professional development"]: evidence_item = 2 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) second_call() if email['subject'].lower() == "can't stop thinking about you" and email['sender'].lower() == 'amy': evidence_item = 1 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) first_call() if email['subject'].lower() == "upcoming software update" and email['sender'].lower() == 'markus': evidence_item = 6 if not has_evidence(evidence_item): print_slow("Adding evidence to the list...") print_slow("") print_slow(Fore.GREEN + "Evidence Secured" + Style.RESET_ALL) add_evidence(evidence_item) print_slow("") print_slow("") time.sleep(3) print_slow(Fore.GREEN + "Incoming Call..." + Style.RESET_ALL) input(Fore.GREEN + "> " + Style.RESET_ALL) sixth_call() # Add money to balance based on the email subject if email['subject'].lower() == "professional development": balance += 30 elif email['subject'].lower() == "project update": balance += 50 elif email['subject'].lower() == "can't stop thinking about you": balance += 20 elif email['subject'].lower() == "upcoming software update": balance += 50 if not email_found: print_slow(Fore.RED + "\nNo email found with that subject, please try again." + Style.RESET_ALL) def connect(): if has_item("EnigmaLink"): print_slow("") print_slow(Fore.GREEN + "Connecting to Enigma Corps network using EnigmaLink..." 
+ Style.RESET_ALL) time.sleep(0.5) print_slow("") print_slow(Fore.GREEN + "Establishing connection...") time.sleep(1) print_slow("") print_slow(Fore.GREEN + "Linking EnigmaLink to remote server...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Decrypting server security protocols...") time.sleep(3) print_slow("") print_slow(Fore.GREEN + "Bypassing firewall...") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "Connection established!") time.sleep(2) print_slow("") print_slow(Fore.GREEN + "You are now connected to Enigma Corps network.") print_slow("") # Network command loop while True: command = input(Fore.GREEN + "> " + Style.RESET_ALL) # Scan the network for systems and vulnerabilities if command.lower() == "scan": scan() # Hack into a system or vulnerability elif command.lower().startswith("hack "): target = command[5:] hack(target) # Display connect help message elif command.lower() == "help":
connect_help()
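Editor's note on the code above: getpass_star only works on Windows because it reads raw keystrokes through msvcrt. Below is a minimal cross-platform sketch, not part of the original file; the helper name getpass_portable and the errors='ignore' decode policy are assumptions. It keeps the starred echo on Windows and falls back to the standard library's getpass (which hides input without echoing anything) elsewhere.

import sys

def getpass_portable(prompt="Password: "):
    # Hypothetical cross-platform variant of getpass_star (illustration only).
    if sys.platform == "win32":
        import msvcrt
        print(prompt, end='', flush=True)
        chars = []
        while True:
            ch = msvcrt.getch()
            if ch in (b'\r', b'\n'):  # Enter finishes input
                break
            if ch == b'\x08':  # Backspace: drop last char and erase one '*'
                if chars:
                    chars.pop()
                    print('\b \b', end='', flush=True)
            else:
                chars.append(ch.decode('utf-8', errors='ignore'))
                print('*', end='', flush=True)
        print()  # Move to the next line
        return ''.join(chars)
    # POSIX fallback: input is simply hidden, with no '*' feedback.
    import getpass
    return getpass.getpass(prompt)

A drop-in use would be password = getpass_portable("Enter password: ") at the prompt inside hack().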
4
2023-11-06 09:52:13+00:00
24k
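Editor's note on this record: save_game and load_game persist the whole game state with pickle, and pickle.load will execute code embedded in a tampered savegame.pkl. The sketch below is a hedged alternative, not the game's actual implementation: the same fields round-tripped through json, which only ever yields plain data. All of the persisted values (lists, ints, bools, dicts of strings) are already JSON-serializable; the filename savegame.json and the function names are assumptions.

import json
import os

SAVE_PATH = 'savegame.json'  # assumed filename, distinct from the game's savegame.pkl

def save_game_json(state):
    # state mirrors the tuple pickled by save_game, but as a dict of plain data
    with open(SAVE_PATH, 'w', encoding='utf-8') as f:
        json.dump(state, f)

def load_game_json():
    # Returns None when no save exists, matching load_game's "use defaults" branch
    if not os.path.exists(SAVE_PATH):
        return None
    with open(SAVE_PATH, 'r', encoding='utf-8') as f:
        return json.load(f)

# Usage with the same fields the game persists:
state = {
    'inventory': [], 'balance': 300, 'emails': [],
    'has_read_email': False, 'evidence': [], 'player_level': 1,
    'has_intro_call': False, 'has_started_game': False, 'seen_markus': False,
}
save_game_json(state)
restored = load_game_json()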
ziqi-zhang/TAOISM
python/test/test_conv.py
[ { "identifier": "register_layer", "path": "python/common_net.py", "snippet": "def register_layer(layer, name):\n layer.register_forward_hook(hooking_layer(name))\n layer.register_backward_hook(hooking_layer_backward(name))\n layer_names.append(name)" }, { "identifier": "register_weight_layer", "path": "python/common_net.py", "snippet": "def register_weight_layer(layer, name):\n register_layer(layer, name)\n layer_weight[name] = layer.weight\n linear_layer_names.append(name)" }, { "identifier": "get_layer_weight", "path": "python/common_net.py", "snippet": "def get_layer_weight(name):\n return layer_weight[name]" }, { "identifier": "get_layer_input", "path": "python/common_net.py", "snippet": "def get_layer_input(name):\n return layer_input[name]" }, { "identifier": "get_layer_weight_grad", "path": "python/common_net.py", "snippet": "def get_layer_weight_grad(name):\n return layer_weight[name].grad.data" }, { "identifier": "get_layer_output", "path": "python/common_net.py", "snippet": "def get_layer_output(name):\n return layer_output[name]" }, { "identifier": "get_layer_output_grad", "path": "python/common_net.py", "snippet": "def get_layer_output_grad(name):\n return layer_output_grad[name]" }, { "identifier": "get_layer_input_grad", "path": "python/common_net.py", "snippet": "def get_layer_input_grad(name):\n return layer_input_grad[name]" }, { "identifier": "GlobalTensor", "path": "python/enclave_interfaces.py", "snippet": "class GlobalTensor(object):\n cpu_tensor = {}\n gpu_tensors = {}\n encrypted_tensors = {}\n LinkedTags = {}\n InverseLinkedTags = {}\n IsInitEnclaveTensor = {}\n EnclaveInterface = None\n eid = None\n is_init_global_tensor = False\n\n @staticmethod\n def init():\n if GlobalTensor.is_init_global_tensor:\n return\n GlobalTensor.EnclaveInterface = EnclaveInterface()\n GlobalTensor.EnclaveInterface.init_enclave()\n GlobalTensor.is_init_global_tensor = True\n\n @staticmethod\n def destroy():\n GlobalTensor.EnclaveInterface.destroy_enclave()\n\n GlobalTensor.cpu_tensor = {}\n GlobalTensor.gpu_tensors = {}\n GlobalTensor.encrypted_tensors = {}\n GlobalTensor.LinkedTags = {}\n GlobalTensor.InverseLinkedTags = {}\n GlobalTensor.IsInitEnclaveTensor = {}\n GlobalTensor.EnclaveInterface = None\n GlobalTensor.eid = None\n GlobalTensor.is_init_global_tensor = False\n\n\n @staticmethod\n def get_eid():\n return GlobalTensor.EnclaveInterface.get_eid()\n\n @staticmethod\n def link_tags(tag1, tag2):\n if tag1 == tag2:\n return\n\n friends = []\n\n def add_friends(tag):\n nonlocal friends\n if tag in GlobalTensor.LinkedTags:\n its_leader_tag = GlobalTensor.LinkedTags[tag]\n if its_leader_tag in GlobalTensor.InverseLinkedTags:\n friends += GlobalTensor.InverseLinkedTags.pop(its_leader_tag)\n else:\n friends += [tag]\n\n add_friends(tag1)\n add_friends(tag2)\n leader_tag = min(friends)\n\n GlobalTensor.InverseLinkedTags[leader_tag] = friends\n for t in friends:\n if t in GlobalTensor.IsInitEnclaveTensor:\n raise ValueError(\"Tags must linked before tensor initialization\")\n GlobalTensor.LinkedTags[t] = leader_tag\n\n @staticmethod\n def get_remapped_tags(tag):\n return GlobalTensor.LinkedTags[tag] if tag in GlobalTensor.LinkedTags else tag\n\n @staticmethod\n def set_cpu(tag, tensor):\n GlobalTensor.cpu_tensor[tag] = tensor.to(torch.device(\"cpu\"))\n\n @staticmethod\n def set_gpu(tag, tensor):\n GlobalTensor.gpu_tensors[tag] = tensor\n\n @staticmethod\n def set_encrypted(tag, tensor):\n GlobalTensor.encrypted_tensors[tag] = tensor\n\n @staticmethod\n def get_cpu(tag):\n return 
GlobalTensor.cpu_tensor[tag]\n\n @staticmethod\n def get_gpu(tag):\n return GlobalTensor.gpu_tensors[tag]\n\n @staticmethod\n def get_encryption(tag):\n return GlobalTensor.encrypted_tensors[tag]\n\n @staticmethod\n def init_enclave_tensor(tag, size):\n size = list(size)\n if len(size) < 4:\n size = [1] * (4 - len(size)) + size\n remapped_tag = GlobalTensor.get_remapped_tags(tag)\n if remapped_tag in GlobalTensor.IsInitEnclaveTensor:\n return\n else:\n GlobalTensor.IsInitEnclaveTensor[remapped_tag] = True\n eid = GlobalTensor.get_eid()\n GlobalTensor.EnclaveInterface.lib.InitTensor(eid, remapped_tag, size[0], size[1], size[2], size[3])\n\n @staticmethod\n def init_encrypted_tensor(tag, shape):\n GlobalTensor.encrypted_tensors[GlobalTensor.get_remapped_tags(tag)] = \\\n GlobalTensor.EnclaveInterface.create_encrypt_torch(shape)" }, { "identifier": "SecretBatchNorm2dLayer", "path": "python/layers/batch_norm_2d.py", "snippet": "class SecretBatchNorm2dLayer(SecretActivationLayer):\n # https://pytorch.org/docs/stable/nn.html#batchnorm2d\n\n BatchSize = None\n NumChannel = None\n ImgH = None\n ImgW = None\n WeightShape = None\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n \n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next, merge_own_tensors\n )\n \n self.ForwardFuncName = \"BatchNorm2d\"\n self.BackwardFuncName = \"DerBatchNorm2d\"\n self.PlainFunc = torch.nn.BatchNorm2d\n self.IsAffine = True\n self.momentum = 0.1\n self.IsCumulative = (self.momentum is None)\n self.epsilon = 1e-5\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.BatchNorm2d\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.BatchNorm2d\n # self.StoreInEnclave = False\n \n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = self.InputShape\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW = self.InputShape\n self.WeightShape = [self.NumChannel]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n \n\n # def init(self, start_enclave=True):\n \n # if self.sid == 2:\n # return\n # TensorLoader.init(self, start_enclave)\n\n # if self.is_enclave_mode:\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n # self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n # self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n # self.batchnorm_init(\n # self.LayerName,\n # \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n # \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n # \"mu\",\n # self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n # int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n # else:\n 
# self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n # self.PlainFunc = self.PlainFunc(self.InputShape[1])\n # self.PlainFunc.eval()\n # self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n # self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n # self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n # self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n # self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n # self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n # self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n # self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n # self.ForwardFunc.eval()\n\n def init(self, start_enclave=True):\n # if self.LayerName == \"Layer3.10.proxies.0.bn2\":\n # st()\n TensorLoader.init(self, start_enclave)\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.get_cpu(\"RunMean\").data.copy_(self.PlainFunc.running_mean.data)\n # inject sqrt(running_var) instead of running_var for precision\n self.get_cpu(\"RunVar\").data.copy_(self.PlainFunc.running_var.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n self.batchnorm_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n \"RunMean\", \"RunVar\", \"CurMean\", \"CurVar\",\n \"mu\",\n self.BatchSize, self.NumChannel, self.ImgH, self.ImgW,\n int(self.IsAffine), int(self.IsCumulative), self.momentum, self.epsilon)\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.PlainFunc.eval()\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_cpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_cpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.ForwardFunc.eval()\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = self.ForwardFunc(self.InputShape[1])\n self.PlainFunc = self.PlainFunc(self.InputShape[1])\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n self.ForwardFunc.running_mean.data.copy_(self.PlainFunc.running_mean.data)\n self.ForwardFunc.running_var.data.copy_(self.PlainFunc.running_var.data)\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.set_gpu(\"RunMean\", self.ForwardFunc.running_mean.data)\n self.set_gpu(\"RunVar\", self.ForwardFunc.running_var.data)\n self.PlainFunc.eval()\n self.ForwardFunc.cuda().eval()\n\n # def inject_params(self, params):\n # if self.sid == -2:\n # raise ValueError(\"S2 has no learnable parameters for injection\")\n # 
self.get_cpu(\"weight\").copy_(params.weight.data)\n # self.get_cpu(\"bias\").copy_(params.bias.data)\n # self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n # # inject sqrt(running_var) instead of running_var for precision\n # self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n # if self.is_enclave_mode:\n # self.transfer_cpu_to_enclave(\"weight\")\n # self.transfer_cpu_to_enclave(\"bias\")\n # self.transfer_cpu_to_enclave(\"RunMean\")\n # self.transfer_cpu_to_enclave(\"RunVar\")\n\n def inject_params(self, params):\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n if self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.Enclave]: \n self.get_cpu(\"weight\").copy_(params.weight.data)\n self.get_cpu(\"bias\").copy_(params.bias.data)\n self.get_cpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_cpu(\"RunVar\").copy_(params.running_var.data)\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.get_gpu(\"weight\").copy_(params.weight.data)\n self.get_gpu(\"bias\").copy_(params.bias.data)\n self.get_gpu(\"RunMean\").copy_(params.running_mean.data)\n self.get_gpu(\"RunVar\").copy_(params.running_var.data)\n\n def reset_plain_bn(self):\n # module = torch.BatchNorm2d()\n self.get_cpu(\"weight\").copy_(torch.ones(self.InputShape[1]))\n self.get_cpu(\"bias\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunMean\").copy_(torch.zeros(self.InputShape[1]))\n self.get_cpu(\"RunVar\").copy_(torch.ones(self.InputShape[1]))\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.transfer_cpu_to_enclave(\"RunMean\")\n self.transfer_cpu_to_enclave(\"RunVar\")\n\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n raise NotImplementedError\n if self.sid == -2:\n raise ValueError(\"S2 has no learnable parameters for injection\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n plain_layer.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n plain_layer.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n NeededTensorNames = [\n (\"input\", self.InputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"output\", self.OutputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # (\"DerBias\", self.WeightShape, None),\n (\"RunMean\", self.WeightShape, None),\n (\"CurMean\", self.WeightShape, None),\n (\"RunVar\", self.WeightShape, None),\n (\"CurVar\", self.WeightShape, None),\n (\"mu\", self.InputShape, None),\n ]\n else:\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"weight\", self.WeightShape, None),\n # (\"DerWeight\", self.WeightShape, None),\n (\"bias\", self.WeightShape, None),\n # 
(\"DerBias\", self.WeightShape, None),\n # (\"DerOutput\", self.OutputShape, None)\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n # def forward(self):\n # if self.sid == 2:\n # return\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # if self.is_enclave_mode:\n # self.forward_tensor_transfer()\n # self.batchnorm_forward(self.LayerName, int(False))\n # else:\n # self.forward_tensor_transfer()\n # self.requires_grad_on_cpu(\"input\")\n # self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n # self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n # self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # # running_var of PlainFunc is ^2 of that in the enclave\n # enclave_running_var = self.get_cpu(\"RunVar\")\n # self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # if self.LayerName == \"Layer2.0.downsample.bn\":\n # st()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n with NamedTimerInstance(f\" S{self.sid}: {self.LayerName} batchnorm_forward\", verbose_level=VerboseLevel.LAYER):\n self.batchnorm_forward(self.LayerName, int(False))\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.running_mean.data.copy_(self.get_gpu(\"RunMean\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_gpu(\"RunVar\")\n self.ForwardFunc.running_var.data.copy_(enclave_running_var)\n # st()\n # print(self.get_gpu(\"input\")[0,0,0])\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def backward(self):\n raise NotImplementedError\n if self.sid == 2:\n return\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n if self.is_enclave_mode:\n self.backward_tensor_transfer()\n self.batchnorm_backward(self.LayerName)\n else:\n self.backward_tensor_transfer()\n BackwardInput, BackwardWeight, BackwardBias = self.get_cpu(\"output\").grad_fn(self.get_cpu(\"DerOutput\"))\n self.set_cpu(\"DerInput\", BackwardInput.data)\n self.set_cpu(\"DerWeight\", BackwardWeight.data)\n self.set_cpu(\"DerBias\", BackwardBias.data)\n if list(self.get_cpu(\"DerWeight\").shape) != self.WeightShape:\n real_shape = self.get_cpu(\"DerWeight\").shape\n ideal_shape = self.WeightShape\n raise ValueError(\n f\"DerWeight is not of shape self.AffineShape: real: {real_shape}, ideal: {ideal_shape}\")\n if list(self.get_cpu(\"DerBias\").shape) != self.WeightShape:\n raise 
ValueError(\"DerBias is not of shape self.AffineShape\")\n\n def plain_forward(self, NeedBackward=False):\n if self.sid == 2:\n return\n if self.EnclaveMode in [ExecutionModeOptions.Enclave, ExecutionModeOptions.GPU]:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"bias\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.running_mean.data.copy_(self.get_cpu(\"RunMean\"))\n # self.PlainFunc.running_var.data.copy_(self.get_cpu(\"RunVar\"))\n # running_var of PlainFunc is ^2 of that in the enclave\n enclave_running_var = self.get_cpu(\"RunVar\")\n self.PlainFunc.running_var.data.copy_(enclave_running_var)\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n torch.set_num_threads(4)\n\n def plain_backward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"DerOutput\")\n GradFunction = self.PlainForwardResult.grad_fn\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n torch.set_num_threads(1)\n self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n torch.set_num_threads(4)\n\n def show_plain_error(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"DerInput\")\n self.make_sure_cpu_is_latest(\"DerWeight\")\n self.make_sure_cpu_is_latest(\"DerBias\")\n else:\n self.make_sure_cpu_is_latest(\"DerInput\")\n BackwardInput, BackwardWeight, BackwardBias = self.PlainBackwardResult\n err_input = compare_expected_actual(BackwardInput, self.get_cpu(\"DerInput\"), show_where_err=False, get_relative=True)\n err_weight = compare_expected_actual(BackwardWeight, self.get_cpu(\"DerWeight\"), show_where_err=False,\n get_relative=True)\n err_bias = compare_expected_actual(BackwardBias, self.get_cpu(\"DerBias\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error input: {err_input}, weight {err_weight}, bias: {err_bias}\")\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SecretFlattenLayer", "path": "python/layers/flatten.py", "snippet": "class SecretFlattenLayer(SecretNonlinearLayer):\n batch_size = None\n n_features = None\n input_shape = None\n output_shape = None\n\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.StoreInEnclave = False\n self.ForwardFuncName = \"Flatten\"\n self.BackwardFuncName = \"DerFlatten\"\n\n\n def init(self, start_enclave=True):\n super().init(start_enclave)\n self.ForwardFunc = lambda x: x.view(-1, self.n_features)\n self.PlainFunc = 
lambda x: x.view(-1, self.n_features)\n\n    def init_shape(self):\n        self.input_shape = self.PrevLayer.get_output_shape()\n        if len(self.input_shape) != 4:\n            raise ValueError(\"The dimension of the tensor from prev. layer has to be 4D.\")\n\n        self.batch_size = self.input_shape[0]\n        self.n_features = self.input_shape[1] * self.input_shape[2] * self.input_shape[3]\n        self.output_shape = [self.batch_size, self.n_features]\n\n    def get_output_shape(self):\n        return self.output_shape\n\n    def generate_tensor_name_list(self, force=False):\n        if not force and self.tensor_name_list:\n            return\n        if self.sid == 2:\n            self.tensor_name_list = {}\n            return\n\n        NeededTensorNames = [(\"output\", self.output_shape, None),\n                             (\"input\", self.input_shape, None),\n                             (\"DerInput\", self.input_shape, None),\n                             (\"DerOutput\", self.output_shape, None)\n                             ]\n\n        self.tensor_name_list = NeededTensorNames\n\n    def forward(self):\n        with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n            if self.EnclaveMode == ExecutionModeOptions.Enclave:\n                self.transfer_enclave_to_cpu(\"input\")\n                self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n                self.transfer_cpu_to_enclave(\"output\")\n            elif self.EnclaveMode == ExecutionModeOptions.CPU:\n                self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n            elif self.EnclaveMode == ExecutionModeOptions.GPU:\n                self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n\n        # self.forward_tensor_transfer()\n        # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n\n    def backward(self):\n        with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n            self.backward_tensor_transfer()\n            self.set_cpu(\"DerInput\", self.get_cpu(\"DerOutput\").view(self.input_shape))\n\n    def plain_forward(self, NeedBackward=False):\n        self.requires_grad_on_cpu(\"input\")\n        with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n            self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n\n    def plain_backward(self):\n        self.make_sure_cpu_is_latest(\"DerOutput\")\n        GradFunction = self.PlainForwardResult.grad_fn\n        with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n            self.PlainBackwardResult = GradFunction(self.get_cpu(\"DerOutput\"))\n\n    def show_plain_error(self):\n        if self.StoreInEnclave:\n            self.transfer_enclave_to_cpu(\"output\")\n        err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n        print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n        if self.PlainBackwardResult is None:\n            return\n        err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"), get_relative=True)\n        print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")" }, { "identifier": "SecretInputLayer", "path": "python/layers/input.py", "snippet": "class SecretInputLayer(SecretNonlinearLayer):\n    shape = None\n\n    def __init__(\n        self, sid, LayerName, input_shape, EnclaveMode, link_prev=True, link_next=True, \n        manually_register_prev=False, manually_register_next=False\n    ):\n        super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n        self.shape = input_shape\n\n    def link_tensors(self):\n        gt.link_tags(self.get_tag(\"input\", remap=False), self.get_tag(\"output\", remap=False))\n        super().link_tensors()\n\n    def init_shape(self):\n        return\n\n    def set_input(self, tensor):\n        self.set_tensor_cpu_gpu_enclave(\"input\", tensor)\n\n    def get_output_shape(self):\n        
return self.shape\n\n def forward(self):\n return\n\n def backward(self):\n return\n\n def plain_forward(self):\n return\n\n def plain_backward(self):\n return\n\n def show_plain_error(self):\n return\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.shape} output {self.NextLayer.LayerName:30}\")" }, { "identifier": "SecretMaxpool2dLayer", "path": "python/layers/maxpool2d.py", "snippet": "class SecretMaxpool2dLayer(SecretActivationLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, filter_hw, stride, padding, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFuncName = \"Maxpool2d\"\n self.BackwardFuncName = \"DerMaxpool2d\"\n self.filter_hw = filter_hw\n self.startmaxpool = False\n self.PlainFunc = torch.nn.MaxPool2d\n self.maxpoolpadding = padding\n self.stride = stride\n self.STORE_CHUNK_ELEM = 401408\n\n self.ForwardFunc = torch.nn.MaxPool2d\n\n if EnclaveMode == ExecutionModeOptions.Enclave :\n self.ForwardFunc = self.maxpoolfunc\n self.BackwardFunc = self.maxpoolbackfunc\n else:\n self.ForwardFunc = torch.nn.MaxPool2d\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n if len(self.InputShape) != 4:\n raise ValueError(\"Maxpooling2d apply only to 4D Tensor\")\n if self.InputShape[2] != self.InputShape[3]:\n raise ValueError(\"The input tensor has to be square images\")\n if self.InputShape[2] % self.stride != 0:\n raise ValueError(\"The input tensor needs padding for this filter size\")\n InputHw = self.InputShape[2]\n output_hw = InputHw // self.stride\n self.OutputShape = [self.InputShape[0], self.InputShape[1], output_hw, output_hw]\n self.HandleShape = self.InputShape\n # self.Shapefortranspose = [int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/262144)+1/2)), 262144, 1, 1]\n self.Shapefortranspose = [\n int(round(((self.InputShape[0] * self.InputShape[1] * self.InputShape[2] * self.InputShape[3])/self.STORE_CHUNK_ELEM)+1/2)), self.STORE_CHUNK_ELEM, 1, 1]\n\n\n def init(self, start_enclave=True):\n if self.EnclaveMode == ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.filter_hw, self.stride, self.maxpoolpadding)\n TensorLoader.init(self, start_enclave)\n\n if self.startmaxpool is False:\n self.startmaxpool = True\n return self.maxpoolinit(self.LayerName, \"inputtrans\", \"outputtrans\")\n else:\n self.ForwardFunc = self.ForwardFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n self.PlainFunc = self.PlainFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n\n # TensorLoader.init(self, start_enclave)\n # self.ForwardFunc = self.ForwardFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n # self.PlainFunc = self.PlainFunc(self.filter_hw, stride=self.stride, padding=self.maxpoolpadding)\n\n # TensorLoader.init(self, start_enclave)\n\n # def forward(self):\n # with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n # self.forward_tensor_transfer()\n # # self.requires_grad_on_cpu(\"input\")\n # if self.EnclaveMode == ExecutionModeOptions.Enclave:\n # self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n # st()\n\n # # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.Enclave:\n # # self.transfer_enclave_to_cpu(\"input\")\n # # if 
torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # # self.transfer_cpu_to_enclave(\"input\")\n # # self.transfer_enclave_to_cpu(\"input\")\n # # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n # # self.transfer_cpu_to_enclave(\"output\")\n # elif self.EnclaveMode == ExecutionModeOptions.CPU:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.CPU and torch.sum(self.get_cpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n # elif self.EnclaveMode == ExecutionModeOptions.GPU:\n # if self.PrevLayer.EnclaveMode is not ExecutionModeOptions.GPU and torch.sum(self.get_gpu(\"input\").abs()) == 0:\n # raise RuntimeError(f\"{self.LayerName}: SGX input not load\")\n # self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\")))\n # else:\n # raise RuntimeError\n\n def maxpoolfunc(self, namein, nameout):\n # assume row_stride and col_stride are both None or both not None\n # assume row_pad and col_pad are both None or both not None\n # if self.LayerName == \"Layer3.0.proxies.2.maxpool\":\n # print(self.LayerName, \"Input: \", self.get_cpu(\"input\")[0,0,0,:10])\n output = self.maxpoolnew(self.LayerName, namein, nameout, self.InputShape, self.OutputShape[2], self.OutputShape[3],\n self.filter_hw, self.filter_hw, self.stride, self.stride, self.maxpoolpadding,\n self.maxpoolpadding)\n # if self.LayerName == \"Layer3.0.proxies.2.maxpool\":\n # self.transfer_enclave_to_cpu(\"output\")\n # print(self.LayerName, \"Output: \", self.get_cpu(\"output\")[0,0,0,:])\n # self.transfer_cpu_to_enclave(\"output\")\n return output\n\n def maxpoolbackfunc(self, nameout, namedout, namedin):\n return self.maxpoolback(self.LayerName, namedout, namedin, self.InputShape, self.OutputShape[2], self.OutputShape[3],\n self.filter_hw, self.filter_hw, self.row_stride, self.col_stride, self.maxpoolpadding,\n self.maxpoolpadding)" }, { "identifier": "SecretOutputLayer", "path": "python/layers/output.py", "snippet": "class SecretOutputLayer(SecretNonlinearLayer):\n TargetShape = None\n loss = 0\n\n def __init__(\n self, sid, LayerName, EnclaveMode, inference=False, link_prev=True, link_next=True, \n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n self.ForwardFunc = torch.nn.CrossEntropyLoss()\n self.PlainFunc = torch.nn.CrossEntropyLoss()\n self.EnclaveMode = ExecutionModeOptions.CPU\n self.inference = inference\n\n\n def init_shape(self):\n self.InputShape = self.PrevLayer.get_output_shape()\n self.OutputShape = [1]\n self.TargetShape = [self.InputShape[0]] # number of Minibatch\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n self.tensor_name_list = {}\n return\n\n NeededTensorNames = [\n (\"output\", self.OutputShape, None),\n (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n (\"target\", self.TargetShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def load_target(self, tensor):\n self.set_tensor_with_name(\"target\", tensor)\n\n def get_loss(self):\n return self.loss\n \n def get_prediction(self):\n self.forward_tensor_transfer(\"input\")\n if 
torch.sum(self.get_cpu(\"input\").abs()) == 0:\n raise RuntimeError(\"SGX input not load\")\n return self.get_cpu(\"input\")\n\n def forward(self):\n if not self.inference:\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n self.forward_tensor_transfer()\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\")))\n loss = self.get_cpu(\"output\").item()\n self.loss = loss\n\n def backward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Backward\", verbose_level=VerboseLevel.LAYER):\n self.backward_tensor_transfer(transfer_tensor=\"output\")\n self.get_cpu(\"output\").backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def plain_forward(self):\n if not self.inference:\n self.make_sure_cpu_is_latest(\"input\")\n self.set_cpu(\"input\", self.get_cpu(\"input\").detach())\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"), self.get_cpu(\"target\"))\n\n def plain_backward(self):\n self.make_sure_cpu_is_latest(\"output\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainBackward\"):\n self.PlainForwardResult.backward()\n self.set_cpu(\"DerInput\", self.get_cpu(\"input\").grad)\n\n def show_plain_error(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"))\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n if self.PlainBackwardResult is None:\n return\n self.make_sure_cpu_is_latest(\"DerInput\")\n\n err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu(\"DerInput\"))\n print(f\"S{self.sid}: {self.LayerName} Backward Error {err}\")\n\n def print_connection_info(self):\n print(f\"{self.LayerName:30} shape{self.InputShape}{' ':30} input {self.PrevLayer.LayerName:30}\")" }, { "identifier": "SecretReLULayer", "path": "python/layers/relu.py", "snippet": "class SecretReLULayer(SecretActivationLayer):\n def __init__(\n self, sid, LayerName, EnclaveMode, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False, merge_own_tensors=False\n ):\n super().__init__(\n sid, LayerName, EnclaveMode, link_prev, link_next,\n manually_register_prev, manually_register_next, merge_own_tensors\n )\n self.ForwardFuncName = \"ReLU\"\n self.BackwardFuncName = \"DerReLU\"\n self.PlainFunc = torch.nn.ReLU\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.ForwardFunc = self.relufunc\n self.BackwardFunc = self.relubackfunc\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n self.ForwardFunc = torch.nn.ReLU\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.ReLU\n\n # if self.is_enclave_mode:\n # self.ForwardFunc = self.relufunc\n # self.BackwardFunc = self.relubackfunc\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.ReLU\n # self.StoreInEnclave = False\n\n def init(self, start_enclave=True):\n super().init(start_enclave)\n self.PlainFunc = self.PlainFunc()\n # if not self.is_enclave_mode:\n if self.EnclaveMode is not ExecutionModeOptions.Enclave:\n self.ForwardFunc = self.ForwardFunc()\n\n def relufunc(self, namein, nameout):\n return self.relunew(namein, nameout, self.InputShape)\n\n def relubackfunc(self, nameout, namedout, namedin):\n return 
self.relubackward(nameout, namedout, namedin, self.InputShape)\n\n def show_plain_error_forward(self):\n if self.sid == 2:\n return\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=False, show_values=False)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "init_communicate", "path": "python/sgx_net.py", "snippet": "def init_communicate(rank, master_address, master_port, backend='gloo'):\n os.environ['MASTER_ADDR'] = master_address\n os.environ['MASTER_PORT'] = master_port\n dist.init_process_group(backend, rank=rank, world_size=SecretConfig.worldSize)" }, { "identifier": "warming_up_cuda", "path": "python/sgx_net.py", "snippet": "def warming_up_cuda():\n device = torch.device(\"cuda:0\")\n # device = torch.device(\"cpu\")\n\n print(\"Execution device: \", device)\n print(\"PyTorch version: \", torch.__version__)\n print(\"CUDA version: \", torch.version.cuda)\n print(\"CUDA device:\", torch.cuda.get_device_name(0))\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 512, 512, 256, 4, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cuda double\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.cuda().type(SecretConfig.dtypeForCudaMm), dummy_b.cuda().type(SecretConfig.dtypeForCudaMm),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda dobule 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.double), dummy_b.cuda().type(torch.double),\n padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n with NamedTimerInstance(\"Warming up Cuda float 2nd\"):\n F.conv2d(dummy_a.cuda().type(torch.float), dummy_b.cuda().type(torch.float), padding=1)\n\n batch_size, n_input_channel, n_output_channel, img_hw, filter_hw = 64, 64, 64, 8, 3\n x_shape = [batch_size, n_input_channel, img_hw, img_hw]\n w_shape = [n_output_channel, n_input_channel, filter_hw, filter_hw]\n with NamedTimerInstance(\"Warming up Cpu\"):\n dummy_a = get_random_uniform(SecretConfig.PrimeLimit, x_shape).type(SecretConfig.dtypeForSave)\n dummy_b = get_random_uniform(SecretConfig.PrimeLimit, w_shape).type(SecretConfig.dtypeForSave)\n F.conv2d(dummy_a.type(SecretConfig.dtypeForCpuOp), dummy_b.type(SecretConfig.dtypeForCpuOp),\n padding=1)\n\n with NamedTimerInstance(\"Warming up CppExtension\"):\n GlobalCppExtension.get_conv2d_cudnn()" }, { "identifier": "SecretNeuralNetwork", "path": "python/sgx_net.py", "snippet": "class SecretNeuralNetwork(TensorLoader):\n nn_name = None\n layers = None\n\n def __init__(self, sid, nn_name):\n super().__init__()\n self.sid = sid\n self.init(start_enclave=False)\n self.nn_name = nn_name\n\n def set_layers(self, layers):\n self.layers = layers\n\n if not isinstance(self.layers[0], SecretInputLayer):\n raise ValueError(\"The first layer has to be input layer\")\n if not isinstance(self.layers[-1], SecretOutputLayer):\n raise ValueError(\"The last layer has to be output layer\")\n \n for i in range(len(self.layers) - 1):\n PrevLayer = self.layers[i]\n NextLayer = self.layers[i + 1]\n if not PrevLayer.manually_register_next:\n PrevLayer.register_next_layer(NextLayer)\n if not 
NextLayer.manually_register_prev:\n NextLayer.register_prev_layer(PrevLayer)\n\n \n for layer in self.layers:\n # print(f\"Init_shape/link layer {layer.LayerName}\")\n layer.set_eid(self.get_eid())\n layer.init_shape()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n layer.link_tensors()\n # print(layer.LayerName)\n # layer.print_tensor_link_relation()\n # if layer.LayerName in [\"Layer1.0.weighted_add\", \"Layer1.0.proxies.0.bn\"]:\n # st()\n \n for idx, layer in enumerate(self.layers):\n # print(f\"Init layer {layer.LayerName}\")\n # if layer.LayerName == \"Layer1.0.main.relu2\":\n # st()\n layer.init(start_enclave=False)\n # if idx > 3:\n # print(layer.LayerName, self.layers[4].get_cpu(\"input\").shape, self.layers[4].PrevLayer.LayerName)\n\n def execute_for_each_layer(self, func, reverse=False):\n layers = self.layers[::-1] if reverse else self.layers\n for layer in layers:\n # print(f\"SID: {self.sid} {layer.LayerName}, {func}\")\n if self.sid == 2 and layer.IsDummyForS2:\n continue\n # print(\"Processing \", layer.LayerName)\n func(layer)\n \n # st()\n\n def classifier_output(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} classifier_output\"):\n self.forward()\n if self.sid == 2:\n return\n # layers: input_layer, ..., fc_layer, output_layer\n last_fc = self.layers[-2]\n last_fc.transfer_enclave_to_cpu(\"output\")\n outputs = last_fc.get_cpu(\"output\")\n _, predicted = torch.max(outputs.data, 1)\n return predicted\n\n def get_loss(self):\n return self.layers[-1].get_loss()\n\n def forward_with_time(self):\n def run_forward(layer):\n layer.forward()\n t0 = time()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n t1 = time()\n # time in ms\n elapse_time = (t1 - t0) * (10 ** 3) \n return elapse_time\n\n def forward(self):\n def run_forward(layer):\n layer.forward()\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} Forward\"):\n self.execute_for_each_layer(run_forward)\n\n def backward(self):\n def run_backward(layer):\n layer.backward()\n with NamedTimerInstance(f\"S{self.sid}: {self.nn_name} Backward\"):\n self.execute_for_each_layer(run_backward, reverse=True)\n\n def plain_forward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainForward\"):\n self.execute_for_each_layer(lambda x: x.plain_forward())\n\n def plain_backward(self):\n with NetworkNamedTimerInstance(f\"S{self.sid}: {self.nn_name} PlainBackward\"):\n self.execute_for_each_layer(lambda x: x.plain_backward(), reverse=True)\n\n def show_plain_error(self):\n self.execute_for_each_layer(lambda x: x.show_plain_error())" }, { "identifier": "SgdOptimizer", "path": "python/sgx_net.py", "snippet": "class SgdOptimizer(TensorLoader):\n def __init__(self, sid):\n super().__init__()\n self.sid = sid\n self.learning_rate = 0.05\n self.momentum = 0.9\n self.weight_decay = 5e-4\n self.momentum_init_flags = defaultdict(lambda: False)\n self.ideal_momentum_buf = {}\n\n self.lr_gamma = 0.5\n self.lr_step = 30\n self.step_counter = 0\n\n self.layers = None\n\n def set_layers(self, layers):\n self.layers = layers\n\n def generate_tensor_name_list(self, force=False):\n # Run if forced or self.tensor_name_list is not generated\n if not force and self.tensor_name_list:\n return\n if self.sid == 2:\n return\n\n self.tensor_name_list = []\n for layer in self.layers:\n for (DerName, ParamName, shape) in layer.LearnableParamsList:\n self.tensor_name_list.append((ParamName + 
\"Momentum\", shape, None))\n\n def update_params(self, test_with_ideal=False):\n if self.sid == 2:\n return\n for layer in self.layers:\n self.update_params_in_layer(layer, test_with_ideal=test_with_ideal)\n\n def update_params_in_layer(self, layer, test_with_ideal=False):\n # ref: https://github.com/pytorch/pytorch/blob/master/torch/optim/sgd.py\n if layer.LearnableParamsList is None:\n return\n\n task_ids = []\n for (der_name, param_name, shape) in layer.LearnableParamsList:\n momentum_name = param_name + \"Momentum\"\n global_momentum_name = layer.name_modifier(momentum_name)\n\n if layer.StoreInEnclave:\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n first_momentum = not self.momentum_init_flags[global_momentum_name]\n if first_momentum:\n # print(\"FIRST MOMENTUM\")\n self.momentum_init_flags[global_momentum_name] = True\n layer.init_enclave_tensor(momentum_name, shape)\n task_id = layer.sgd_update(param_name=param_name, grad_name=der_name, momentum_name=momentum_name,\n lr=self.learning_rate, momentum=self.momentum,\n weight_decay=self.weight_decay,\n first_momentum=first_momentum, is_async=True)\n if test_with_ideal:\n while not self.get_task_status(task_id):\n pass\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.transfer_enclave_to_cpu(momentum_name)\n layer.transfer_enclave_to_cpu(param_name)\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n else:\n task_ids.append(task_id)\n else:\n DerCpu = layer.get_cpu(der_name)\n ParamsCpu = layer.get_cpu(param_name)\n\n if test_with_ideal:\n ideal_p, ideal_momentum = self.ideal_update_params_with_name(layer, der_name, param_name, shape)\n\n DerCpu.add_(self.weight_decay, ParamsCpu)\n\n if not self.momentum_init_flags[global_momentum_name]:\n self.momentum_init_flags[global_momentum_name] = True\n layer.generate_cpu_tensor(momentum_name, shape)\n layer.get_cpu(momentum_name).copy_(DerCpu)\n MomentumCpu = layer.get_cpu(momentum_name)\n else:\n MomentumCpu = layer.get_cpu(momentum_name)\n MomentumCpu.mul_(self.momentum).add_(1, DerCpu)\n\n ParamsCpu.add_(-self.learning_rate, MomentumCpu)\n\n if test_with_ideal:\n param_err = compare_expected_actual(ideal_p, layer.get_cpu(param_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Param Error: {param_err}\")\n momentum_err = compare_expected_actual(ideal_momentum, layer.get_cpu(momentum_name), get_relative=True)\n print(f\"S{self.sid}: {layer.LayerName} Momentum Error: {momentum_err}\")\n\n # Wait for all tasks to be finished\n for task_id in task_ids:\n while not self.get_task_status(task_id):\n pass\n\n def ideal_update_params_with_name(self, layer, der_name, param_name, shape):\n weight_decay = self.weight_decay\n momentum = self.momentum\n dampening = 0\n nesterov = False\n lr = self.learning_rate\n\n global_momentum_name = layer.name_modifier(param_name + 'Momentum')\n\n if layer.StoreInEnclave:\n layer.transfer_enclave_to_cpu(der_name)\n layer.transfer_enclave_to_cpu(param_name)\n d_p = torch.clone(layer.get_cpu(der_name)).detach()\n p = torch.clone(layer.get_cpu(param_name)).detach()\n\n if weight_decay != 0:\n d_p.add_(weight_decay, p)\n if global_momentum_name not in self.ideal_momentum_buf:\n buf 
= self.ideal_momentum_buf[global_momentum_name] = torch.clone(d_p).detach()\n else:\n buf = self.ideal_momentum_buf[global_momentum_name]\n buf.mul_(momentum).add_(1 - dampening, d_p)\n if nesterov:\n d_p = d_p.add(momentum, buf)\n else:\n d_p = buf\n p.add_(-lr, d_p)\n\n return p, buf" }, { "identifier": "SGXLinearBase", "path": "python/layers/sgx_linear_base.py", "snippet": "class SGXLinearBase(SecretLayerBase):\n batch_size = None\n InputShape = None\n WeightShape = None\n OutputShape = None\n\n def __init__(\n self, sid, LayerName, EnclaveMode, batch_size, n_output_features, \n n_input_features=None, is_enclave_mode=False, link_prev=True, link_next=True,\n manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.ForwardFuncName = \"SGXLinear\"\n self.BackwardFuncName = \"DerSGXLinear\"\n self.PlainFunc = torch.nn.Linear\n self.is_enclave_mode = is_enclave_mode\n self.n_output_features = n_output_features\n self.n_input_features = n_input_features\n self.batch_size = batch_size\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.Linear\n # if self.is_enclave_mode:\n # self.StoreInEnclave = True\n # else:\n # self.ForwardFunc = torch.nn.Linear\n # self.StoreInEnclave = False\n\n def init_shape(self):\n self.WeightShape = self.DerWeightShape = [self.n_output_features, self.n_input_features]\n self.BiasShape = self.DerBiasShape = [self.n_output_features]\n if self.n_input_features is None:\n self.InputShape = self.PrevLayer.get_output_shape()\n else:\n self.InputShape = self.DerInputShape = [self.batch_size, self.n_input_features]\n self.OutputShape = self.DerOutputShape = [self.batch_size, self.n_output_features]\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.WeightShape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.WeightShape),\n ]\n\n def init(self, start_enclave=True):\n TensorLoader.init(self, start_enclave)\n \n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(self.n_input_features, self.n_output_features)\n self.get_cpu(\"weight\").data.copy_(self.PlainFunc.weight.data)\n self.get_cpu(\"bias\").data.copy_(self.PlainFunc.bias.data)\n self.transfer_cpu_to_enclave(\"weight\")\n self.transfer_cpu_to_enclave(\"bias\")\n self.sgx_linear_init(\n self.LayerName,\n \"input\", \"output\", \"weight\", \"bias\",\n # \"DerInput\", \"DerOutput\", \"DerWeight\", \"DerBias\",\n self.batch_size, self.n_input_features, self.n_output_features)\n else:\n self.ForwardFunc = self.ForwardFunc(self.n_input_features, self.n_output_features)\n self.PlainFunc = self.PlainFunc(self.n_input_features, self.n_output_features)\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n if self.EnclaveMode is ExecutionModeOptions.CPU:\n self.set_cpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_cpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.set_gpu(\"weight\", list(self.ForwardFunc.parameters())[0].data)\n self.set_gpu(\"bias\", list(self.ForwardFunc.parameters())[1].data)\n self.ForwardFunc.cuda()\n # print(\"======== SGX linear init finish\")\n\n def link_tensors(self):\n super().link_tensors()\n\n def init_params(self):\n cpu_w = 
torch.zeros(self.w_shape)\n torch.nn.init.xavier_normal_(cpu_w, 1)\n self.set_tensor_cpu_enclave(\"weight\", cpu_w)\n cpu_b = torch.zeros(self.b_shape)\n torch.nn.init.constant_(cpu_b, 0)\n self.set_tensor_cpu_enclave(\"bias\", cpu_b)\n\n def get_output_shape(self):\n return self.OutputShape\n\n def inject_params(self, params):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n cpu_w = self.get_cpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n self.transfer_cpu_to_enclave(\"weight\")\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n self.transfer_cpu_to_enclave(\"bias\")\n elif self.EnclaveMode is ExecutionModeOptions.CPU:\n cpu_w = self.get_cpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n cpu_w = self.get_gpu(\"weight\")\n cpu_w.copy_(params.weight.data)\n cpu_b = self.get_gpu(\"bias\")\n cpu_b.copy_(params.bias.data)\n\n def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n self.make_sure_cpu_is_latest(\"weight\")\n plain_layer.weight.data.copy_(self.get_cpu(\"weight\"))\n self.make_sure_cpu_is_latest(\"bias\")\n plain_layer.bias.data.copy_(self.get_cpu(\"bias\"))\n\n def generate_tensor_name_list(self, force=False):\n if not force and self.tensor_name_list:\n return\n NeededTensorNames = [(\"output\", self.OutputShape, None),\n # (\"DerInput\", self.InputShape, None),\n (\"input\", self.InputShape, None),\n # (\"DerOutput\", self.OutputShape, None),\n (\"weight\", self.WeightShape, None),\n (\"bias\", self.BiasShape, None),\n ]\n\n self.tensor_name_list = NeededTensorNames\n\n def forward(self):\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.forward_tensor_transfer()\n self.sgx_linear_forward(self.LayerName)\n elif self.EnclaveMode == ExecutionModeOptions.CPU:\n self.forward_tensor_transfer()\n self.requires_grad_on_cpu(\"input\")\n self.ForwardFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.ForwardFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n elif self.EnclaveMode == ExecutionModeOptions.GPU:\n self.forward_tensor_transfer()\n self.ForwardFunc.weight.data.copy_(self.get_gpu(\"weight\"))\n self.ForwardFunc.bias.data.copy_(self.get_gpu(\"bias\"))\n self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n def plain_forward(self, NeedBackward=False):\n if self.is_enclave_mode:\n self.make_sure_cpu_is_latest(\"input\")\n self.make_sure_cpu_is_latest(\"weight\")\n self.make_sure_cpu_is_latest(\"bias\")\n # self.requires_grad_on_cpu(\"input\")\n self.PlainFunc.weight.data.copy_(self.get_cpu(\"weight\"))\n self.PlainFunc.bias.data.copy_(self.get_cpu(\"bias\"))\n else:\n self.make_sure_cpu_is_latest(\"input\")\n self.requires_grad_on_cpu(\"input\")\n with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n # torch.set_num_threads(1)\n self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n # torch.set_num_threads(4)\n\n def show_plain_error_forward(self):\n self.make_sure_cpu_is_latest(\"output\")\n err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")" }, { "identifier": "SGXConvBase", "path": "python/layers/sgx_conv_base.py", "snippet": "class 
SGXConvBase(SecretLayerBase):\n batch_size = None\n pytorch_x_shape, sgx_x_shape = None, None\n pytorch_w_shape, sgx_w_shape = None, None\n bias_shape = None\n pytorch_y_shape, sgx_y_shape = None, None\n\n def __init__(\n self, sid, LayerName, EnclaveMode,\n n_output_channel, filter_hw, stride, padding, batch_size=None, n_input_channel=None,\n img_hw=None, bias=True,\n is_enclave_mode=False, link_prev=True, link_next=True, manually_register_prev=False, manually_register_next=False\n ):\n super().__init__(sid, LayerName, EnclaveMode, link_prev, link_next, manually_register_prev, manually_register_next)\n\n self.ForwardFuncName = \"SGXConv\"\n self.BackwardFuncName = \"DerSGXConv\"\n self.PlainFunc = torch.nn.Conv2d\n self.is_enclave_mode = is_enclave_mode\n self.batch_size = batch_size\n self.n_input_channel = n_input_channel\n self.n_output_channel = n_output_channel\n self.img_hw = img_hw\n self.filter_hw = filter_hw\n self.padding = padding\n self.stride = stride\n self.bias = bias\n\n if EnclaveMode is ExecutionModeOptions.CPU or EnclaveMode is ExecutionModeOptions.GPU:\n self.ForwardFunc = torch.nn.Conv2d\n\n # --------------\n # Add BIAS!!!!!\n # --------------\n\n def init_shape(self):\n if self.batch_size is None and self.PrevLayer is not None:\n self.pytorch_x_shape = self.PrevLayer.get_output_shape()\n self.batch_size, self.n_input_channel, self.img_hw, _ = self.pytorch_x_shape\n else:\n self.pytorch_x_shape = [self.batch_size, self.n_input_channel, self.img_hw, self.img_hw]\n # print(self.LayerName)\n # st()\n # BHWC\n self.sgx_x_shape = [self.pytorch_x_shape[0], self.pytorch_x_shape[2], self.pytorch_x_shape[3], self.pytorch_x_shape[1]]\n # pytorch weight is out * in * h * w\n self.pytorch_w_shape = [self.n_output_channel, self.n_input_channel, self.filter_hw, self.filter_hw]\n # w shape is in * w * h * out, the transpose of out * h * w * in\n self.sgx_w_shape = [self.n_output_channel, self.filter_hw, self.filter_hw, self.n_input_channel]\n # BCHW\n self.pytorch_y_shape = calc_conv2d_output_shape_stride(self.pytorch_x_shape, self.pytorch_w_shape, self.padding, self.stride)\n # BHWC\n self.sgx_y_shape = [self.pytorch_y_shape[0], self.pytorch_y_shape[2], self.pytorch_y_shape[3], self.pytorch_y_shape[1]]\n self.bias_shape = [self.n_output_channel]\n\n # print(\n # f\"Init_shape pytorch_input {self.pytorch_x_shape}, sgx_input {self.sgx_x_shape}, \"\n # f\"pytorch_output {self.pytorch_y_shape}, sgx_output {self.sgx_y_shape}, \"\n # f\"pytorch_weight {self.pytorch_w_shape}, sgx_weight {self.sgx_w_shape}, \"\n # f\"bias {self.bias_shape}\"\n # )\n\n self.LearnableParamsList = [\n LearnableParamTuple(dw_name=\"DerWeight\", w_name=\"weight\", shape=self.sgx_w_shape),\n LearnableParamTuple(dw_name=\"DerBias\", w_name=\"bias\", shape=self.bias_shape),\n ]\n\n def init(self, start_enclave=True):\n # print(f\"Weight shape {self.sgx_w_shape}\")\n TensorLoader.init(self, start_enclave)\n \n \n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n self.PlainFunc = self.PlainFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n weight_pytorch_form = self.PlainFunc.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n self.get_cpu(\"weight\").data.copy_(weight_tf_form)\n self.transfer_cpu_to_enclave(\"weight\")\n # Bias\n if self.bias:\n bias_data = self.PlainFunc.bias.data\n else:\n bias_data = torch.zeros(self.bias_shape)\n self.get_cpu(\"bias\").data.copy_(bias_data)\n self.transfer_cpu_to_enclave(\"bias\")\n 
self.sgx_conv_init(\n self.LayerName,\n \"sgx_input\", \"sgx_output\", \"weight\", \"bias\",\n # \"sgx_DerInput\", \"sgx_DerOutput\", \"DerWeight\", \"DerBias\",\n # \"input\", \"output\", \"weight\", \n # \"DerInput\", \"DerOutput\", \"DerWeight\", \n self.batch_size, self.img_hw, self.img_hw, self.n_input_channel, \n self.pytorch_y_shape[2], self.pytorch_y_shape[3], self.n_output_channel, \n self.filter_hw, self.padding, self.stride)\n elif self.EnclaveMode in[ ExecutionModeOptions.CPU, ExecutionModeOptions.GPU]:\n self.ForwardFunc = self.ForwardFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n self.PlainFunc = self.PlainFunc(\n self.n_input_channel, self.n_output_channel, self.filter_hw,\n self.stride, self.padding, bias=self.bias)\n self.ForwardFunc.weight.data.copy_(self.PlainFunc.weight.data)\n weight_pytorch_form = list(self.ForwardFunc.parameters())[0].data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n if self.EnclaveMode is ExecutionModeOptions.CPU:\n self.set_cpu(\"weight\", weight_tf_form)\n if self.bias:\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n bias_data = self.PlainFunc.bias.data\n self.set_cpu(\"bias\", bias_data)\n elif self.EnclaveMode is ExecutionModeOptions.GPU:\n self.set_gpu(\"weight\", weight_tf_form)\n if self.bias:\n self.ForwardFunc.bias.data.copy_(self.PlainFunc.bias.data)\n bias_data = self.PlainFunc.bias.data\n self.set_gpu(\"bias\", bias_data)\n self.ForwardFunc.cuda()\n\n\n def link_tensors(self):\n super().link_tensors()\n\n def init_params(self):\n cpu_w = torch.zeros(self.sgx_w_shape)\n torch.nn.init.xavier_normal_(cpu_w, 1)\n self.set_tensor_cpu_gpu_enclave(\"weight\", cpu_w)\n\n def get_output_shape(self):\n return self.pytorch_y_shape\n \n def weight_pytorch2tf(self, weight_pytorch_form):\n # weight_pytorch_form is out * in * h * w\n # out * (h * w) * in, \n # h and w dont transpose\n # weight_tf_form = weight_pytorch_form.permute(1,3,2,0).contiguous()\n weight_tf_form = weight_pytorch_form.permute(0,2,3,1).contiguous()\n return weight_tf_form\n\n def weight_tf2pytorch(self, weight_tf_form):\n # weight_tf_form is out * (h * w) * in, the transpose of out * (h * w) * in\n # out * in * h * w\n # h and w dont transpose\n # weight_pytorch_form = weight_tf_form.permute(3, 0, 2, 1).contiguous()\n weight_pytorch_form = weight_tf_form.permute(0,3,1,2).contiguous()\n return weight_pytorch_form\n\n def feature_pytorch2tf(self, tensor_pytorch_form):\n # tensor_pytorch_form is b * in * h * w\n # b * h * w * in\n tensor_tf_form = tensor_pytorch_form.permute(0, 2, 3, 1).contiguous()\n return tensor_tf_form\n \n def feature_tf2pytorch(self, tensor_tf_form):\n # tensor_tf_form is b * h * w * in\n # b * in * h * w\n tensor_pytorch_form = tensor_tf_form.permute(0, 3, 1, 2).contiguous()\n return tensor_pytorch_form\n\n def inject_params(self, params):\n if self.EnclaveMode is ExecutionModeOptions.Enclave:\n cpu_w = self.get_cpu(\"weight\")\n weight_pytorch_form = params.weight.data\n weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n cpu_w.copy_(weight_tf_form)\n self.transfer_cpu_to_enclave(\"weight\")\n\n # bias\n assert (\n (self.bias and params.bias is not None) or\n (not self.bias and params.bias is None)\n )\n if self.bias:\n bias_data = params.bias.data\n else:\n bias_data = torch.zeros(self.n_output_channel)\n cpu_b = self.get_cpu(\"bias\")\n cpu_b.copy_(bias_data)\n self.transfer_cpu_to_enclave(\"bias\")\n elif self.EnclaveMode is 
ExecutionModeOptions.CPU:\n            weight_pytorch_form = params.weight.data\n            weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n            self.get_cpu(\"weight\").copy_(weight_tf_form)\n            # bias\n            assert (\n                (self.bias and params.bias is not None) or\n                (not self.bias and params.bias is None)\n            )\n            if self.bias:\n                self.get_cpu(\"bias\").copy_(params.bias.data)\n\n            # Move weight to ForwardFunc\n            weight_tf_form = self.get_cpu(\"weight\")\n            weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n            self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n\n        elif self.EnclaveMode is ExecutionModeOptions.GPU:\n            weight_pytorch_form = params.weight.data\n            weight_tf_form = self.weight_pytorch2tf(weight_pytorch_form)\n            self.get_gpu(\"weight\").copy_(weight_tf_form)\n            # bias\n            assert (\n                (self.bias and params.bias is not None) or\n                (not self.bias and params.bias is None)\n            )\n            if self.bias:\n                self.get_gpu(\"bias\").copy_(params.bias.data)\n\n            # Move weight to ForwardFunc\n            weight_tf_form = self.get_gpu(\"weight\")\n            weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n            self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n\n\n    def inject_to_plain(self, plain_layer: torch.nn.Module) -> None:\n        self.make_sure_cpu_is_latest(\"weight\")\n        weight_tf_form = self.get_cpu(\"weight\")\n        weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n        plain_layer.weight.data.copy_(weight_pytorch_form)\n\n        assert (\n            (self.bias and plain_layer.bias is not None) or\n            (not self.bias and plain_layer.bias is None)\n        )\n        if self.bias:\n            self.make_sure_cpu_is_latest(\"bias\")\n            bias_data = self.get_cpu(\"bias\")\n            plain_layer.bias.data.copy_(bias_data)\n\n    def generate_tensor_name_list(self, force=False):\n        if not force and self.tensor_name_list:\n            return\n        NeededTensorNames = [(\"output\", self.pytorch_y_shape, None), (\"sgx_output\", self.sgx_y_shape, None),\n                             (\"DerInput\", self.pytorch_x_shape, None), (\"sgx_DerInput\", self.sgx_x_shape, None),\n                             (\"input\", self.pytorch_x_shape, None), (\"sgx_input\", self.sgx_x_shape, None),\n                             (\"DerOutput\", self.pytorch_y_shape, None), (\"sgx_DerOutput\", self.sgx_y_shape, None),\n                             (\"weight\", self.sgx_w_shape, None),\n                             (\"bias\", self.bias_shape, None),\n                             ]\n        self.tensor_name_list = NeededTensorNames\n\n\n    def forward(self):\n        with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} Forward\", verbose_level=VerboseLevel.LAYER):\n            self.forward_tensor_transfer(\"input\")\n            if self.EnclaveMode == ExecutionModeOptions.Enclave:\n                \n                # \"input\" is pytorch form\n                with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n                    if self.PrevLayer.EnclaveMode is ExecutionModeOptions.Enclave:\n                        self.transfer_enclave_to_cpu(\"input\")\n                    input_pytorch_form = self.get_cpu(\"input\")\n                    \n                    if torch.sum(self.get_cpu(\"input\").abs()) == 0:\n                        print(self.LayerName)\n                        raise RuntimeError(\"SGX input not load\")\n                    input_tf_form = self.feature_pytorch2tf(input_pytorch_form)\n                    self.set_cpu(\"sgx_input\", input_tf_form)\n                    self.transfer_cpu_to_enclave(\"sgx_input\")\n                    # self.forward_tensor_transfer(\"sgx_input\")\n                    # print(self.get_cpu(\"sgx_input\").squeeze())\n                \n                with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} sgx_conv_forward\", verbose_level=VerboseLevel.LAYER):\n                    # if self.LayerName == \"Layer2.0.downsample.conv\":\n                    # st()\n                    self.sgx_conv_forward(self.LayerName)\n                \n                with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} Output Postprocess\", verbose_level=VerboseLevel.LAYER):\n                    
self.make_sure_cpu_is_latest(\"sgx_output\")\n                    output_tf_form = self.get_cpu(\"sgx_output\")\n                    output_pytorch_form = self.feature_tf2pytorch(output_tf_form)\n                    self.set_cpu(\"output\", output_pytorch_form)\n                    self.transfer_cpu_to_enclave(\"output\")\n            elif self.EnclaveMode == ExecutionModeOptions.CPU:\n                with NamedTimerInstance(f\"  S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n                    self.forward_tensor_transfer()\n                    # self.requires_grad_on_cpu(\"input\")\n                with NamedTimerInstance(f\"  S{self.sid}: {self.LayerName} Weight Transfer\", verbose_level=VerboseLevel.LAYER):\n                    with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} get weight_tf_form\", verbose_level=VerboseLevel.LAYER):\n                        weight_tf_form = self.get_cpu(\"weight\")\n                    with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} weight_tf2pytorch\", verbose_level=VerboseLevel.LAYER):\n                        weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n                    with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} copy data\", verbose_level=VerboseLevel.LAYER):\n                        self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n                with NamedTimerInstance(f\"  S{self.sid}: {self.LayerName} CPU conv forward\", verbose_level=VerboseLevel.LAYER):\n                    self.set_cpu(\"output\", self.ForwardFunc(self.get_cpu(\"input\")))\n            elif self.EnclaveMode == ExecutionModeOptions.GPU:\n                with NamedTimerInstance(f\"  S{self.sid}: {self.LayerName} Input Preprocess\", verbose_level=VerboseLevel.LAYER):\n                    self.forward_tensor_transfer()\n                    # self.requires_grad_on_cpu(\"input\")\n                with NamedTimerInstance(f\"  S{self.sid}: {self.LayerName} Weight Transfer\", verbose_level=VerboseLevel.LAYER):\n                    with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} get weight_tf_form\", verbose_level=VerboseLevel.LAYER):\n                        weight_tf_form = self.get_gpu(\"weight\")\n                    with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} weight_tf2pytorch\", verbose_level=VerboseLevel.LAYER):\n                        weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n                    with NamedTimerInstance(f\"    S{self.sid}: {self.LayerName} copy data\", verbose_level=VerboseLevel.LAYER):\n                        self.ForwardFunc.weight.data.copy_(weight_pytorch_form)\n                with NamedTimerInstance(f\"  S{self.sid}: {self.LayerName} GPU conv forward\", verbose_level=VerboseLevel.LAYER):\n                    self.set_gpu(\"output\", self.ForwardFunc(self.get_gpu(\"input\").type(SecretConfig.dtypeForCpuOp)))\n\n\n    def plain_forward(self, NeedBackward=False):\n        if self.EnclaveMode == ExecutionModeOptions.Enclave:\n            self.make_sure_cpu_is_latest(\"input\")\n            self.make_sure_cpu_is_latest(\"weight\")\n            if self.bias:\n                self.make_sure_cpu_is_latest(\"bias\")\n            # self.requires_grad_on_cpu(\"input\")\n            weight_tf_form = self.get_cpu(\"weight\")\n            weight_pytorch_form = self.weight_tf2pytorch(weight_tf_form)\n            self.PlainFunc.weight.data.copy_(weight_pytorch_form)\n            if self.bias:\n                bias_data = self.get_cpu(\"bias\")\n                self.PlainFunc.bias.data.copy_(bias_data)\n        elif self.EnclaveMode in [ExecutionModeOptions.CPU, ExecutionModeOptions.GPU]:\n            self.make_sure_cpu_is_latest(\"input\")\n            self.requires_grad_on_cpu(\"input\")\n        with NamedTimerInstance(f\"S{self.sid}: {self.LayerName} PlainForward\"):\n            # torch.set_num_threads(1)\n            self.PlainForwardResult = self.PlainFunc(self.get_cpu(\"input\"))\n            # torch.set_num_threads(4)\n\n    def show_plain_error_forward(self):\n        err = compare_expected_actual(self.PlainForwardResult, self.get_cpu(\"output\"), get_relative=True)\n        print(f\"S{self.sid}: {self.LayerName} Forward Error: {err}\")\n\n    def print_connection_info(self):\n        
print(f\"{self.LayerName:20} shape{self.pytorch_x_shape}{' ':20} mode{self.EnclaveMode}{' ':20} input {self.PrevLayer.LayerName:20} output {self.NextLayer.LayerName:20}\")" }, { "identifier": "ExecutionModeOptions", "path": "python/utils/basic_utils.py", "snippet": "class ExecutionModeOptions(Enum):\n Enclave = 1\n CPU = 2\n GPU = 3" }, { "identifier": "Logger", "path": "python/utils/logger_utils.py", "snippet": "class Logger(object):\n logfile_path = \"logfile.log\"\n\n def __init__(self):\n self.terminal = sys.stdout\n self.log = open(self.logfile_path, \"a\")\n\n def reset_logfile(self, path):\n self.logfile_path = path\n self.log = open(self.logfile_path, \"a\")\n\n def write(self, message):\n self.terminal.write(message)\n self.log.write(message)\n\n def flush(self):\n #this flush method is needed for python 3 compatibility.\n #this handles the flush command by doing nothing.\n #you might want to specify some extra behavior here.\n # pass\n self.terminal.flush()\n self.log.flush()" }, { "identifier": "NamedTimerInstance", "path": "python/utils/timer_utils.py", "snippet": "class NamedTimerInstance(object):\n def __init__(self, name, verbose_level=VerboseLevel.EVERY):\n self.name = name\n self.verbose_level = verbose_level\n\n def __enter__(self):\n return NamedTimer.start(self.name, verbose_level=self.verbose_level)\n ...\n\n def __exit__(self, *args):\n NamedTimer.end(self.name)\n ..." }, { "identifier": "VerboseLevel", "path": "python/utils/timer_utils.py", "snippet": "class VerboseLevel(IntEnum):\n EVERY = 1\n LAYER = 2\n RUN = 3\n EPOCH = 4" }, { "identifier": "NamedTimer", "path": "python/utils/timer_utils.py", "snippet": "class NamedTimer(object):\n __instance = None\n\n @staticmethod\n def get_instance():\n if NamedTimer.__instance is None:\n NamedTimer()\n return NamedTimer.__instance\n\n def __init__(self):\n NamedTimer.__instance = self\n self.timers = {}\n self.verbose_level = VerboseLevel.EVERY\n\n @staticmethod\n def start_timer(name, **kwargs):\n NamedTimer.get_instance().timers[name] = Timer(name, **kwargs)\n return NamedTimer.get_instance().timers[name]\n\n @staticmethod\n def start(name, **kwargs):\n return NamedTimer.get_instance().start_timer(name, **kwargs)\n\n @staticmethod\n def end_timer(name, **kwargs):\n NamedTimer.get_instance().timers[name].end(**kwargs)\n\n @staticmethod\n def end(name, tmp_name=None):\n # print(NamedTimer.get_instance().timers[name].verbose_level, NamedTimer.get_instance().verbose_level)\n NamedTimer.get_instance().end_timer(name, tmp_name=tmp_name)\n\n @staticmethod\n def set_verbose_level(verbose_level):\n if not isinstance(verbose_level, VerboseLevel):\n raise ValueError(\"Please set an enum from VerboseLevel\")\n NamedTimer.get_instance().verbose_level = verbose_level" }, { "identifier": "compare_expected_actual", "path": "python/utils/torch_utils.py", "snippet": "def compare_expected_actual(expected, actual, show_where_err=False, get_relative=False, verbose=False, show_values=False):\n def purify(x):\n # return torch.tensor(x)\n res = x\n # if not (isinstance(x, torch.Tensor) or isinstance(x, torch.Variable)):\n if not (isinstance(x, torch.Tensor) ):\n res = torch.tensor(x)\n # return x.detach().numpy()\n return res.type(torch.float).to(\"cpu\")\n expected = purify(expected)\n actual = purify(actual)\n\n if show_values:\n print(\"expected:\", expected[0, 0])\n print(\"actual:\", actual[0, 0])\n\n avg_abs_diff = torch.mean(torch.abs(expected - actual)).item()\n res = avg_abs_diff\n\n if show_where_err:\n show_indices = 
torch.abs(expected - actual) / torch.abs(expected) > 0.5\n # show_indices = (expected != actual)\n print(\"error indices: \", np.where(show_indices.cpu()))\n print(\"expected values:\", expected[show_indices])\n print(\"difference:\", (expected - actual)[show_indices])\n\n if get_relative:\n tmp_expected, tmp_actual = expected[expected != 0], actual[expected != 0]\n relative_diff = torch.abs(tmp_expected - tmp_actual) / torch.abs(tmp_expected)\n relative_avg_diff = torch.mean(torch.abs(tmp_actual - tmp_expected)) / torch.mean(torch.abs(tmp_expected))\n Error = namedtuple(\"Error\", (\"AvgAbsDiff\", \"RelAvgDiff\", \"AvgRelDiff\", \"StdRelDiff\"))\n res = Error(avg_abs_diff, relative_avg_diff.item(), torch.mean(relative_diff).item(), torch.std(relative_diff).item())\n\n if verbose:\n print(res)\n\n return res" } ]
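The SGXConvBase snippets above hinge on converting convolution weights between PyTorch's channels-first layout and the channels-last layout expected on the SGX/TF side. Below is a minimal sketch of that conversion, assuming only torch; the function names mirror the snippet, but this is an illustration rather than the project's code:

import torch

def weight_pytorch2tf(w):
    # (out, in, h, w) -> (out, h, w, in); h and w keep their order
    return w.permute(0, 2, 3, 1).contiguous()

def weight_tf2pytorch(w):
    # (out, h, w, in) -> (out, in, h, w)
    return w.permute(0, 3, 1, 2).contiguous()

w = torch.randn(8, 3, 5, 5)  # out=8, in=3, 5x5 kernel
assert torch.equal(weight_tf2pytorch(weight_pytorch2tf(w)), w)  # round-trip is lossless

The same permutations, with a batch dimension in place of the output-channel dimension, are applied to activations by feature_pytorch2tf and feature_tf2pytorch in the snippet above.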
import os
import sys
import numpy as np
import torch
import torch.distributed as dist
import pdb
from pdb import set_trace as st
from torch import optim, nn
from python.common_net import register_layer, register_weight_layer, get_layer_weight, get_layer_input, \
    get_layer_weight_grad, get_layer_output, get_layer_output_grad, get_layer_input_grad
from python.enclave_interfaces import GlobalTensor
from python.layers.batch_norm_2d import SecretBatchNorm2dLayer
from python.layers.flatten import SecretFlattenLayer
from python.layers.input import SecretInputLayer
from python.layers.maxpool2d import SecretMaxpool2dLayer
from python.layers.output import SecretOutputLayer
from python.layers.relu import SecretReLULayer
from python.sgx_net import init_communicate, warming_up_cuda, SecretNeuralNetwork, SgdOptimizer
from python.layers.sgx_linear_base import SGXLinearBase
from python.layers.sgx_conv_base import SGXConvBase
from python.utils.basic_utils import ExecutionModeOptions
from python.utils.logger_utils import Logger
from python.quantize_net import NetQ
from python.test_sgx_net import argparser_distributed, marshal_process, load_cifar10, seed_torch
from python.utils.timer_utils import NamedTimerInstance, VerboseLevel, NamedTimer
from python.utils.torch_utils import compare_expected_actual
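The imports above pull in NamedTimerInstance, which the forward passes wrap around every stage as a context manager. A hedged sketch of that pattern using only the standard library follows; named_timer is a hypothetical stand-in, not the project's implementation:

import time
from contextlib import contextmanager

@contextmanager
def named_timer(name):
    # Entering starts the clock; leaving prints elapsed wall time under the
    # given label, mirroring how NamedTimerInstance brackets each layer stage.
    start = time.perf_counter()
    try:
        yield
    finally:
        print(f"{name}: {time.perf_counter() - start:.6f}s")

with named_timer("S0: Layer1 Forward"):
    sum(i * i for i in range(100_000))  # stand-in workload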
21043
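The Logger imported earlier tees everything written to stdout into a logfile. A compact sketch of that tee pattern, assuming only the standard library (TeeLogger is an illustrative name):

import sys

class TeeLogger:
    def __init__(self, path="logfile.log"):
        self.terminal = sys.stdout
        self.log = open(path, "a")

    def write(self, message):
        # Mirror every write to both the terminal and the logfile.
        self.terminal.write(message)
        self.log.write(message)

    def flush(self):
        # Needed for Python 3 compatibility, as noted in the snippet above.
        self.terminal.flush()
        self.log.flush()

sys.stdout = TeeLogger()
print("this line reaches both the terminal and logfile.log")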
device_cuda = torch.device("cuda:0")
torch.set_printoptions(precision=10)

def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func, member_name: str, save_path=None) -> None:
    print(member_name)
    layer.make_sure_cpu_is_latest(member_name)
    compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True)
    if save_path is not None:
        if not os.path.exists(save_path):
            os.makedirs(save_path)
            print("Directory ", save_path, " Created ")
        else:
            print("Directory ", save_path, " already exists")
        torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected"))
        torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual"))

def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None:
    print("comparing with layer in expected NN :", layer_name)
device_cuda = torch.device("cuda:0")
torch.set_printoptions(precision=10)

def compare_layer_member(layer: SGXLinearBase, layer_name: str, extract_func, member_name: str, save_path=None) -> None:
    print(member_name)
    layer.make_sure_cpu_is_latest(member_name)
    compare_expected_actual(extract_func(layer_name), layer.get_cpu(member_name), get_relative=True, verbose=True)
    if save_path is not None:
        if not os.path.exists(save_path):
            os.makedirs(save_path)
            print("Directory ", save_path, " Created ")
        else:
            print("Directory ", save_path, " already exists")
        torch.save(extract_func(layer_name), os.path.join(save_path, member_name + "_expected"))
        torch.save(layer.get_cpu(member_name), os.path.join(save_path, member_name + "_actual"))

def compare_layer(layer: SGXLinearBase, layer_name: str, save_path=None) -> None:
    print("comparing with layer in expected NN :", layer_name)
compare_name_function = [("input", get_layer_input), ("output", get_layer_output),
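The next_line target above begins a (name, extractor) table that drives per-tensor comparisons through compare_expected_actual. A sketch of the relative-error core of that check, assuming only torch; rel_avg_diff is a hypothetical helper distilled from the snippet, not the project's API:

import torch

def rel_avg_diff(expected, actual):
    # Mean relative difference over entries where expected is nonzero,
    # as computed by compare_expected_actual(..., get_relative=True) above.
    mask = expected != 0
    e, a = expected[mask], actual[mask]
    return (torch.mean(torch.abs(a - e)) / torch.mean(torch.abs(e))).item()

expected = torch.randn(4, 4)
actual = expected + 1e-3 * torch.randn(4, 4)
print(f"RelAvgDiff: {rel_avg_diff(expected, actual):.2e}")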
5
2023-11-01 10:37:37+00:00
24k
Codra-Ingenierie-Informatique/DataLab
cdl/tests/scenarios/common.py
[ { "identifier": "_", "path": "cdl/config.py", "snippet": "CONF_VERSION = \"1.0.0\"\nAPP_NAME = \"DataLab\"\nMOD_NAME = \"cdl\"\nAPP_DESC = _(\"\"\"DataLab is a generic signal and image processing platform\"\"\")\nAPP_PATH = osp.dirname(__file__)\nDEBUG = os.environ.get(\"DEBUG\", \"\").lower() in (\"1\", \"true\")\nTEST_SEGFAULT_ERROR = len(os.environ.get(\"TEST_SEGFAULT_ERROR\", \"\")) > 0\nDATETIME_FORMAT = \"%d/%m/%Y - %H:%M:%S\"\nDATAPATH = configtools.get_module_data_path(MOD_NAME, \"data\")\nSHOTPATH = osp.join(\n configtools.get_module_data_path(MOD_NAME), os.pardir, \"doc\", \"images\", \"shots\"\n)\nOTHER_PLUGINS_PATHLIST = [configtools.get_module_data_path(MOD_NAME, \"plugins\")]\nIS_FROZEN = is_frozen(MOD_NAME)\nPLOTPY_DEFAULTS = {\n \"plot\": {\n # \"antialiasing\": False,\n # \"title/font/size\": 12,\n # \"title/font/bold\": False,\n # \"marker/curve/text/font/size\": 8,\n # \"marker/curve/text/font/family\": \"default\",\n # \"marker/curve/text/font/bold\": False,\n # \"marker/curve/text/font/italic\": False,\n \"marker/curve/text/textcolor\": \"black\",\n # \"marker/curve/text/background_color\": \"#ffffff\",\n # \"marker/curve/text/background_alpha\": 0.8,\n # \"marker/cross/text/font/family\": \"default\",\n # \"marker/cross/text/font/size\": 8,\n # \"marker/cross/text/font/bold\": False,\n # \"marker/cross/text/font/italic\": False,\n \"marker/cross/text/textcolor\": \"black\",\n # \"marker/cross/text/background_color\": \"#ffffff\",\n \"marker/cross/text/background_alpha\": 0.7,\n # \"marker/cross/line/style\": \"DashLine\",\n # \"marker/cross/line/color\": \"yellow\",\n # \"marker/cross/line/width\": 1,\n # \"marker/cursor/text/font/size\": 8,\n # \"marker/cursor/text/font/family\": \"default\",\n # \"marker/cursor/text/font/bold\": False,\n # \"marker/cursor/text/font/italic\": False,\n # \"marker/cursor/text/textcolor\": \"#ff9393\",\n # \"marker/cursor/text/background_color\": \"#ffffff\",\n # \"marker/cursor/text/background_alpha\": 0.8,\n \"shape/drag/symbol/marker\": \"NoSymbol\",\n \"shape/mask/symbol/size\": 5,\n \"shape/mask/sel_symbol/size\": 8,\n # -----------------------------------------------------------------------------\n # Annotated shape style for annotations:\n \"shape/annotation/line/style\": \"SolidLine\",\n \"shape/annotation/line/color\": \"#ffff00\",\n \"shape/annotation/line/width\": 1,\n \"shape/annotation/fill/style\": \"SolidPattern\",\n \"shape/annotation/fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/fill/alpha\": 0.1,\n \"shape/annotation/symbol/marker\": \"Rect\",\n \"shape/annotation/symbol/size\": 3,\n \"shape/annotation/symbol/edgecolor\": \"#ffff00\",\n \"shape/annotation/symbol/facecolor\": \"#ffff00\",\n \"shape/annotation/symbol/alpha\": 1.0,\n \"shape/annotation/sel_line/style\": \"SolidLine\",\n \"shape/annotation/sel_line/color\": \"#00ff00\",\n \"shape/annotation/sel_line/width\": 1,\n \"shape/annotation/sel_fill/style\": \"SolidPattern\",\n \"shape/annotation/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/annotation/sel_fill/alpha\": 0.1,\n \"shape/annotation/sel_symbol/marker\": \"Rect\",\n \"shape/annotation/sel_symbol/size\": 9,\n \"shape/annotation/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/annotation/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/annotation/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / signals:\n \"shape/result/s/line/style\": \"SolidLine\",\n \"shape/result/s/line/color\": MAIN_FG_COLOR,\n 
\"shape/result/s/line/width\": 1,\n \"shape/result/s/fill/style\": \"SolidPattern\",\n \"shape/result/s/fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/fill/alpha\": 0.1,\n \"shape/result/s/symbol/marker\": \"XCross\",\n \"shape/result/s/symbol/size\": 7,\n \"shape/result/s/symbol/edgecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/facecolor\": MAIN_FG_COLOR,\n \"shape/result/s/symbol/alpha\": 1.0,\n \"shape/result/s/sel_line/style\": \"SolidLine\",\n \"shape/result/s/sel_line/color\": \"#00ff00\",\n \"shape/result/s/sel_line/width\": 1,\n \"shape/result/s/sel_fill/style\": \"SolidPattern\",\n \"shape/result/s/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/s/sel_fill/alpha\": 0.1,\n \"shape/result/s/sel_symbol/marker\": \"Rect\",\n \"shape/result/s/sel_symbol/size\": 9,\n \"shape/result/s/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/s/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/s/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n # Annotated shape style for result shapes / images:\n \"shape/result/i/line/style\": \"SolidLine\",\n \"shape/result/i/line/color\": \"#ffff00\",\n \"shape/result/i/line/width\": 1,\n \"shape/result/i/fill/style\": \"SolidPattern\",\n \"shape/result/i/fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/fill/alpha\": 0.1,\n \"shape/result/i/symbol/marker\": \"Rect\",\n \"shape/result/i/symbol/size\": 3,\n \"shape/result/i/symbol/edgecolor\": \"#ffff00\",\n \"shape/result/i/symbol/facecolor\": \"#ffff00\",\n \"shape/result/i/symbol/alpha\": 1.0,\n \"shape/result/i/sel_line/style\": \"SolidLine\",\n \"shape/result/i/sel_line/color\": \"#00ff00\",\n \"shape/result/i/sel_line/width\": 1,\n \"shape/result/i/sel_fill/style\": \"SolidPattern\",\n \"shape/result/i/sel_fill/color\": MAIN_BG_COLOR,\n \"shape/result/i/sel_fill/alpha\": 0.1,\n \"shape/result/i/sel_symbol/marker\": \"Rect\",\n \"shape/result/i/sel_symbol/size\": 9,\n \"shape/result/i/sel_symbol/edgecolor\": \"#00aa00\",\n \"shape/result/i/sel_symbol/facecolor\": \"#00ff00\",\n \"shape/result/i/sel_symbol/alpha\": 0.7,\n # -----------------------------------------------------------------------------\n },\n}\ndef is_frozen(module_name: str) -> bool:\ndef get_mod_source_dir() -> str | None:\n def get_def_dict(cls, category: str) -> dict:\n def set_def_dict(cls, category: str, def_dict: dict) -> None:\ndef get_old_log_fname(fname):\ndef initialize():\ndef reset():\nclass MainSection(conf.Section, metaclass=conf.SectionMeta):\nclass ConsoleSection(conf.Section, metaclass=conf.SectionMeta):\nclass IOSection(conf.Section, metaclass=conf.SectionMeta):\nclass ProcSection(conf.Section, metaclass=conf.SectionMeta):\nclass ViewSection(conf.Section, metaclass=conf.SectionMeta):\nclass Conf(conf.Configuration, metaclass=conf.ConfMeta):" }, { "identifier": "CDLMainWindow", "path": "cdl/core/gui/main.py", "snippet": "class CDLMainWindow(QW.QMainWindow, AbstractCDLControl, metaclass=CDLMainWindowMeta):\n \"\"\"DataLab main window\n\n Args:\n console: enable internal console\n hide_on_close: True to hide window on close\n \"\"\"\n\n __instance = None\n\n SIG_READY = QC.Signal()\n SIG_SEND_OBJECT = QC.Signal(object)\n SIG_SEND_OBJECTLIST = QC.Signal(object)\n SIG_CLOSING = QC.Signal()\n\n @staticmethod\n def get_instance(console=None, hide_on_close=False):\n \"\"\"Return singleton instance\"\"\"\n if CDLMainWindow.__instance is None:\n return CDLMainWindow(console, hide_on_close)\n return CDLMainWindow.__instance\n\n def __init__(self, console=None, 
hide_on_close=False):\n \"\"\"Initialize main window\"\"\"\n CDLMainWindow.__instance = self\n super().__init__()\n win32_fix_title_bar_background(self)\n self.setObjectName(APP_NAME)\n self.setWindowIcon(get_icon(\"DataLab.svg\"))\n\n execenv.log(self, \"Starting initialization\")\n\n self.__restore_pos_and_size()\n\n self.ready_flag = True\n\n self.hide_on_close = hide_on_close\n self.__old_size = None\n self.__memory_warning = False\n self.memorystatus = None\n\n self.console = None\n self.macropanel: MacroPanel = None\n\n self.signal_toolbar: QW.QToolBar = None\n self.image_toolbar: QW.QToolBar = None\n self.signalpanel: SignalPanel = None\n self.imagepanel: ImagePanel = None\n self.tabwidget: QW.QTabWidget = None\n self.docks: dict[AbstractPanel, QW.QDockWidget] = None\n self.h5inputoutput = H5InputOutput(self)\n\n self.openh5_action: QW.QAction = None\n self.saveh5_action: QW.QAction = None\n self.browseh5_action: QW.QAction = None\n self.settings_action: QW.QAction = None\n self.quit_action: QW.QAction = None\n self.auto_refresh_action: QW.QAction = None\n self.showlabel_action: QW.QAction = None\n\n self.file_menu: QW.QMenu = None\n self.edit_menu: QW.QMenu = None\n self.operation_menu: QW.QMenu = None\n self.processing_menu: QW.QMenu = None\n self.computing_menu: QW.QMenu = None\n self.plugins_menu: QW.QMenu = None\n self.view_menu: QW.QMenu = None\n self.help_menu: QW.QMenu = None\n\n self.__is_modified = None\n self.set_modified(False)\n\n # Starting XML-RPC server thread\n self.remote_server = RemoteServer(self)\n if Conf.main.rpc_server_enabled.get():\n self.remote_server.SIG_SERVER_PORT.connect(self.xmlrpc_server_started)\n self.remote_server.start()\n\n # Setup actions and menus\n if console is None:\n console = Conf.console.console_enabled.get()\n self.setup(console)\n\n execenv.log(self, \"Initialization done\")\n\n # ------API related to XML-RPC remote control\n @staticmethod\n def xmlrpc_server_started(port):\n \"\"\"XML-RPC server has started, writing comm port in configuration file\"\"\"\n Conf.main.rpc_server_port.set(port)\n\n def __get_current_basedatapanel(self) -> BaseDataPanel:\n \"\"\"Return the current BaseDataPanel,\n or the signal panel if macro panel is active\n\n Returns:\n BaseDataPanel: current panel\n \"\"\"\n panel = self.tabwidget.currentWidget()\n if not isinstance(panel, base.BaseDataPanel):\n panel = self.signalpanel\n return panel\n\n def __get_specific_panel(self, panel: str | None) -> BaseDataPanel:\n \"\"\"Return a specific BaseDataPanel.\n\n Args:\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n\n Returns:\n BaseDataPanel: panel\n\n Raises:\n ValueError: if panel is unknown\n \"\"\"\n if not panel:\n return self.__get_current_basedatapanel()\n if panel == \"signal\":\n return self.signalpanel\n if panel == \"image\":\n return self.imagepanel\n raise ValueError(f\"Unknown panel: {panel}\")\n\n @remote_controlled\n def get_group_titles_with_object_infos(\n self,\n ) -> tuple[list[str], list[list[str]], list[list[str]]]:\n \"\"\"Return groups titles and lists of inner objects uuids and titles.\n\n Returns:\n Tuple: groups titles, lists of inner objects uuids and titles\n \"\"\"\n panel = self.__get_current_basedatapanel()\n return panel.objmodel.get_group_titles_with_object_infos()\n\n @remote_controlled\n def get_object_titles(self, panel: str | None = None) -> list[str]:\n \"\"\"Get object (signal/image) list for current panel.\n Objects are sorted by group number and object index in 
group.\n\n Args:\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n\n Returns:\n list[str]: list of object titles\n\n Raises:\n ValueError: if panel is unknown\n \"\"\"\n return self.__get_specific_panel(panel).objmodel.get_object_titles()\n\n @remote_controlled\n def get_object(\n self,\n nb_id_title: int | str | None = None,\n panel: str | None = None,\n ) -> SignalObj | ImageObj:\n \"\"\"Get object (signal/image) from index.\n\n Args:\n nb_id_title: Object number, or object id, or object title.\n Defaults to None (current object).\n panel: Panel name. Defaults to None (current panel).\n\n Returns:\n Object\n\n Raises:\n KeyError: if object not found\n TypeError: if index_id_title type is invalid\n \"\"\"\n panelw = self.__get_specific_panel(panel)\n if nb_id_title is None:\n return panelw.objview.get_current_object()\n if isinstance(nb_id_title, int):\n return panelw.objmodel.get_object_from_number(nb_id_title)\n if isinstance(nb_id_title, str):\n try:\n return panelw.objmodel[nb_id_title]\n except KeyError:\n try:\n return panelw.objmodel.get_object_from_title(nb_id_title)\n except KeyError as exc:\n raise KeyError(\n f\"Invalid object index, id or title: {nb_id_title}\"\n ) from exc\n raise TypeError(f\"Invalid index_id_title type: {type(nb_id_title)}\")\n\n @remote_controlled\n def get_object_uuids(self, panel: str | None = None) -> list[str]:\n \"\"\"Get object (signal/image) uuid list for current panel.\n Objects are sorted by group number and object index in group.\n\n Args:\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n\n Returns:\n list[str]: list of object uuids\n\n Raises:\n ValueError: if panel is unknown\n \"\"\"\n return self.__get_specific_panel(panel).objmodel.get_object_ids()\n\n @remote_controlled\n def get_sel_object_uuids(self, include_groups: bool = False) -> list[str]:\n \"\"\"Return selected objects uuids.\n\n Args:\n include_groups: If True, also return objects from selected groups.\n\n Returns:\n List of selected objects uuids.\n \"\"\"\n panel = self.__get_current_basedatapanel()\n return panel.objview.get_sel_object_uuids(include_groups)\n\n @remote_controlled\n def select_objects(\n self,\n selection: list[int | str],\n panel: str | None = None,\n ) -> None:\n \"\"\"Select objects in current panel.\n\n Args:\n selection: List of object numbers (1 to N) or uuids to select\n panel: panel name (valid values: \"signal\", \"image\").\n If None, current panel is used. Defaults to None.\n \"\"\"\n panel = self.__get_specific_panel(panel)\n panel.objview.select_objects(selection)\n\n @remote_controlled\n def select_groups(\n self, selection: list[int | str] | None = None, panel: str | None = None\n ) -> None:\n \"\"\"Select groups in current panel.\n\n Args:\n selection: List of group numbers (1 to N), or list of group uuids,\n or None to select all groups. Defaults to None.\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used. Defaults to None.\n \"\"\"\n panel = self.__get_specific_panel(panel)\n panel.objview.select_groups(selection)\n\n @remote_controlled\n def delete_metadata(self, refresh_plot: bool = True) -> None:\n \"\"\"Delete metadata of selected objects\n\n Args:\n refresh_plot (bool | None): Refresh plot. 
Defaults to True.\n \"\"\"\n panel = self.__get_current_basedatapanel()\n panel.delete_metadata(refresh_plot)\n\n @remote_controlled\n def get_object_shapes(\n self,\n nb_id_title: int | str | None = None,\n panel: str | None = None,\n ) -> list:\n \"\"\"Get plot item shapes associated to object (signal/image).\n\n Args:\n nb_id_title: Object number, or object id, or object title.\n Defaults to None (current object).\n panel: Panel name. Defaults to None (current panel).\n\n Returns:\n List of plot item shapes\n \"\"\"\n obj = self.get_object(nb_id_title, panel)\n return list(obj.iterate_shape_items(editable=False))\n\n @remote_controlled\n def add_annotations_from_items(\n self, items: list, refresh_plot: bool = True, panel: str | None = None\n ) -> None:\n \"\"\"Add object annotations (annotation plot items).\n\n Args:\n items (list): annotation plot items\n refresh_plot (bool | None): refresh plot. Defaults to True.\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n \"\"\"\n panel = self.__get_specific_panel(panel)\n panel.add_annotations_from_items(items, refresh_plot)\n\n @remote_controlled\n def add_label_with_title(\n self, title: str | None = None, panel: str | None = None\n ) -> None:\n \"\"\"Add a label with object title on the associated plot\n\n Args:\n title (str | None): Label title. Defaults to None.\n If None, the title is the object title.\n panel (str | None): panel name (valid values: \"signal\", \"image\").\n If None, current panel is used.\n \"\"\"\n self.__get_specific_panel(panel).add_label_with_title(title)\n\n # ------Misc.\n @property\n def panels(self) -> tuple[AbstractPanel, ...]:\n \"\"\"Return the tuple of implemented panels (signal, image)\n\n Returns:\n tuple[SignalPanel, ImagePanel, MacroPanel]: tuple of panels\n \"\"\"\n return (self.signalpanel, self.imagepanel, self.macropanel)\n\n def __set_low_memory_state(self, state: bool) -> None:\n \"\"\"Set memory warning state\"\"\"\n self.__memory_warning = state\n\n def confirm_memory_state(self) -> bool: # pragma: no cover\n \"\"\"Check memory warning state and eventually show a warning dialog\n\n Returns:\n bool: True if memory state is ok\n \"\"\"\n if not env.execenv.unattended and self.__memory_warning:\n threshold = Conf.main.available_memory_threshold.get()\n answer = QW.QMessageBox.critical(\n self,\n _(\"Warning\"),\n _(\"Available memory is below %d MB.<br><br>Do you want to continue?\")\n % threshold,\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n return answer == QW.QMessageBox.Yes\n return True\n\n def check_stable_release(self) -> None: # pragma: no cover\n \"\"\"Check if this is a stable release\"\"\"\n if __version__.replace(\".\", \"\").isdigit():\n # This is a stable release\n return\n if \"b\" in __version__:\n # This is a beta release\n rel = _(\n \"This software is in the <b>beta stage</b> of its release cycle. \"\n \"The focus of beta testing is providing a feature complete \"\n \"software for users interested in trying new features before \"\n \"the final release. However, <u>beta software may not behave as \"\n \"expected and will probably have more bugs or performance issues \"\n \"than completed software</u>.\"\n )\n else:\n # This is an alpha release\n rel = _(\n \"This software is in the <b>alpha stage</b> of its release cycle. \"\n \"The focus of alpha testing is providing an incomplete software \"\n \"for early testing of specific features by users. 
\"\n \"Please note that <u>alpha software was not thoroughly tested</u> \"\n \"by the developer before it is released.\"\n )\n txtlist = [\n f\"<b>{APP_NAME}</b> v{__version__}:\",\n \"\",\n _(\"<i>This is not a stable release.</i>\"),\n \"\",\n rel,\n ]\n QW.QMessageBox.warning(self, APP_NAME, \"<br>\".join(txtlist), QW.QMessageBox.Ok)\n\n def __check_dependencies(self) -> None: # pragma: no cover\n \"\"\"Check dependencies\"\"\"\n if IS_FROZEN or execenv.unattended:\n # No need to check dependencies if DataLab has been frozen, or if\n # the user has chosen to ignore this check, or if we are in unattended mode\n # (i.e. running automated tests)\n\n if IS_FROZEN:\n QW.QMessageBox.information(\n self,\n _(\"Information\"),\n _(\n \"The dependency check feature is not relevant for the \"\n \"standalone version of DataLab.\"\n ),\n QW.QMessageBox.Ok,\n )\n return\n try:\n state = dephash.check_dependencies_hash(DATAPATH)\n bad_deps = [name for name in state if not state[name]]\n if not bad_deps:\n # Everything is OK\n QW.QMessageBox.information(\n self,\n _(\"Information\"),\n _(\n \"All critical dependencies of DataLab have been qualified \"\n \"on this operating system.\"\n ),\n QW.QMessageBox.Ok,\n )\n return\n except IOError:\n bad_deps = None\n txt0 = _(\"Non-compliant dependency:\")\n if bad_deps is None or len(bad_deps) > 1:\n txt0 = _(\"Non-compliant dependencies:\")\n if bad_deps is None:\n txtlist = [\n _(\"DataLab has not yet been qualified on your operating system.\"),\n ]\n else:\n txtlist = [\n \"<u>\" + txt0 + \"</u> \" + \", \".join(bad_deps),\n \"\",\n _(\n \"At least one dependency does not comply with DataLab \"\n \"qualification standard reference (wrong dependency version \"\n \"has been installed, or dependency source code has been \"\n \"modified, or the application has not yet been qualified \"\n \"on your operating system).\"\n ),\n ]\n txtlist += [\n \"\",\n _(\n \"This means that the application has not been officially qualified \"\n \"in this context and may not behave as expected.\"\n ),\n ]\n txt = \"<br>\".join(txtlist)\n QW.QMessageBox.warning(self, APP_NAME, txt, QW.QMessageBox.Ok)\n\n def check_for_previous_crash(self) -> None: # pragma: no cover\n \"\"\"Check for previous crash\"\"\"\n if execenv.unattended:\n self.__show_logviewer()\n elif Conf.main.faulthandler_log_available.get(\n False\n ) or Conf.main.traceback_log_available.get(False):\n txt = \"<br>\".join(\n [\n logviewer.get_log_prompt_message(),\n \"\",\n _(\"Do you want to see available log files?\"),\n ]\n )\n btns = QW.QMessageBox.StandardButton.Yes | QW.QMessageBox.StandardButton.No\n choice = QW.QMessageBox.warning(self, APP_NAME, txt, btns)\n if choice == QW.QMessageBox.StandardButton.Yes:\n self.__show_logviewer()\n\n def take_screenshot(self, name: str) -> None: # pragma: no cover\n \"\"\"Take main window screenshot\"\"\"\n self.memorystatus.set_demo_mode(True)\n qth.grab_save_window(self, f\"{name}\")\n self.memorystatus.set_demo_mode(False)\n\n def take_menu_screenshots(self) -> None: # pragma: no cover\n \"\"\"Take menu screenshots\"\"\"\n for panel in self.panels:\n if isinstance(panel, base.BaseDataPanel):\n self.tabwidget.setCurrentWidget(panel)\n for name in (\n \"file\",\n \"edit\",\n \"view\",\n \"operation\",\n \"processing\",\n \"computing\",\n \"help\",\n ):\n menu = getattr(self, f\"{name}_menu\")\n menu.popup(self.pos())\n qth.grab_save_window(menu, f\"{panel.objectName()}_{name}\")\n menu.close()\n\n # ------GUI setup\n def __restore_pos_and_size(self) -> None:\n 
\"\"\"Restore main window position and size from configuration\"\"\"\n pos = Conf.main.window_position.get(None)\n if pos is not None:\n posx, posy = pos\n self.move(QC.QPoint(posx, posy))\n size = Conf.main.window_size.get(None)\n if size is not None:\n width, height = size\n self.resize(QC.QSize(width, height))\n if pos is not None and size is not None:\n sgeo = self.screen().availableGeometry()\n out_inf = posx < -int(0.9 * width) or posy < -int(0.9 * height)\n out_sup = posx > int(0.9 * sgeo.width()) or posy > int(0.9 * sgeo.height())\n if len(QW.QApplication.screens()) == 1 and (out_inf or out_sup):\n # Main window is offscreen\n posx = min(max(posx, 0), sgeo.width() - width)\n posy = min(max(posy, 0), sgeo.height() - height)\n self.move(QC.QPoint(posx, posy))\n\n def __save_pos_and_size(self) -> None:\n \"\"\"Save main window position and size to configuration\"\"\"\n is_maximized = self.windowState() == QC.Qt.WindowMaximized\n Conf.main.window_maximized.set(is_maximized)\n if not is_maximized:\n size = self.size()\n Conf.main.window_size.set((size.width(), size.height()))\n pos = self.pos()\n Conf.main.window_position.set((pos.x(), pos.y()))\n\n def setup(self, console: bool = False) -> None:\n \"\"\"Setup main window\n\n Args:\n console: True to setup console\n \"\"\"\n self.__register_plugins()\n self.__configure_statusbar()\n self.__setup_global_actions()\n self.__add_signal_image_panels()\n self.__create_plugins_actions()\n self.__setup_central_widget()\n self.__add_menus()\n if console:\n self.__setup_console()\n self.__update_actions()\n self.__add_macro_panel()\n self.__configure_panels()\n\n def __register_plugins(self) -> None:\n \"\"\"Register plugins\"\"\"\n with qth.try_or_log_error(\"Discovering plugins\"):\n # Discovering plugins\n plugin_nb = len(discover_plugins())\n execenv.log(self, f\"{plugin_nb} plugin(s) found\")\n for plugin_class in PluginRegistry.get_plugin_classes():\n with qth.try_or_log_error(f\"Instantiating plugin {plugin_class.__name__}\"):\n # Instantiating plugin\n plugin: PluginBase = plugin_class()\n with qth.try_or_log_error(f\"Registering plugin {plugin.info.name}\"):\n # Registering plugin\n plugin.register(self)\n\n def __create_plugins_actions(self) -> None:\n \"\"\"Create plugins actions\"\"\"\n with self.signalpanel.acthandler.new_category(ActionCategory.PLUGINS):\n with self.imagepanel.acthandler.new_category(ActionCategory.PLUGINS):\n for plugin in PluginRegistry.get_plugins():\n with qth.try_or_log_error(f\"Create actions for {plugin.info.name}\"):\n plugin.create_actions()\n\n @staticmethod\n def __unregister_plugins() -> None:\n \"\"\"Unregister plugins\"\"\"\n while PluginRegistry.get_plugins():\n # Unregistering plugin\n plugin = PluginRegistry.get_plugins()[-1]\n with qth.try_or_log_error(f\"Unregistering plugin {plugin.info.name}\"):\n plugin.unregister()\n\n def __configure_statusbar(self) -> None:\n \"\"\"Configure status bar\"\"\"\n self.statusBar().showMessage(_(\"Welcome to %s!\") % APP_NAME, 5000)\n # Plugin status\n pluginstatus = status.PluginStatus()\n self.statusBar().addPermanentWidget(pluginstatus)\n # XML-RPC server status\n xmlrpcstatus = status.XMLRPCStatus()\n xmlrpcstatus.set_port(self.remote_server.port)\n self.statusBar().addPermanentWidget(xmlrpcstatus)\n # Memory status\n threshold = Conf.main.available_memory_threshold.get()\n self.memorystatus = status.MemoryStatus(threshold)\n self.memorystatus.SIG_MEMORY_ALARM.connect(self.__set_low_memory_state)\n self.statusBar().addPermanentWidget(self.memorystatus)\n\n 
def __setup_global_actions(self) -> None:\n \"\"\"Setup global actions\"\"\"\n self.openh5_action = create_action(\n self,\n _(\"Open HDF5 files...\"),\n icon=get_icon(\"fileopen_h5.svg\"),\n tip=_(\"Open one or several HDF5 files\"),\n triggered=lambda checked=False: self.open_h5_files(import_all=True),\n )\n self.saveh5_action = create_action(\n self,\n _(\"Save to HDF5 file...\"),\n icon=get_icon(\"filesave_h5.svg\"),\n tip=_(\"Save to HDF5 file\"),\n triggered=self.save_to_h5_file,\n )\n self.browseh5_action = create_action(\n self,\n _(\"Browse HDF5 file...\"),\n icon=get_icon(\"h5browser.svg\"),\n tip=_(\"Browse an HDF5 file\"),\n triggered=lambda checked=False: self.open_h5_files(import_all=None),\n )\n self.settings_action = create_action(\n self,\n _(\"Settings...\"),\n icon=get_icon(\"libre-gui-settings.svg\"),\n tip=_(\"Open settings dialog\"),\n triggered=self.__edit_settings,\n )\n main_toolbar = self.addToolBar(_(\"Main Toolbar\"))\n add_actions(\n main_toolbar,\n [\n self.openh5_action,\n self.saveh5_action,\n self.browseh5_action,\n None,\n self.settings_action,\n ],\n )\n # Quit action for \"File menu\" (added when populating menu on demand)\n if self.hide_on_close:\n quit_text = _(\"Hide window\")\n quit_tip = _(\"Hide DataLab window\")\n else:\n quit_text = _(\"Quit\")\n quit_tip = _(\"Quit application\")\n if sys.platform != \"darwin\":\n # On macOS, the \"Quit\" action is automatically added to the application menu\n self.quit_action = create_action(\n self,\n quit_text,\n shortcut=QG.QKeySequence(QG.QKeySequence.Quit),\n icon=get_icon(\"libre-gui-close.svg\"),\n tip=quit_tip,\n triggered=self.close,\n )\n # View menu actions\n self.auto_refresh_action = create_action(\n self,\n _(\"Auto-refresh\"),\n icon=get_icon(\"refresh-auto.svg\"),\n tip=_(\"Auto-refresh plot when object is modified, added or removed\"),\n toggled=self.toggle_auto_refresh,\n )\n self.showlabel_action = create_action(\n self,\n _(\"Show graphical object titles\"),\n icon=get_icon(\"show_titles.svg\"),\n tip=_(\"Show or hide ROI and other graphical object titles or subtitles\"),\n toggled=self.toggle_show_titles,\n )\n\n def __add_signal_panel(self) -> None:\n \"\"\"Setup signal toolbar, widgets and panel\"\"\"\n self.signal_toolbar = self.addToolBar(_(\"Signal Processing Toolbar\"))\n curvewidget = DockablePlotWidget(self, PlotType.CURVE)\n curveplot = curvewidget.get_plot()\n curveplot.add_item(make.legend(\"TR\"))\n self.signalpanel = signal.SignalPanel(\n self, curvewidget.plotwidget, self.signal_toolbar\n )\n self.signalpanel.SIG_STATUS_MESSAGE.connect(self.statusBar().showMessage)\n return curvewidget\n\n def __add_image_panel(self) -> None:\n \"\"\"Setup image toolbar, widgets and panel\"\"\"\n self.image_toolbar = self.addToolBar(_(\"Image Processing Toolbar\"))\n imagewidget = DockablePlotWidget(self, PlotType.IMAGE)\n self.imagepanel = image.ImagePanel(\n self, imagewidget.plotwidget, self.image_toolbar\n )\n # -----------------------------------------------------------------------------\n # # Before eventually disabling the \"peritem\" mode by default, wait for the\n # # plotpy bug to be fixed (peritem mode is not compatible with multiple image\n # # items):\n # for cspanel in (\n # self.imagepanel.plotwidget.get_xcs_panel(),\n # self.imagepanel.plotwidget.get_ycs_panel(),\n # ):\n # cspanel.peritem_ac.setChecked(False)\n # -----------------------------------------------------------------------------\n self.imagepanel.SIG_STATUS_MESSAGE.connect(self.statusBar().showMessage)\n return 
imagewidget\n\n def __add_signal_image_panels(self) -> None:\n \"\"\"Add signal and image panels\"\"\"\n self.tabwidget = QW.QTabWidget()\n cdock = self.__add_dockwidget(self.__add_signal_panel(), title=_(\"Curve panel\"))\n idock = self.__add_dockwidget(self.__add_image_panel(), title=_(\"Image panel\"))\n self.tabifyDockWidget(cdock, idock)\n self.docks = {self.signalpanel: cdock, self.imagepanel: idock}\n self.tabwidget.currentChanged.connect(self.__tab_index_changed)\n self.signalpanel.SIG_OBJECT_ADDED.connect(\n lambda: self.set_current_panel(\"signal\")\n )\n self.imagepanel.SIG_OBJECT_ADDED.connect(\n lambda: self.set_current_panel(\"image\")\n )\n for panel in (self.signalpanel, self.imagepanel):\n panel.setup_panel()\n\n def __setup_central_widget(self) -> None:\n \"\"\"Setup central widget (main panel)\"\"\"\n self.tabwidget.setMaximumWidth(500)\n self.tabwidget.addTab(self.signalpanel, get_icon(\"signal.svg\"), _(\"Signals\"))\n self.tabwidget.addTab(self.imagepanel, get_icon(\"image.svg\"), _(\"Images\"))\n self.setCentralWidget(self.tabwidget)\n\n @staticmethod\n def __get_local_doc_path() -> str | None:\n \"\"\"Return local documentation path, if it exists\"\"\"\n locale = QC.QLocale.system().name()\n for suffix in (\"_\" + locale[:2], \"_en\"):\n path = osp.join(DATAPATH, \"doc\", f\"{APP_NAME}{suffix}.pdf\")\n if osp.isfile(path):\n return path\n return None\n\n def __add_menus(self) -> None:\n \"\"\"Adding menus\"\"\"\n self.file_menu = self.menuBar().addMenu(_(\"File\"))\n configure_menu_about_to_show(self.file_menu, self.__update_file_menu)\n self.edit_menu = self.menuBar().addMenu(_(\"&Edit\"))\n self.operation_menu = self.menuBar().addMenu(_(\"Operations\"))\n self.processing_menu = self.menuBar().addMenu(_(\"Processing\"))\n self.computing_menu = self.menuBar().addMenu(_(\"Computing\"))\n self.plugins_menu = self.menuBar().addMenu(_(\"Plugins\"))\n self.view_menu = self.menuBar().addMenu(_(\"&View\"))\n configure_menu_about_to_show(self.view_menu, self.__update_view_menu)\n self.help_menu = self.menuBar().addMenu(\"?\")\n for menu in (\n self.edit_menu,\n self.operation_menu,\n self.processing_menu,\n self.computing_menu,\n self.plugins_menu,\n ):\n configure_menu_about_to_show(menu, self.__update_generic_menu)\n help_menu_actions = [\n create_action(\n self,\n _(\"Online documentation\"),\n icon=get_icon(\"libre-gui-help.svg\"),\n triggered=lambda: webbrowser.open(__docurl__),\n ),\n ]\n localdocpath = self.__get_local_doc_path()\n if localdocpath is not None:\n help_menu_actions += [\n create_action(\n self,\n _(\"PDF documentation\"),\n icon=get_icon(\"help_pdf.svg\"),\n triggered=lambda: webbrowser.open(localdocpath),\n ),\n ]\n help_menu_actions += [None]\n if TEST_SEGFAULT_ERROR:\n help_menu_actions += [\n create_action(\n self,\n _(\"Test segfault/Python error\"),\n triggered=self.test_segfault_error,\n )\n ]\n help_menu_actions += [\n create_action(\n self,\n _(\"Log files\") + \"...\",\n icon=get_icon(\"logs.svg\"),\n triggered=self.__show_logviewer,\n ),\n create_action(\n self,\n _(\"Installation and configuration\") + \"...\",\n icon=get_icon(\"libre-toolbox.svg\"),\n triggered=lambda: instconfviewer.exec_cdl_installconfig_dialog(self),\n ),\n None,\n create_action(\n self,\n _(\"Project home page\"),\n icon=get_icon(\"libre-gui-globe.svg\"),\n triggered=lambda: webbrowser.open(__homeurl__),\n ),\n create_action(\n self,\n _(\"Bug report or feature request\"),\n icon=get_icon(\"libre-gui-globe.svg\"),\n triggered=lambda: webbrowser.open(__supporturl__),\n 
),\n create_action(\n self,\n _(\"Check critical dependencies...\"),\n triggered=self.__check_dependencies,\n ),\n create_action(\n self,\n _(\"About...\"),\n icon=get_icon(\"libre-gui-about.svg\"),\n triggered=self.__about,\n ),\n ]\n add_actions(self.help_menu, help_menu_actions)\n\n def __setup_console(self) -> None:\n \"\"\"Add an internal console\"\"\"\n ns = {\n \"cdl\": self,\n \"np\": np,\n \"sps\": sps,\n \"spi\": spi,\n \"os\": os,\n \"sys\": sys,\n \"osp\": osp,\n \"time\": time,\n }\n msg = (\n \"Welcome to DataLab console!\\n\"\n \"---------------------------\\n\"\n \"You can access the main window with the 'cdl' variable.\\n\"\n \"Example:\\n\"\n \" o = cdl.get_object() # returns currently selected object\\n\"\n \" o = cdl[1] # returns object number 1\\n\"\n \" o = cdl['My image'] # returns object which title is 'My image'\\n\"\n \" o.data # returns object data\\n\"\n \"Modules imported at startup: \"\n \"os, sys, os.path as osp, time, \"\n \"numpy as np, scipy.signal as sps, scipy.ndimage as spi\"\n )\n self.console = DockableConsole(self, namespace=ns, message=msg, debug=DEBUG)\n self.console.setMaximumBlockCount(Conf.console.max_line_count.get(5000))\n self.console.go_to_error.connect(go_to_error)\n console_dock = self.__add_dockwidget(self.console, _(\"Console\"))\n console_dock.hide()\n self.console.interpreter.widget_proxy.sig_new_prompt.connect(\n lambda txt: self.repopulate_panel_trees()\n )\n\n def __add_macro_panel(self) -> None:\n \"\"\"Add macro panel\"\"\"\n self.macropanel = macro.MacroPanel()\n mdock = self.__add_dockwidget(self.macropanel, _(\"Macro manager\"))\n self.docks[self.macropanel] = mdock\n self.tabifyDockWidget(self.docks[self.imagepanel], mdock)\n self.docks[self.signalpanel].raise_()\n\n def __configure_panels(self) -> None:\n \"\"\"Configure panels\"\"\"\n # Connectings signals\n for panel in self.panels:\n panel.SIG_OBJECT_ADDED.connect(self.set_modified)\n panel.SIG_OBJECT_REMOVED.connect(self.set_modified)\n self.macropanel.SIG_OBJECT_MODIFIED.connect(self.set_modified)\n # Initializing common panel actions\n self.auto_refresh_action.setChecked(Conf.view.auto_refresh.get(True))\n self.showlabel_action.setChecked(Conf.view.show_label.get(False))\n # Restoring current tab from last session\n tab_idx = Conf.main.current_tab.get(None)\n if tab_idx is not None:\n self.tabwidget.setCurrentIndex(tab_idx)\n # Set focus on current panel, so that keyboard shortcuts work (Fixes #10)\n self.tabwidget.currentWidget().setFocus()\n\n def set_process_isolation_enabled(self, state: bool) -> None:\n \"\"\"Enable/disable process isolation\n\n Args:\n state (bool): True to enable process isolation\n \"\"\"\n for processor in (self.imagepanel.processor, self.signalpanel.processor):\n processor.set_process_isolation_enabled(state)\n\n # ------Remote control\n @remote_controlled\n def get_current_panel(self) -> str:\n \"\"\"Return current panel name\n\n Returns:\n str: panel name (valid values: \"signal\", \"image\", \"macro\")\n \"\"\"\n panel = self.tabwidget.currentWidget()\n dock = self.docks[panel]\n if panel is self.signalpanel and dock.isVisible():\n return \"signal\"\n if panel is self.imagepanel and dock.isVisible():\n return \"image\"\n return \"macro\"\n\n @remote_controlled\n def set_current_panel(self, panel: str) -> None:\n \"\"\"Switch to panel.\n\n Args:\n panel (str): panel name (valid values: \"signal\", \"image\", \"macro\")\n\n Raises:\n ValueError: unknown panel\n \"\"\"\n if self.get_current_panel() == panel:\n if panel in (\"signal\", 
\"image\"):\n # Force tab index changed event to be sure that the dock associated\n # to the current panel is raised\n self.__tab_index_changed(self.tabwidget.currentIndex())\n return\n if panel == \"signal\":\n self.tabwidget.setCurrentWidget(self.signalpanel)\n elif panel == \"image\":\n self.tabwidget.setCurrentWidget(self.imagepanel)\n elif panel == \"macro\":\n self.docks[self.macropanel].raise_()\n else:\n raise ValueError(f\"Unknown panel {panel}\")\n\n @remote_controlled\n def calc(self, name: str, param: gds.DataSet | None = None) -> None:\n \"\"\"Call compute function `name` in current panel's processor\n\n Args:\n name (str): function name\n param (guidata.dataset.DataSet): optional parameters\n (default: None)\n\n Raises:\n ValueError: unknown function\n \"\"\"\n panel = self.tabwidget.currentWidget()\n if isinstance(panel, base.BaseDataPanel):\n for funcname in (name, f\"compute_{name}\"):\n func = getattr(panel.processor, funcname, None)\n if func is not None:\n break\n else:\n raise ValueError(f\"Unknown function {funcname}\")\n if param is None:\n func()\n else:\n func(param)\n\n # ------GUI refresh\n def has_objects(self) -> bool:\n \"\"\"Return True if sig/ima panels have any object\"\"\"\n return sum(len(panel) for panel in self.panels) > 0\n\n def set_modified(self, state: bool = True) -> None:\n \"\"\"Set mainwindow modified state\"\"\"\n state = state and self.has_objects()\n self.__is_modified = state\n self.setWindowTitle(APP_NAME + (\"*\" if state else \"\"))\n\n def __add_dockwidget(self, child, title: str) -> QW.QDockWidget:\n \"\"\"Add QDockWidget and toggleViewAction\"\"\"\n dockwidget, location = child.create_dockwidget(title)\n self.addDockWidget(location, dockwidget)\n return dockwidget\n\n def repopulate_panel_trees(self) -> None:\n \"\"\"Repopulate all panel trees\"\"\"\n for panel in self.panels:\n if isinstance(panel, base.BaseDataPanel):\n panel.objview.populate_tree()\n\n def __update_actions(self) -> None:\n \"\"\"Update selection dependent actions\"\"\"\n is_signal = self.tabwidget.currentWidget() is self.signalpanel\n panel = self.signalpanel if is_signal else self.imagepanel\n panel.selection_changed()\n self.signal_toolbar.setVisible(is_signal)\n self.image_toolbar.setVisible(not is_signal)\n if self.plugins_menu is not None:\n plugin_actions = panel.get_category_actions(ActionCategory.PLUGINS)\n self.plugins_menu.setEnabled(len(plugin_actions) > 0)\n\n def __tab_index_changed(self, index: int) -> None:\n \"\"\"Switch from signal to image mode, or vice-versa\"\"\"\n dock = self.docks[self.tabwidget.widget(index)]\n dock.raise_()\n self.__update_actions()\n\n def __update_generic_menu(self, menu: QW.QMenu | None = None) -> None:\n \"\"\"Update menu before showing up -- Generic method\"\"\"\n if menu is None:\n menu = self.sender()\n menu.clear()\n panel = self.tabwidget.currentWidget()\n category = {\n self.file_menu: ActionCategory.FILE,\n self.edit_menu: ActionCategory.EDIT,\n self.view_menu: ActionCategory.VIEW,\n self.operation_menu: ActionCategory.OPERATION,\n self.processing_menu: ActionCategory.PROCESSING,\n self.computing_menu: ActionCategory.COMPUTING,\n self.plugins_menu: ActionCategory.PLUGINS,\n }[menu]\n actions = panel.get_category_actions(category)\n add_actions(menu, actions)\n\n def __update_file_menu(self) -> None:\n \"\"\"Update file menu before showing up\"\"\"\n self.saveh5_action.setEnabled(self.has_objects())\n self.__update_generic_menu(self.file_menu)\n add_actions(\n self.file_menu,\n [\n None,\n self.openh5_action,\n 
self.saveh5_action,\n self.browseh5_action,\n None,\n self.settings_action,\n ],\n )\n if self.quit_action is not None:\n add_actions(self.file_menu, [None, self.quit_action])\n\n def __update_view_menu(self) -> None:\n \"\"\"Update view menu before showing up\"\"\"\n self.__update_generic_menu(self.view_menu)\n add_actions(self.view_menu, [None] + self.createPopupMenu().actions())\n\n @remote_controlled\n def toggle_show_titles(self, state: bool) -> None:\n \"\"\"Toggle show annotations option\n\n Args:\n state: state\n \"\"\"\n Conf.view.show_label.set(state)\n for datapanel in (self.signalpanel, self.imagepanel):\n for obj in datapanel.objmodel:\n obj.set_metadata_option(\"showlabel\", state)\n datapanel.SIG_REFRESH_PLOT.emit(\"selected\", True)\n\n @remote_controlled\n def toggle_auto_refresh(self, state: bool) -> None:\n \"\"\"Toggle auto refresh option\n\n Args:\n state: state\n \"\"\"\n Conf.view.auto_refresh.set(state)\n for datapanel in (self.signalpanel, self.imagepanel):\n datapanel.plothandler.set_auto_refresh(state)\n\n # ------Common features\n @remote_controlled\n def reset_all(self) -> None:\n \"\"\"Reset all application data\"\"\"\n for panel in self.panels:\n if panel is not None:\n panel.remove_all_objects()\n\n @staticmethod\n def __check_h5file(filename: str, operation: str) -> str:\n \"\"\"Check HDF5 filename\"\"\"\n filename = osp.abspath(osp.normpath(filename))\n bname = osp.basename(filename)\n if operation == \"load\" and not osp.isfile(filename):\n raise IOError(f'File not found \"{bname}\"')\n if not filename.endswith(\".h5\"):\n raise IOError(f'Invalid HDF5 file \"{bname}\"')\n Conf.main.base_dir.set(filename)\n return filename\n\n @remote_controlled\n def save_to_h5_file(self, filename=None) -> None:\n \"\"\"Save to a DataLab HDF5 file\n\n Args:\n filename (str): HDF5 filename. 
If None, a file dialog is opened.\n\n Raises:\n IOError: if filename is invalid or file cannot be saved.\n \"\"\"\n if filename is None:\n basedir = Conf.main.base_dir.get()\n with qth.save_restore_stds():\n filename, _fl = getsavefilename(self, _(\"Save\"), basedir, \"HDF5 (*.h5)\")\n if not filename:\n return\n with qth.qt_try_loadsave_file(self, filename, \"save\"):\n filename = self.__check_h5file(filename, \"save\")\n self.h5inputoutput.save_file(filename)\n self.set_modified(False)\n\n @remote_controlled\n def open_h5_files(\n self,\n h5files: list[str] | None = None,\n import_all: bool | None = None,\n reset_all: bool | None = None,\n ) -> None:\n \"\"\"Open a DataLab HDF5 file or import from any other HDF5 file.\n\n Args:\n h5files: HDF5 filenames (optionally with dataset name, separated by \":\")\n import_all (bool): Import all datasets from HDF5 files\n reset_all (bool): Reset all application data before importing\n\n Returns:\n None\n \"\"\"\n if not self.confirm_memory_state():\n return\n if reset_all is None:\n reset_all = False\n if self.has_objects():\n answer = QW.QMessageBox.question(\n self,\n _(\"Warning\"),\n _(\n \"Do you want to remove all signals and images \"\n \"before importing data from HDF5 files?\"\n ),\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n if answer == QW.QMessageBox.Yes:\n reset_all = True\n if h5files is None:\n basedir = Conf.main.base_dir.get()\n with qth.save_restore_stds():\n h5files, _fl = getopenfilenames(self, _(\"Open\"), basedir, \"HDF5 (*.h5)\")\n for fname_with_dset in h5files:\n if \",\" in fname_with_dset:\n filename, dsetname = fname_with_dset.split(\",\")\n else:\n filename, dsetname = fname_with_dset, None\n if import_all is None and dsetname is None:\n self.import_h5_file(filename, reset_all)\n else:\n with qth.qt_try_loadsave_file(self, filename, \"load\"):\n filename = self.__check_h5file(filename, \"load\")\n if dsetname is None:\n self.h5inputoutput.open_file(filename, import_all, reset_all)\n else:\n self.h5inputoutput.import_dataset_from_file(filename, dsetname)\n reset_all = False\n\n @remote_controlled\n def import_h5_file(self, filename: str, reset_all: bool | None = None) -> None:\n \"\"\"Import HDF5 file into DataLab\n\n Args:\n filename (str): HDF5 filename (optionally with dataset name,\n separated by \":\")\n reset_all (bool): Delete all DataLab signals/images before importing data\n\n Returns:\n None\n \"\"\"\n with qth.qt_try_loadsave_file(self, filename, \"load\"):\n filename = self.__check_h5file(filename, \"load\")\n self.h5inputoutput.import_file(filename, False, reset_all)\n\n # This method is intentionally *not* remote controlled\n # (see TODO regarding RemoteClient.add_object method)\n # @remote_controlled\n def add_object(self, obj: SignalObj | ImageObj) -> None:\n \"\"\"Add object - signal or image\n\n Args:\n obj (SignalObj or ImageObj): object to add (signal or image)\n \"\"\"\n if self.confirm_memory_state():\n if isinstance(obj, SignalObj):\n self.signalpanel.add_object(obj)\n elif isinstance(obj, ImageObj):\n self.imagepanel.add_object(obj)\n else:\n raise TypeError(f\"Unsupported object type {type(obj)}\")\n\n @remote_controlled\n def open_object(self, filename: str) -> None:\n \"\"\"Open object from file in current panel (signal/image)\n\n Args:\n filename (str): HDF5 filename\n\n Returns:\n None\n \"\"\"\n panel = self.tabwidget.currentWidget()\n panel.open_object(filename)\n\n # ------Other methods related to AbstractCDLControl interface\n def get_version(self) -> str:\n \"\"\"Return DataLab 
version.\n\n Returns:\n str: DataLab version\n \"\"\"\n return __version__\n\n def close_application(self) -> None: # Implementing AbstractCDLControl interface\n \"\"\"Close DataLab application\"\"\"\n self.close()\n\n def raise_window(self) -> None: # Implementing AbstractCDLControl interface\n \"\"\"Raise DataLab window\"\"\"\n bring_to_front(self)\n\n def add_signal(\n self,\n title: str,\n xdata: np.ndarray,\n ydata: np.ndarray,\n xunit: str | None = None,\n yunit: str | None = None,\n xlabel: str | None = None,\n ylabel: str | None = None,\n ) -> bool: # pylint: disable=too-many-arguments\n \"\"\"Add signal data to DataLab.\n\n Args:\n title (str): Signal title\n xdata (numpy.ndarray): X data\n ydata (numpy.ndarray): Y data\n xunit (str | None): X unit. Defaults to None.\n yunit (str | None): Y unit. Defaults to None.\n xlabel (str | None): X label. Defaults to None.\n ylabel (str | None): Y label. Defaults to None.\n\n Returns:\n bool: True if signal was added successfully, False otherwise\n\n Raises:\n ValueError: Invalid xdata dtype\n ValueError: Invalid ydata dtype\n \"\"\"\n obj = create_signal(\n title,\n xdata,\n ydata,\n units=(xunit, yunit),\n labels=(xlabel, ylabel),\n )\n self.add_object(obj)\n return True\n\n def add_image(\n self,\n title: str,\n data: np.ndarray,\n xunit: str | None = None,\n yunit: str | None = None,\n zunit: str | None = None,\n xlabel: str | None = None,\n ylabel: str | None = None,\n zlabel: str | None = None,\n ) -> bool: # pylint: disable=too-many-arguments\n \"\"\"Add image data to DataLab.\n\n Args:\n title (str): Image title\n data (numpy.ndarray): Image data\n xunit (str | None): X unit. Defaults to None.\n yunit (str | None): Y unit. Defaults to None.\n zunit (str | None): Z unit. Defaults to None.\n xlabel (str | None): X label. Defaults to None.\n ylabel (str | None): Y label. Defaults to None.\n zlabel (str | None): Z label. 
Defaults to None.\n\n Returns:\n bool: True if image was added successfully, False otherwise\n\n Raises:\n ValueError: Invalid data dtype\n \"\"\"\n obj = create_image(\n title,\n data,\n units=(xunit, yunit, zunit),\n labels=(xlabel, ylabel, zlabel),\n )\n self.add_object(obj)\n return True\n\n # ------?\n def __about(self) -> None: # pragma: no cover\n \"\"\"About dialog box\"\"\"\n self.check_stable_release()\n if self.remote_server.port is None:\n xrpcstate = '<font color=\"red\">' + _(\"not started\") + \"</font>\"\n else:\n xrpcstate = _(\"started (port %s)\") % self.remote_server.port\n xrpcstate = f\"<font color='green'>{xrpcstate}</font>\"\n if Conf.main.process_isolation_enabled.get():\n pistate = \"<font color='green'>\" + _(\"enabled\") + \"</font>\"\n else:\n pistate = \"<font color='red'>\" + _(\"disabled\") + \"</font>\"\n adv_conf = \"<br>\".join(\n [\n \"<i>\" + _(\"Advanced configuration:\") + \"</i>\",\n \"• \" + _(\"XML-RPC server:\") + \" \" + xrpcstate,\n \"• \" + _(\"Process isolation:\") + \" \" + pistate,\n ]\n )\n pinfos = PluginRegistry.get_plugin_infos()\n created_by = _(\"Created by\")\n dev_by = _(\"Developed and maintained by %s open-source project team\") % APP_NAME\n copyrght = \"2023 Codra\"\n QW.QMessageBox.about(\n self,\n _(\"About\") + \" \" + APP_NAME,\n f\"\"\"<b>{APP_NAME}</b> v{__version__}<br>{APP_DESC}\n <p>{created_by} Pierre Raybaut<br>{dev_by}<br>Copyright &copy; {copyrght}\n <p>{adv_conf}<br><br>{pinfos}\"\"\",\n )\n\n def __edit_settings(self) -> None:\n \"\"\"Edit settings\"\"\"\n changed_options = edit_settings(self)\n for option in changed_options:\n if option == \"plot_toolbar_position\":\n for dock in self.docks.values():\n widget = dock.widget()\n if isinstance(widget, DockablePlotWidget):\n widget.update_toolbar_position()\n if option == \"ima_defaults\" and len(self.imagepanel) > 0:\n answer = QW.QMessageBox.question(\n self,\n _(\"Visualization settings\"),\n _(\n \"Default visualization settings have changed.<br><br>\"\n \"Do you want to update all active %s objects?\"\n )\n % _(\"image\"),\n QW.QMessageBox.Yes | QW.QMessageBox.No,\n )\n if answer == QW.QMessageBox.Yes:\n self.imagepanel.update_metadata_view_settings()\n\n def __show_logviewer(self) -> None:\n \"\"\"Show error logs\"\"\"\n logviewer.exec_cdl_logviewer_dialog(self)\n\n @staticmethod\n def test_segfault_error() -> None:\n \"\"\"Generate errors (both fault and traceback)\"\"\"\n import ctypes # pylint: disable=import-outside-toplevel\n\n ctypes.string_at(0)\n raise RuntimeError(\"!!! 
Testing RuntimeError !!!\")\n\n    def show(self) -> None:\n        \"\"\"Reimplement QMainWindow method\"\"\"\n        super().show()\n        if self.__old_size is not None:\n            self.resize(self.__old_size)\n\n    # ------Close window\n    def close_properly(self) -> bool:\n        \"\"\"Close properly\n\n        Returns:\n            bool: True if closed properly, False otherwise\n        \"\"\"\n        if not env.execenv.unattended and self.__is_modified:\n            answer = QW.QMessageBox.warning(\n                self,\n                _(\"Quit\"),\n                _(\n                    \"Do you want to save all signals and images \"\n                    \"to an HDF5 file before quitting DataLab?\"\n                ),\n                QW.QMessageBox.Yes | QW.QMessageBox.No | QW.QMessageBox.Cancel,\n            )\n            if answer == QW.QMessageBox.Yes:\n                self.save_to_h5_file()\n                if self.__is_modified:\n                    return False\n            elif answer == QW.QMessageBox.Cancel:\n                return False\n        for panel in self.panels:\n            if panel is not None:\n                panel.close()\n        if self.console is not None:\n            try:\n                self.console.close()\n            except RuntimeError:\n                # TODO: [P3] Investigate further why the following error occurs when\n                # restarting the mainwindow (this is *not* a production case):\n                # \"RuntimeError: wrapped C/C++ object of type DockableConsole\n                # has been deleted\".\n                # Another solution to avoid this error would be to really restart\n                # the application (run each unit test in a separate process), but\n                # it would represent too much effort for an error occurring in test\n                # configurations only.\n                pass\n        self.reset_all()\n        self.__save_pos_and_size()\n        self.__unregister_plugins()\n\n        # Saving current tab for next session\n        Conf.main.current_tab.set(self.tabwidget.currentIndex())\n\n        execenv.log(self, \"closed properly\")\n        return True\n\n    def closeEvent(self, event: QG.QCloseEvent) -> None:\n        \"\"\"Reimplement QMainWindow method\"\"\"\n        if self.hide_on_close:\n            self.__old_size = self.size()\n            self.hide()\n        else:\n            if self.close_properly():\n                self.SIG_CLOSING.emit()\n                event.accept()\n            else:\n                event.ignore()" }, { "identifier": "ImagePanel", "path": "cdl/core/gui/panel/image.py", "snippet": "class ImagePanel(BaseDataPanel):\n    \"\"\"Object handling the item list, the selected item properties and plot,\n    specialized for Image objects\"\"\"\n\n    PANEL_STR = _(\"Image panel\")\n    PARAMCLASS = ImageObj\n    DIALOGSIZE = (800, 800)\n    ANNOTATION_TOOLS = (\n        AnnotatedCircleTool,\n        AnnotatedSegmentTool,\n        AnnotatedRectangleTool,\n        AnnotatedPointTool,\n        AnnotatedEllipseTool,\n        LabelTool,\n    )\n    IO_REGISTRY = ImageIORegistry\n    H5_PREFIX = \"DataLab_Ima\"\n    ROIDIALOGOPTIONS = {\"show_itemlist\": True, \"show_contrast\": False}\n    ROIDIALOGCLASS = roieditor.ImageROIEditor\n\n    # pylint: disable=duplicate-code\n\n    def __init__(self, parent: QW.QWidget, plotwidget: PlotWidget, toolbar) -> None:\n        super().__init__(parent, plotwidget, toolbar)\n        self.plothandler = ImagePlotHandler(self, plotwidget)\n        self.processor = ImageProcessor(self, plotwidget)\n        self.acthandler = ImageActionHandler(self, toolbar)\n\n    # ------Refreshing GUI--------------------------------------------------------------\n    def properties_changed(self) -> None:\n        \"\"\"The properties 'Apply' button was clicked: updating signal\"\"\"\n        obj = self.objview.get_current_object()\n        if obj is not None:\n            obj.invalidate_maskdata_cache()\n        super().properties_changed()\n\n    # ------Creating, adding, removing objects------------------------------------------\n    def get_newparam_from_current(\n        self, newparam: NewImageParam | None = None\n    ) -> NewImageParam | None:\n        \"\"\"Get new object parameters from the current object.\n\n        Args:\n            newparam (guidata.dataset.DataSet): new object 
parameters.\n                If None, create a new one.\n\n        Returns:\n            New object parameters\n        \"\"\"\n        curobj: ImageObj = self.objview.get_current_object()\n        newparam = new_image_param() if newparam is None else newparam\n        if curobj is not None:\n            newparam.width, newparam.height = curobj.size\n            newparam.dtype = ImageDatatypes.from_dtype(curobj.data.dtype)\n        return newparam\n\n    def new_object(\n        self,\n        newparam: NewImageParam | None = None,\n        addparam: gds.DataSet | None = None,\n        edit: bool = True,\n        add_to_panel: bool = True,\n    ) -> ImageObj | None:\n        \"\"\"Create a new object (image).\n\n        Args:\n            newparam (guidata.dataset.DataSet): new object parameters\n            addparam (guidata.dataset.DataSet): additional parameters\n            edit (bool): Open a dialog box to edit parameters (default: True)\n            add_to_panel (bool): Add the object to the panel (default: True)\n\n        Returns:\n            New object\n        \"\"\"\n        if not self.mainwindow.confirm_memory_state():\n            return None\n        newparam = self.get_newparam_from_current(newparam)\n        image = create_image_from_param(\n            newparam, addparam=addparam, edit=edit, parent=self\n        )\n        if image is None:\n            return None\n        if add_to_panel:\n            self.add_object(image)\n        return image\n\n    def delete_metadata(self, refresh_plot: bool = True) -> None:\n        \"\"\"Delete metadata of selected objects\n\n        Args:\n            refresh_plot (bool | None): Refresh plot. Defaults to True.\n        \"\"\"\n        for obj in self.objview.get_sel_objects(include_groups=True):\n            obj.invalidate_maskdata_cache()\n        super().delete_metadata(refresh_plot)\n\n    def toggle_show_contrast(self, state: bool) -> None:\n        \"\"\"Toggle show contrast option\"\"\"\n        Conf.view.show_contrast.set(state)\n        self.SIG_REFRESH_PLOT.emit(\"selected\", True)" }, { "identifier": "SignalPanel", "path": "cdl/core/gui/panel/signal.py", "snippet": "class SignalPanel(BaseDataPanel):\n    \"\"\"Object handling the item list, the selected item properties and plot,\n    specialized for Signal objects\"\"\"\n\n    PANEL_STR = _(\"Signal panel\")\n    PARAMCLASS = SignalObj\n    ANNOTATION_TOOLS = (\n        LabelTool,\n        VCursorTool,\n        HCursorTool,\n        XCursorTool,\n        SegmentTool,\n        RectangleTool,\n        HRangeTool,\n    )\n    IO_REGISTRY = SignalIORegistry\n    H5_PREFIX = \"DataLab_Sig\"\n    ROIDIALOGCLASS = roieditor.SignalROIEditor\n\n    # pylint: disable=duplicate-code\n\n    def __init__(self, parent: QW.QWidget, plotwidget: PlotWidget, toolbar) -> None:\n        super().__init__(parent, plotwidget, toolbar)\n        self.plothandler = SignalPlotHandler(self, plotwidget)\n        self.processor = SignalProcessor(self, plotwidget)\n        self.acthandler = SignalActionHandler(self, toolbar)\n\n    # ------Creating, adding, removing objects------------------------------------------\n    def get_newparam_from_current(\n        self, newparam: NewSignalParam | None = None\n    ) -> NewSignalParam | None:\n        \"\"\"Get new object parameters from the current object.\n\n        Args:\n            newparam (guidata.dataset.DataSet): new object parameters.\n                If None, create a new one.\n\n        Returns:\n            New object parameters\n        \"\"\"\n        curobj: SignalObj = self.objview.get_current_object()\n        newparam = new_signal_param() if newparam is None else newparam\n        if curobj is not None:\n            newparam.size = len(curobj.data)\n            newparam.xmin = curobj.x.min()\n            newparam.xmax = curobj.x.max()\n        return newparam\n\n    def new_object(\n        self,\n        newparam: NewSignalParam | None = None,\n        addparam: gds.DataSet | None = None,\n        edit: bool = True,\n        add_to_panel: bool = True,\n    ) -> SignalObj | None:\n        \"\"\"Create a new object (signal).\n\n        Args:\n            newparam (guidata.dataset.DataSet): new object parameters\n            
addparam (guidata.dataset.DataSet): additional parameters\n edit (bool): Open a dialog box to edit parameters (default: True)\n add_to_panel (bool): Add the new object to the panel (default: True)\n\n Returns:\n New object\n \"\"\"\n if not self.mainwindow.confirm_memory_state():\n return None\n newparam = self.get_newparam_from_current(newparam)\n signal = create_signal_from_param(\n newparam, addparam=addparam, edit=edit, parent=self\n )\n if signal is None:\n return None\n if add_to_panel:\n self.add_object(signal)\n return signal\n\n # ------Plotting--------------------------------------------------------------------\n def toggle_anti_aliasing(self, state: bool) -> None:\n \"\"\"Toggle anti-aliasing on/off\n\n Args:\n state: state of the anti-aliasing\n \"\"\"\n self.plothandler.toggle_anti_aliasing(state)" }, { "identifier": "create_paracetamol_signal", "path": "cdl/tests/data.py", "snippet": "def create_paracetamol_signal(\n size: int | None = None, title: str | None = None\n) -> cdl.obj.SignalObj:\n \"\"\"Create test signal (Paracetamol molecule spectrum)\n\n Args:\n size (int | None): Size of the data. Defaults to None.\n title (str | None): Title of the signal. Defaults to None.\n\n Returns:\n SignalObj: Signal object\n \"\"\"\n obj = cdl.obj.read_signal(get_test_fnames(\"paracetamol.txt\")[0])\n if title is not None:\n obj.title = title\n if size is not None:\n x0, y0 = obj.xydata\n x1 = np.linspace(x0[0], x0[-1], size)\n y1 = np.interp(x1, x0, y0)\n obj.set_xydata(x1, y1)\n return obj" }, { "identifier": "create_peak2d_image", "path": "cdl/tests/data.py", "snippet": "def create_peak2d_image(\n p: cdl.obj.NewImageParam | None = None,\n) -> cdl.obj.ImageObj:\n \"\"\"Creating 2D peak image\n\n Args:\n p (cdl.obj.NewImageParam | None): Image parameters. Defaults to None.\n\n Returns:\n cdl.obj.ImageObj: Image object\n \"\"\"\n p = __set_default_size_dtype(p)\n p.title = \"Test image (2D peaks)\" if p.title is None else p.title\n obj = cdl.obj.create_image_from_param(p)\n param = PeakDataParam()\n if p.height is not None and p.width is not None:\n param.size = max(p.height, p.width)\n obj.data = get_peak2d_data(param)\n return obj" }, { "identifier": "create_sincos_image", "path": "cdl/tests/data.py", "snippet": "def create_sincos_image(\n p: cdl.obj.NewImageParam | None = None,\n) -> cdl.obj.ImageObj:\n \"\"\"Creating test image (sin(x)+cos(y))\n\n Args:\n p (cdl.obj.NewImageParam | None): Image parameters. 
Defaults to None.\n\n Returns:\n cdl.obj.ImageObj: Image object\n \"\"\"\n p = __set_default_size_dtype(p)\n p.title = \"Test image (sin(x)+cos(y))\" if p.title is None else p.title\n dtype = p.dtype.value\n x, y = np.meshgrid(np.linspace(0, 10, p.width), np.linspace(0, 10, p.height))\n raw_data = 0.5 * (np.sin(x) + np.cos(y)) + 0.5\n dmin = np.iinfo(dtype).min * 0.95\n dmax = np.iinfo(dtype).max * 0.95\n obj = cdl.obj.create_image_from_param(p)\n obj.data = np.array(raw_data * (dmax - dmin) + dmin, dtype=dtype)\n return obj" }, { "identifier": "iterate_image_creation", "path": "cdl/tests/features/common/newobject_unit.py", "snippet": "def iterate_image_creation(\n data_size: int = 500, non_zero: bool = False, verbose: bool = True\n) -> Generator[ImageObj, None, None]:\n \"\"\"Iterate over all possible images created from parameters\"\"\"\n if verbose:\n execenv.print(\n f\" Iterating over image types (size={data_size}, non_zero={non_zero}):\"\n )\n for itype in ImageTypes:\n if non_zero and itype in (ImageTypes.EMPTY, ImageTypes.ZEROS):\n continue\n if verbose:\n execenv.print(f\" {itype.value}\")\n for dtype in ImageDatatypes:\n if verbose:\n execenv.print(f\" {dtype.value}\")\n newparam = new_image_param(\n itype=itype, dtype=dtype, width=data_size, height=data_size\n )\n if itype == ImageTypes.GAUSS:\n addparam = Gauss2DParam()\n addparam.x0 = addparam.y0 = 3\n addparam.sigma = 5\n elif itype == ImageTypes.UNIFORMRANDOM:\n addparam = UniformRandomParam()\n addparam.set_from_datatype(dtype.value)\n elif itype == ImageTypes.NORMALRANDOM:\n addparam = NormalRandomParam()\n addparam.set_from_datatype(dtype.value)\n else:\n addparam = None\n image = create_image_from_param(newparam, addparam=addparam)\n if itype == ImageTypes.ZEROS:\n assert (image.data == 0).all()\n yield image" }, { "identifier": "iterate_signal_creation", "path": "cdl/tests/features/common/newobject_unit.py", "snippet": "def iterate_signal_creation(\n data_size: int = 500, non_zero: bool = False, verbose: bool = True\n) -> Generator[SignalObj, None, None]:\n \"\"\"Iterate over all possible signals created from parameters\"\"\"\n if verbose:\n execenv.print(\n f\" Iterating over signal types (size={data_size}, non_zero={non_zero}):\"\n )\n for stype in SignalTypes:\n if non_zero and stype in (SignalTypes.ZEROS,):\n continue\n if verbose:\n execenv.print(f\" {stype.value}\")\n newparam = new_signal_param(stype=stype, size=data_size)\n if stype == SignalTypes.UNIFORMRANDOM:\n addparam = UniformRandomParam()\n elif stype == SignalTypes.NORMALRANDOM:\n addparam = NormalRandomParam()\n else:\n addparam = None\n signal = create_signal_from_param(newparam, addparam=addparam)\n if stype == SignalTypes.ZEROS:\n assert (signal.y == 0).all()\n yield signal" }, { "identifier": "fitdialog", "path": "cdl/widgets/fitdialog.py", "snippet": "def guifit(\n x,\n y,\n fitfunc,\n fitparams,\n fitargs=None,\n fitkwargs=None,\n wintitle=None,\n title=None,\n xlabel=None,\n ylabel=None,\n param_cols=1,\n auto_fit=True,\n winsize=None,\n winpos=None,\n parent=None,\n name=None,\n):\ndef polynomialfit(x, y, degree, parent=None, name=None):\n def fitfunc(x, params):\ndef gaussianfit(x, y, parent=None, name=None):\n def fitfunc(x, params):\ndef lorentzianfit(x, y, parent=None, name=None):\n def fitfunc(x, params):\ndef voigtfit(x, y, parent=None, name=None):\n def fitfunc(x, params):\ndef multigaussian(x, *values, **kwargs):\ndef multigaussianfit(x, y, peak_indexes, parent=None, name=None):\n def fitfunc(xi, params):" } ]
import numpy as np import cdl.obj as dlo import cdl.param as dlp from cdl.config import _ from cdl.core.gui.main import CDLMainWindow from cdl.core.gui.panel.image import ImagePanel from cdl.core.gui.panel.signal import SignalPanel from cdl.tests.data import ( create_paracetamol_signal, create_peak2d_image, create_sincos_image, ) from cdl.tests.features.common.newobject_unit import ( iterate_image_creation, iterate_signal_creation, ) from cdl.widgets import fitdialog
19718
    panel.add_label_with_title()
    __compute_11_operations(panel, 2)


def run_signal_computations(
    win: CDLMainWindow, data_size: int = 500, all_types: bool = True
) -> None:
    """Testing signal features"""
    panel = win.signalpanel
    win.set_current_panel("signal")
    if all_types:
        for signal in iterate_signal_creation(data_size, non_zero=True):
            panel.add_object(create_paracetamol_signal(data_size))
            panel.add_object(signal)
            compute_common_operations(panel)
            panel.remove_all_objects()
    sig1 = create_paracetamol_signal(data_size)
    win.add_object(sig1)

    # Add new signal based on s0
    panel.objview.set_current_object(sig1)
    newparam = dlo.new_signal_param(
        _("Random function"), stype=dlo.SignalTypes.UNIFORMRANDOM
    )
    addparam = dlo.UniformRandomParam.create(vmin=0, vmax=sig1.y.max() * 0.2)
    panel.new_object(newparam, addparam=addparam, edit=False)
    compute_common_operations(panel)
    win.add_object(create_paracetamol_signal(data_size))
    param = dlp.NormalizeYParam()
    for _name, method in param.methods:
        param.method = method
        panel.processor.compute_normalize(param)
    param = dlp.XYCalibrateParam.create(a=1.2, b=0.1)
    panel.processor.compute_calibration(param)
    panel.processor.compute_derivative()
    panel.processor.compute_integral()
    param = dlp.PeakDetectionParam()
    panel.processor.compute_peak_detection(param)
    panel.processor.compute_multigaussianfit()
    panel.objview.select_objects([-3])
    sig = panel.objview.get_sel_objects()[0]
    i1 = data_size // 10
    i2 = len(sig.y) - i1
    panel.processor.compute_roi_extraction(dlp.ROIDataParam.create([[i1, i2]]))
    param = dlp.PolynomialFitParam()
    panel.processor.compute_polyfit(param)
    panel.processor.compute_fit(_("Gaussian fit"), fitdialog.gaussianfit)
    panel.processor.compute_fit(_("Lorentzian fit"), fitdialog.lorentzianfit)
    panel.processor.compute_fit(_("Voigt fit"), fitdialog.voigtfit)
    newparam = dlo.new_signal_param(_("Gaussian"), stype=dlo.SignalTypes.GAUSS)
    sig = dlo.create_signal_from_param(
        newparam, dlo.GaussLorentzVoigtParam(), edit=False
    )
    panel.add_object(sig)
    param = dlp.FWHMParam()
    for fittype, _name in param.fittypes:
        param.fittype = fittype
        panel.processor.compute_fwhm(param)
    panel.processor.compute_fw1e2()

    # Create a new signal whose X values are a subset of sig1
    x = np.linspace(sig1.x.min(), sig1.x.max(), data_size // 2)[: data_size // 4]
    y = x * 0.0
    sig2 = dlo.create_signal("X values for interpolation", x, y)
    panel.add_object(sig2)

    # Test interpolation
    for method_choice_tuple in dlp.InterpolationParam._methods:
        method = method_choice_tuple[0]
        for fill_value in (None, 0.0):
            panel.objview.set_current_object(sig1)
            param = dlp.InterpolationParam.create(method=method, fill_value=fill_value)
            panel.processor.compute_interpolation(sig2, param)

    # Test resampling
    xmin, xmax = x[0], x[-1]
    for mode, dx, nbpts in (("dx", 0.1, 10), ("nbpts", 0.0, 100)):
        panel.objview.set_current_object(sig1)
        param = dlp.ResamplingParam.create(
            xmin=xmin, xmax=xmax, mode=mode, dx=dx, nbpts=nbpts
        )
        panel.processor.compute_resampling(param)

    # Test convolution
    panel.objview.set_current_object(sig1)
    panel.processor.compute_derivative()
    panel.processor.compute_convolution(sig1)

    # Test detrending
    panel.objview.set_current_object(sig1)
    for method_choice_tuple in dlp.DetrendingParam._methods:
        param = dlp.DetrendingParam.create(method=method_choice_tuple[0])
        panel.processor.compute_detrending(param)


def run_image_computations(
    win: CDLMainWindow, data_size: int = 150, all_types: bool = True
) -> None:
    """Testing image features"""
    win.set_current_panel("image")
    panel = win.imagepanel
    newparam = dlo.new_image_param(height=data_size, width=data_size)
    if all_types:
# -*- coding: utf-8 -*-
#
# Licensed under the terms of the BSD 3-Clause
# (see cdl/LICENSE for details)

"""
Scenarios common functions
"""

# pylint: disable=invalid-name  # Allows short reference names like x, y, ...

# guitest: skip

from __future__ import annotations


def __compute_11_operations(panel: SignalPanel | ImagePanel, number: int) -> None:
    """Test compute_11 type operations on a signal or image

    Requires that one signal or image has been added at index."""
    assert len(panel) >= number - 1
    panel.objview.select_objects((number,))
    panel.processor.compute_gaussian_filter(dlp.GaussianParam())
    panel.processor.compute_moving_average(dlp.MovingAverageParam())
    panel.processor.compute_moving_median(dlp.MovingMedianParam())
    panel.processor.compute_wiener()
    panel.processor.compute_fft()
    panel.processor.compute_ifft()
    panel.processor.compute_abs()
    panel.remove_object()
    panel.processor.compute_re()
    panel.remove_object()
    panel.processor.compute_im()
    panel.remove_object()
    panel.processor.compute_astype(dlp.DataTypeIParam.create(dtype="float64"))
    panel.processor.compute_log10()
    panel.processor.compute_swap_axes()
    panel.processor.compute_swap_axes()


def compute_common_operations(panel: SignalPanel | ImagePanel) -> None:
    """Test operations common to signal/image

    Requires that two (and only two) signals/images are created/added to panel

    First signal/image is supposed to be always the same (reference)
    Second signal/image is the tested object
    """
    assert len(panel) == 2
    panel.objview.select_objects((2,))
    panel.processor.compute_difference(panel[1])  # difference with obj #1
    panel.remove_object()
    panel.objview.select_objects((2,))
    panel.processor.compute_quadratic_difference()  # quadratic difference with itself
    panel.delete_metadata()
    panel.objview.select_objects((3,))
    panel.remove_object()
    panel.objview.select_objects((1, 2))
    panel.processor.compute_sum()
    panel.objview.select_objects((1, 2))
    panel.processor.compute_sum()
    panel.objview.select_objects((1, 2))
    panel.processor.compute_product()
    obj = panel.objmodel.get_groups()[0][-1]
    param = dlp.ThresholdParam()
    param.value = (obj.data.max() - obj.data.min()) * 0.2 + obj.data.min()
    panel.processor.compute_threshold(param)
    param = dlp.ClipParam()  # Clipping before division...
    param.value = (obj.data.max() - obj.data.min()) * 0.8 + obj.data.min()
    panel.processor.compute_clip(param)
    panel.objview.select_objects((3, 7))
    panel.processor.compute_division()
    panel.objview.select_objects((1, 2, 3))
    panel.processor.compute_average()
    panel.add_label_with_title()
    __compute_11_operations(panel, 2)


def run_signal_computations(
    win: CDLMainWindow, data_size: int = 500, all_types: bool = True
) -> None:
    """Testing signal features"""
    panel = win.signalpanel
    win.set_current_panel("signal")
    if all_types:
        for signal in iterate_signal_creation(data_size, non_zero=True):
            panel.add_object(create_paracetamol_signal(data_size))
            panel.add_object(signal)
            compute_common_operations(panel)
            panel.remove_all_objects()
    sig1 = create_paracetamol_signal(data_size)
    win.add_object(sig1)

    # Add new signal based on s0
    panel.objview.set_current_object(sig1)
    newparam = dlo.new_signal_param(
        _("Random function"), stype=dlo.SignalTypes.UNIFORMRANDOM
    )
    addparam = dlo.UniformRandomParam.create(vmin=0, vmax=sig1.y.max() * 0.2)
    panel.new_object(newparam, addparam=addparam, edit=False)
    compute_common_operations(panel)
    win.add_object(create_paracetamol_signal(data_size))
    param = dlp.NormalizeYParam()
    for _name, method in param.methods:
        param.method = method
        panel.processor.compute_normalize(param)
    param = dlp.XYCalibrateParam.create(a=1.2, b=0.1)
    panel.processor.compute_calibration(param)
    panel.processor.compute_derivative()
    panel.processor.compute_integral()
    param = dlp.PeakDetectionParam()
    panel.processor.compute_peak_detection(param)
    panel.processor.compute_multigaussianfit()
    panel.objview.select_objects([-3])
    sig = panel.objview.get_sel_objects()[0]
    i1 = data_size // 10
    i2 = len(sig.y) - i1
    panel.processor.compute_roi_extraction(dlp.ROIDataParam.create([[i1, i2]]))
    param = dlp.PolynomialFitParam()
    panel.processor.compute_polyfit(param)
    panel.processor.compute_fit(_("Gaussian fit"), fitdialog.gaussianfit)
    panel.processor.compute_fit(_("Lorentzian fit"), fitdialog.lorentzianfit)
    panel.processor.compute_fit(_("Voigt fit"), fitdialog.voigtfit)
    newparam = dlo.new_signal_param(_("Gaussian"), stype=dlo.SignalTypes.GAUSS)
    sig = dlo.create_signal_from_param(
        newparam, dlo.GaussLorentzVoigtParam(), edit=False
    )
    panel.add_object(sig)
    param = dlp.FWHMParam()
    for fittype, _name in param.fittypes:
        param.fittype = fittype
        panel.processor.compute_fwhm(param)
    panel.processor.compute_fw1e2()

    # Create a new signal whose X values are a subset of sig1
    x = np.linspace(sig1.x.min(), sig1.x.max(), data_size // 2)[: data_size // 4]
    y = x * 0.0
    sig2 = dlo.create_signal("X values for interpolation", x, y)
    panel.add_object(sig2)

    # Test interpolation
    for method_choice_tuple in dlp.InterpolationParam._methods:
        method = method_choice_tuple[0]
        for fill_value in (None, 0.0):
            panel.objview.set_current_object(sig1)
            param = dlp.InterpolationParam.create(method=method, fill_value=fill_value)
            panel.processor.compute_interpolation(sig2, param)

    # Test resampling
    xmin, xmax = x[0], x[-1]
    for mode, dx, nbpts in (("dx", 0.1, 10), ("nbpts", 0.0, 100)):
        panel.objview.set_current_object(sig1)
        param = dlp.ResamplingParam.create(
            xmin=xmin, xmax=xmax, mode=mode, dx=dx, nbpts=nbpts
        )
        panel.processor.compute_resampling(param)

    # Test convolution
    panel.objview.set_current_object(sig1)
    panel.processor.compute_derivative()
    panel.processor.compute_convolution(sig1)

    # Test detrending
    panel.objview.set_current_object(sig1)
    for method_choice_tuple in dlp.DetrendingParam._methods:
        param = dlp.DetrendingParam.create(method=method_choice_tuple[0])
        panel.processor.compute_detrending(param)


def run_image_computations(
    win: CDLMainWindow, data_size: int = 150, all_types: bool = True
) -> None:
    """Testing image features"""
    win.set_current_panel("image")
    panel = win.imagepanel
    newparam = dlo.new_image_param(height=data_size, width=data_size)
    if all_types:
for image in iterate_image_creation(data_size, non_zero=True):
7
2023-11-09 16:56:03+00:00
24k
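Read together, the fields of the row above form a next-line completion task: cropped_code is the code immediately before the hole, next_line is the ground-truth continuation, and gold_snippet_index picks out the one context snippet the continuation actually depends on (here plausibly the iterate_image_creation snippet that next_line calls). Below is a minimal sketch, not part of the dataset, of how such a row could be assembled into a (prompt, target) pair; it assumes each row is parsed into a Python dict with exactly the field names shown in this dump, and the prompt layout and the helper name build_completion_example are illustrative assumptions rather than the dataset's official recipe.

def build_completion_example(row: dict) -> tuple[str, str]:
    """Assemble a (prompt, target) pair from one dataset row (sketch)."""
    # Cross-file context: the snippet designated by the gold index
    # (assumes 0-based indexing into the row's "context" list).
    gold = row["context"][row["gold_snippet_index"]]["snippet"]
    header = f"# repo: {row['repo_name']} -- file: {row['file_path']}"
    # Prompt = retrieved context + target-file imports + code before the hole.
    prompt = "\n".join([header, gold, row["import_statement"], row["cropped_code"]])
    return prompt, row["next_line"]  # the model should generate next_line

For the row above, for example, the returned target would be the for-loop header stored in next_line, and a completion is counted correct if the model reproduces it from the prompt.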
ingra14m/Tensor4D-DNeRF
exp_runner.py
[ { "identifier": "Dataset", "path": "models/dataset.py", "snippet": "class Dataset:\n def __init__(self, conf):\n super(Dataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.data_dir = conf.get_string('data_dir')\n self.render_cameras_name = conf.get_string('render_cameras_name')\n self.object_cameras_name = conf.get_string('object_cameras_name')\n\n self.camera_outside_sphere = conf.get_bool('camera_outside_sphere', default=True)\n self.scale_mat_scale = conf.get_float('scale_mat_scale', default=1.1)\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n camera_dict = np.load(os.path.join(self.data_dir, self.render_cameras_name))\n self.camera_dict = camera_dict\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'image/*.png')))\n self.n_images = len(self.images_lis)\n self.images_np = np.stack([cv.imread(im_name) for im_name in self.images_lis]) / 256.0\n self.masks_lis = sorted(glob(os.path.join(self.data_dir, 'mask/*.png')))\n self.masks_np = np.stack([cv.imread(im_name) for im_name in self.masks_lis]) / 256.0\n\n # world_mat is a projection matrix from world to image\n self.world_mats_np = [camera_dict['world_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n self.fid_list = [torch.LongTensor(np.array([camera_dict['fid_%d' % idx]])) for idx in range(self.n_images)]\n self.scale_mats_np = []\n\n # scale_mat: used for coordinate normalization, we assume the scene to render is inside a unit sphere at origin.\n self.scale_mats_np = [camera_dict['scale_mat_%d' % idx].astype(np.float32) for idx in range(self.n_images)]\n\n self.intrinsics_all = []\n self.pose_all = []\n self.proj_all = []\n\n for scale_mat, world_mat in zip(self.scale_mats_np, self.world_mats_np):\n P = world_mat @ scale_mat\n P = P[:3, :4]\n intrinsics, pose = load_K_Rt_from_P(None, P)\n self.intrinsics_all.append(torch.from_numpy(intrinsics).float())\n self.pose_all.append(torch.from_numpy(pose).float())\n self.proj_all.append(torch.from_numpy(P).float())\n\n self.images = torch.from_numpy(self.images_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.masks = torch.from_numpy(self.masks_np.astype(np.float32)).cpu() # [n_images, H, W, 3]\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2), (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # [n_images, W, H, 3]\n \n self.intrinsics_all = torch.stack(self.intrinsics_all).to(self.device) # [n_images, 4, 4]\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.focal = self.intrinsics_all[0][0, 0]\n self.pose_all = torch.stack(self.pose_all).to(self.device) # [n_images, 4, 4]\n self.proj_all = torch.stack(self.proj_all).to(self.device)\n self.H, self.W = self.images.shape[1], self.images.shape[2]\n self.image_pixels = self.H * self.W\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = (self.fid_all / self.n_frames * 2) - 0.95\n\n object_bbox_min = np.array([-1.01, -1.01, -1.01, 1.0])\n object_bbox_max = np.array([ 1.01, 1.01, 1.01, 1.0])\n # Object scale mat: region of interest to **extract mesh**\n object_scale_mat = 
np.load(os.path.join(self.data_dir, self.object_cameras_name))['scale_mat_0']\n object_bbox_min = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_min[:, None]\n object_bbox_max = np.linalg.inv(self.scale_mats_np[0]) @ object_scale_mat @ object_bbox_max[:, None]\n self.object_bbox_min = object_bbox_min[:3, 0]\n self.object_bbox_max = object_bbox_max[:3, 0]\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W*self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # 
batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()], dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, rots)\n rot = slerp(ratio)\n pose = np.diag([1.0, 1.0, 1.0, 1.0])\n pose = pose.astype(np.float32)\n pose[:3, :3] = rot.as_matrix()\n pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n pose = np.linalg.inv(pose)\n rot = torch.from_numpy(pose[:3, :3]).cuda()\n trans = torch.from_numpy(pose[:3, 3]).cuda()\n rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n rays_o = trans[None, None, :3].expand(rays_v.shape) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def near_far_from_sphere(self, rays_o, rays_d):\n if self.near > 0:\n return self.near, self.far\n a = torch.sum(rays_d**2, dim=-1, keepdim=True)\n b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n mid = 0.5 * (-b) / a\n near = mid - 1.0\n far = mid + 1.0\n return near, far\n\n def image_at(self, idx, resolution_level):\n img = cv.imread(self.images_lis[idx])\n return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "BlenderDataset", "path": "models/dataset.py", "snippet": "class BlenderDataset:\n def __init__(self, conf):\n super(BlenderDataset, self).__init__()\n print('Load data: Begin')\n self.device = torch.device('cuda')\n self.conf = conf\n\n self.near = conf.get_float('near', default=-1)\n self.far = conf.get_float('far', default=-1)\n self.n_frames = conf.get_int('n_frames', default=128)\n\n self.data_dir = conf.get_string('data_dir')\n splits = ['train']\n metas = {}\n for s in splits:\n with open(os.path.join(self.data_dir, 'transforms_{}.json'.format(s)), 'r') as fp:\n metas[s] = json.load(fp)\n self.images_lis = sorted(glob(os.path.join(self.data_dir, 'train/*.png')), key=lambda x: int(x.split('.')[0].split('_')[-1]))\n # if self.data_dir.split('/')[-2] == 'lego':\n # # self.images_lis = self.images_lis[1:]\n # self.images_lis.append('/data00/yzy/Git_Project/data/dynamic/D-NeRF/lego/val/r_0.png')\n all_imgs = []\n 
all_poses = []\n all_masks = []\n all_times = []\n counts = [0]\n for s in splits:\n meta = metas[s]\n\n imgs = []\n poses = []\n times = []\n\n for t, frame in enumerate(meta['frames']):\n fname = os.path.join(self.data_dir, frame['file_path'] + '.png')\n image = cv.imread(fname, cv.IMREAD_UNCHANGED)\n imgs.append(image)\n pose = np.array(frame['transform_matrix'])\n time = np.array([frame['time']])\n\n a = pose[:, 0:1]\n b = pose[:, 1:2]\n c = pose[:, 2:3]\n d = pose[:, 3:].copy()\n d[:3, :] *= 0.8\n\n pose = np.concatenate([a, -b, -c, d], 1)\n\n poses.append(pose)\n times.append(time)\n\n imgs = (np.array(imgs) / 255.).astype(np.float32) # keep all 4 channels (RGBA)\n poses = np.array(poses).astype(np.float32)\n times = np.array(times).astype(np.float32)\n masks = (imgs[..., 3:] > 0).astype(np.float32)\n imgs = imgs[..., :3]\n counts.append(counts[-1] + imgs.shape[0])\n all_imgs.append(imgs)\n all_poses.append(poses)\n all_masks.append(masks)\n all_times.append(times)\n\n self.images = torch.from_numpy(np.concatenate(all_imgs, 0)).cpu()\n self.masks = torch.from_numpy(np.concatenate(all_masks, 0)).cpu()\n self.radius = torch.zeros(self.masks.shape[0], self.masks.shape[2], self.masks.shape[1], 1) # no use\n self.errors = self.masks[:, :, :, :1].clone()\n self.errors = F.interpolate(self.errors.permute(0, 3, 1, 2),\n (self.images.shape[1] // 8, self.images.shape[2] // 8), mode='bilinear')\n self.errors = F.max_pool2d(self.errors, 7, stride=1, padding=3)\n self.errors = self.errors.permute(0, 2, 3, 1)\n self.n_images = self.images.shape[0]\n\n self.fid_list = [torch.LongTensor(np.array([idx])) for idx in range(self.n_images)]\n # if self.data_dir.split('/')[-2] == 'lego':\n # self.fid_list[-1] = torch.LongTensor(np.array([0]))\n self.pose_all = torch.from_numpy(np.concatenate(all_poses, 0)).to(self.device)\n self.fid_all = torch.stack(self.fid_list).to(self.device)\n self.time_emb_list = torch.from_numpy(np.concatenate(all_times, 0)).to(self.device)\n\n self.H, self.W = self.images[0].shape[:2]\n self.image_pixels = self.H * self.W\n\n camera_angle_x = float(meta['camera_angle_x'])\n self.focal = .5 * self.W / np.tan(.5 * camera_angle_x)\n intrinsics = torch.Tensor(\n [[self.focal, 0, 0.5 * self.W, 0],\n [0, self.focal, 0.5 * self.H, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]]).to(self.device)\n self.intrinsics_all = intrinsics.expand(self.n_images, -1, -1)\n self.intrinsics_all_inv = torch.inverse(self.intrinsics_all) # [n_images, 4, 4]\n self.object_bbox_min = np.array([-1.01, -1.01, -1.01]) # hard code bbox\n self.object_bbox_max = np.array([1.01, 1.01, 1.01])\n self.process_radius()\n\n print('Load data: End')\n\n def process_radius(self):\n for img_idx in tqdm(range(self.images.shape[0])):\n tx = torch.linspace(0, self.W - 1, self.W, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n # Cut the distance in half, and then round it out so that it's\n # halfway between inscribed by / circumscribed about the pixel.\n radii = dx[..., None] * 2 / np.sqrt(12)\n self.radius[img_idx] = 
radii.detach().cpu() # W H 3\n\n def gen_rays_at(self, img_idx, resolution_level=1):\n \"\"\"\n Generate rays at world space from one camera.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[img_idx, None, None, :3, :3],\n p[:, :, :, None]).squeeze() # W, H, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, None, :3, :3], rays_v[:, :, :, None]).squeeze() # W, H, 3\n dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n rays_r = dx[..., None] * 2 / np.sqrt(12)\n rays_o = self.pose_all[img_idx, None, None, :3, 3].expand(rays_v.shape) # W, H, 3\n rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True) # W, H, 3\n return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n def gen_random_rays_at(self, img_idx, batch_size):\n \"\"\"\n Generate random rays at world space from one camera.\n \"\"\"\n error = self.errors[img_idx].reshape(-1).numpy()\n max_error = np.max(error) + 1e-8\n error = error / max_error\n error[error < 0.1] = 0.1\n error = error / np.sum(error)\n index = np.arange(0, self.W * self.H // 64)\n select_index = np.random.choice(index, size=[batch_size], p=error)\n pixels_y = torch.LongTensor(select_index // (self.W // 8)) * 8\n pixels_y += torch.randint_like(pixels_y, 8)\n pixels_x = torch.LongTensor(select_index % (self.W // 8)) * 8\n pixels_x += torch.randint_like(pixels_x, 8)\n\n color = self.images[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n mask = self.masks[img_idx][(pixels_y, pixels_x)] # batch_size, 3\n rays_r = self.radius[img_idx][(pixels_x, pixels_y)] # batch_size, 1\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1).float().to(\n self.device) # batch_size, 3\n p = torch.matmul(self.intrinsics_all_inv[img_idx, None, :3, :3], p[:, :, None]).squeeze() # batch_size, 3\n rays_v = p / torch.linalg.norm(p, ord=2, dim=-1, keepdim=True) # batch_size, 3\n rays_v = torch.matmul(self.pose_all[img_idx, None, :3, :3], rays_v[:, :, None]).squeeze() # batch_size, 3\n rays_o = self.pose_all[img_idx, None, :3, 3].expand(rays_v.shape) # batch_size, 3\n return torch.cat([rays_o.cpu(), rays_v.cpu(), color.cpu(), mask[:, :1].cpu(), rays_r.cpu()],\n dim=-1).cuda(), pixels_y.cpu(), pixels_x.cpu() # batch_size, 10\n\n def gen_rays_between(self, idx_0, idx_1, ratio, resolution_level=1):\n \"\"\"\n Interpolate pose between two cameras.\n \"\"\"\n l = resolution_level\n tx = torch.linspace(0, self.W - 1, self.W // l, device=self.device)\n ty = torch.linspace(0, self.H - 1, self.H // l, device=self.device)\n pixels_x, pixels_y = torch.meshgrid(tx, ty)\n p = torch.stack([pixels_x, pixels_y, torch.ones_like(pixels_y)], dim=-1) # W, H, 3\n rays_v = torch.matmul(self.intrinsics_all_inv[0, None, None, :3, :3], p[:, :, :, None]).squeeze() # W, H, 3\n trans = self.pose_all[idx_0, :3, 3] * (1.0 - ratio) + self.pose_all[idx_1, :3, 3] * ratio\n pose_0 = self.pose_all[idx_0].detach().cpu().numpy()\n pose_1 = self.pose_all[idx_1].detach().cpu().numpy()\n pose_0 = np.linalg.inv(pose_0)\n pose_1 = np.linalg.inv(pose_1)\n rot_0 = pose_0[:3, :3]\n rot_1 = pose_1[:3, :3]\n rots = Rot.from_matrix(np.stack([rot_0, rot_1]))\n key_times = [0, 1]\n slerp = Slerp(key_times, 
rots)\n        rot = slerp(ratio)\n        pose = np.diag([1.0, 1.0, 1.0, 1.0])\n        pose = pose.astype(np.float32)\n        pose[:3, :3] = rot.as_matrix()\n        pose[:3, 3] = ((1.0 - ratio) * pose_0 + ratio * pose_1)[:3, 3]\n        pose = np.linalg.inv(pose)\n        rot = torch.from_numpy(pose[:3, :3]).cuda()\n        trans = torch.from_numpy(pose[:3, 3]).cuda()\n        rays_v = torch.matmul(rot[None, None, :3, :3], rays_v[:, :, :, None]).squeeze()  # W, H, 3\n        dx = torch.sqrt(torch.sum((rays_v[:-1, :, :] - rays_v[1:, :, :]) ** 2, dim=-1))\n        dx = torch.cat([dx, dx[-2:-1, :]], dim=0)\n        rays_r = dx[..., None] * 2 / np.sqrt(12)\n        rays_v = rays_v / torch.linalg.norm(rays_v, ord=2, dim=-1, keepdim=True)  # W, H, 3\n        rays_o = trans[None, None, :3].expand(rays_v.shape)  # W, H, 3\n        return rays_o.transpose(0, 1), rays_v.transpose(0, 1), rays_r.transpose(0, 1)\n\n    def near_far_from_sphere(self, rays_o, rays_d):\n        if self.near > 0:\n            return self.near, self.far\n        a = torch.sum(rays_d ** 2, dim=-1, keepdim=True)\n        b = 2.0 * torch.sum(rays_o * rays_d, dim=-1, keepdim=True)\n        mid = 0.5 * (-b) / a\n        near = mid - 1.0\n        far = mid + 1.0\n        return near, far\n\n    def image_at(self, idx, resolution_level):\n        img = cv.imread(self.images_lis[idx])\n        return (cv.resize(img, (self.W // resolution_level, self.H // resolution_level))).clip(0, 255)" }, { "identifier": "RenderingNetwork", "path": "models/fields.py", "snippet": "class RenderingNetwork(nn.Module):\n    def __init__(self,\n                 d_feature,\n                 mode,\n                 d_in,\n                 d_out,\n                 d_hidden,\n                 n_layers,\n                 weight_norm=True,\n                 multires_view=0,\n                 squeeze_out=True):\n        super().__init__()\n\n        self.mode = mode\n        self.squeeze_out = squeeze_out\n        dims = [d_in + d_feature] + [d_hidden for _ in range(n_layers)] + [d_out]\n\n        self.embedview_fn = None\n        if multires_view > 0:\n            embedview_fn, input_ch = get_embedder(multires_view)\n            self.embedview_fn = embedview_fn\n            dims[0] += (input_ch - 3)\n\n        self.num_layers = len(dims)\n\n        for l in range(0, self.num_layers - 1):\n            out_dim = dims[l + 1]\n            lin = nn.Linear(dims[l], out_dim)\n            if weight_norm:\n                lin = nn.utils.weight_norm(lin)\n\n            setattr(self, \"lin\" + str(l), lin)\n\n        self.relu = nn.ReLU()\n\n        self.mask = -torch.ones((1, 1, 256, 256, 256)).float().cuda()\n        \n\n    def forward(self, points, normals, view_dirs, feature_vectors):\n        if self.embedview_fn is not None:\n            view_dirs = self.embedview_fn(view_dirs)\n\n        rendering_input = None\n\n        if self.mode == 'idr':\n            rendering_input = torch.cat([points, view_dirs, normals, feature_vectors], dim=-1)\n        elif self.mode == 'no_view_dir':\n            rendering_input = torch.cat([points, normals, feature_vectors], dim=-1)\n        elif self.mode == 'no_normal':\n            rendering_input = torch.cat([points, view_dirs, feature_vectors], dim=-1)\n\n        x = rendering_input\n        for l in range(0, self.num_layers - 1):\n            lin = getattr(self, \"lin\" + str(l))\n\n            x = lin(x)\n\n            if l < self.num_layers - 2:\n                x = self.relu(x)\n\n        if self.squeeze_out:\n            x = torch.sigmoid(x)\n        return x" }, { "identifier": "FieldNetwork", "path": "models/fields.py", "snippet": "class FieldNetwork(nn.Module):\n    def __init__(self,\n                 d_in,\n                 d_out,\n                 d_hidden,\n                 d_t4d,\n                 min_emb,\n                 max_emb,\n                 n_layers,\n                 t_emb=-1,\n                 skip_in=(4,),\n                 bias=0.5,\n                 geometric_init=True,\n                 weight_norm=True):\n        super(FieldNetwork, self).__init__()\n\n        dims = [d_in] + [d_hidden for _ in range(n_layers)] + [d_out]\n        dims[0] = d_in + (max_emb - min_emb)*3*2\n\n        self.num_layers = len(dims)\n        self.skip_in = skip_in\n        self.min_emb = min_emb\n        self.max_emb = max_emb\n        self.t_emb = t_emb\n\n        if t_emb > 0:\n            embed_fn, time_input_ch = 
get_embedder(t_emb, input_dims=1)\n self.embed_fn = embed_fn\n dims[0] += time_input_ch\n\n for l in range(0, self.num_layers - 1):\n if l in self.skip_in:\n in_dim = dims[l] + dims[0] + d_t4d\n else:\n in_dim = dims[l]\n out_dim = dims[l+1]\n\n lin = nn.Linear(in_dim, out_dim)\n \n if geometric_init:\n if l == self.num_layers - 2:\n torch.nn.init.normal_(lin.weight, mean=np.sqrt(np.pi) / np.sqrt(dims[l]), std=0.0001)\n torch.nn.init.constant_(lin.bias, -bias)\n elif l == 0:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.constant_(lin.weight[:, 3:], 0.0)\n torch.nn.init.normal_(lin.weight[:, :3], 0.0, np.sqrt(2) / np.sqrt(out_dim))\n elif l in self.skip_in:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n torch.nn.init.constant_(lin.weight[:, -(dims[0] + d_t4d):], 0.0)\n else:\n torch.nn.init.constant_(lin.bias, 0.0)\n torch.nn.init.normal_(lin.weight, 0.0, np.sqrt(2) / np.sqrt(out_dim))\n\n if weight_norm:\n lin = nn.utils.weight_norm(lin)\n\n setattr(self, \"lin\" + str(l), lin)\n\n self.activation = nn.Softplus(beta=100)\n\n def set_tensor4d(self, tensor4d):\n self.tensor4d = tensor4d\n\n def forward(self, mean, cov, fid, time_emb, reg_l2=False):\n cones_embedding = integrated_pos_enc((mean[:, None, :], cov[:, None, :]), self.min_emb, self.max_emb, diagonal=True).reshape(mean.shape[0], -1)\n inputs = mean\n tri_feat = self.tensor4d(inputs, fid, torch.mean(time_emb))\n\n if reg_l2:\n d_vec = F.normalize(torch.randn_like(inputs), dim=-1) * 1e-3\n d_tri_feat = self.tensor4d(inputs + d_vec, fid, torch.mean(time_emb))\n pred_reg_l2 = (d_tri_feat - tri_feat)**2\n \n xyz = inputs\n if self.t_emb > 0:\n time_input = self.embed_fn(time_emb)\n x = torch.cat([xyz, cones_embedding, time_input], 1)\n else:\n x = torch.cat([xyz, cones_embedding], 1)\n\n for l in range(0, self.num_layers - 1):\n lin = getattr(self, \"lin\" + str(l))\n \n if l in self.skip_in:\n if self.t_emb > 0:\n x = torch.cat([x, tri_feat, xyz, cones_embedding, time_input], 1) / np.sqrt(2)\n else:\n x = torch.cat([x, tri_feat, xyz, cones_embedding], 1) / np.sqrt(2)\n x = lin(x)\n\n if l < self.num_layers - 2:\n x = self.activation(x)\n if reg_l2:\n return x, pred_reg_l2\n return x" }, { "identifier": "SingleVarianceNetwork", "path": "models/fields.py", "snippet": "class SingleVarianceNetwork(nn.Module):\n def __init__(self, init_val):\n super(SingleVarianceNetwork, self).__init__()\n init_tensor = torch.zeros(120)\n init_tensor[:] = init_val\n self.register_parameter('variance', nn.Parameter(init_tensor))\n\n def forward(self, x):\n return torch.ones([len(x), 1], device=x.device) * torch.exp(self.variance[0] * 10.0)" }, { "identifier": "Tensor4D", "path": "models/tensor4d.py", "snippet": "class Tensor4D(nn.Module):\n def __init__(self, feature_type, lr_resolution, hr_resolution, image_guide=False, image_guide_interval=2, image_guide_base=16) -> None:\n super(Tensor4D, self).__init__()\n \n self.data_dims = 0\n self.feature_type = feature_type\n if feature_type == '3d':\n self.feature_plane = SpacePlane(lr_resolution, hr_resolution)\n self.data_dims = self.feature_plane.dims\n elif feature_type == '4d':\n self.feature_plane = TimeSpacePlane(lr_resolution, hr_resolution)\n self.data_dims = self.feature_plane.dims\n\n self.img_dims = 0\n self.image_guide = image_guide\n if image_guide:\n self.conv_net = ConvNet(image_guide_base)\n self.img_dims = image_guide_base*8*2\n self.ig_interval = image_guide_interval\n\n if feature_type == '4d':\n self.compress_network 
= CompressNetwork(self.data_dims, self.data_dims // 3)\n self.compress_list = [self.compress_network.compress1, self.compress_network.compress2, self.compress_network.compress3]\n\n self.dims = self.data_dims + self.img_dims\n self.matMode = torch.BoolTensor([[0, 1, 1], [1, 0, 1], [1, 1, 0]]).cuda()\n self.vecMode = torch.BoolTensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).cuda()\n \n def get_data_parameters(self):\n return list(self.feature_plane.parameters())\n \n def get_network_parameters(self):\n params = []\n if self.feature_type == '4d':\n params += list(self.compress_network.parameters())\n if self.image_guide:\n params += list(self.conv_net.parameters())\n return params\n\n def set_images(self, image, proj):\n step = self.ig_interval\n select_proj = torch.cat([proj[i*step:i*step+1] for i in range(proj.shape[0] // step)], dim=0)\n self.proj = select_proj\n self.img_shape = image.shape\n select_image = torch.cat([image[i*step:i*step+1] for i in range(image.shape[0] // step)], dim=0)\n self.image_feature, self.image_feature_hr = self.conv_net(F.interpolate(select_image.permute(0, 3, 1, 2), size=(1024, 1024)))\n\n def forward(self, xyz_sampled_ori, fid, time_emb):\n sigma_feature_list = [] \n\n if self.image_guide:\n proj_pts = ((self.proj[:, :3, :3] @ xyz_sampled_ori.T.unsqueeze(0)) + self.proj[:, :3, 3:]).transpose(1, 2)\n proj_xy = proj_pts[:, :, :2] / (proj_pts[:, :, 2:] + 1e-6)\n B, H, W, C = self.img_shape\n proj_xy[:, :, 0] = (proj_xy[:, :, 0] - W / 2) / (W / 2)\n proj_xy[:, :, 1] = (proj_xy[:, :, 1] - H / 2) / (H / 2)\n N = self.image_feature.shape[0]\n img_feature = grid_sample(self.image_feature, proj_xy.reshape(N, -1, 1, 2)).reshape(N, -1, xyz_sampled_ori.shape[0])\n img_feature_cost = torch.sqrt(torch.sum((img_feature - torch.sum(img_feature, dim=0).unsqueeze(0) / N)**2, dim=0) / N + 1e-8)\n img_feature_max = torch.mean(img_feature, dim=0) + torch.max(img_feature, dim=0)[0]\n image_feature_hr = grid_sample(self.image_feature_hr, proj_xy.reshape(N, -1, 1, 2)).reshape(N, -1, xyz_sampled_ori.shape[0])\n image_feature_hr_cost = torch.sqrt(torch.sum((image_feature_hr - torch.sum(image_feature_hr, dim=0).unsqueeze(0) / N)**2, dim=0) / N + 1e-8)\n image_feature_hr_max = torch.mean(image_feature_hr, dim=0) + torch.max(image_feature_hr, dim=0)[0]\n sigma_feature_list = [img_feature_cost, img_feature_max, image_feature_hr_cost, image_feature_hr_max]\n \n xyz_sampled = xyz_sampled_ori\n scale = 1.0\n matMode = self.matMode\n coordinate_plane = torch.stack((xyz_sampled[..., matMode[0]] * scale, xyz_sampled[..., matMode[1]] * scale, xyz_sampled[..., matMode[2]] * scale)).view(3, -1, 1, 2)\n\n for idx_plane in range(3):\n sample_points = coordinate_plane[[idx_plane]]\n plane_coef_point = self.feature_plane.sample(sample_points, idx_plane, time_emb).view(-1, *xyz_sampled.shape[:1])\n if self.feature_type == '4d':\n plane_coef_point = self.compress_list[idx_plane](plane_coef_point.T).T\n sigma_feature_list.append(plane_coef_point)\n \n sigma_feature_list = torch.cat(sigma_feature_list, dim=0)\n # print(sigma_feature_list.shape)\n return sigma_feature_list.T" }, { "identifier": "NeuSRenderer", "path": "models/renderer.py", "snippet": "class NeuSRenderer:\n def __init__(self,\n sdf_network,\n deviation_network,\n color_network,\n mask3d,\n n_samples,\n n_importance,\n n_outside,\n up_sample_steps,\n perturb,\n reg_l2=False,\n mip_render=False,\n flow_network=None):\n \n self.sdf_network = sdf_network\n self.deviation_network = deviation_network\n self.color_network = color_network\n self.mask3d = 
mask3d\n self.n_samples = n_samples\n self.n_importance = n_importance\n self.n_outside = n_outside\n self.up_sample_steps = up_sample_steps\n self.perturb = perturb\n self.reg_l2 = reg_l2\n self.flow_network = flow_network\n self.mip_render = mip_render\n\n def mask_query_geometry(self, mean, cov, only_sdf=False):\n fid = self.fid\n time_emb = self.time_emb\n time_input = time_emb.expand(mean[:, :1].shape)\n space_time_input = torch.cat([mean, time_input], dim=-1)\n if not only_sdf:\n space_time_input.requires_grad_(True)\n inputs = space_time_input[:, :3]\n time_emb = space_time_input[:, 3:]\n N, _ = inputs.shape\n grads = torch.zeros((N, 4), device=inputs.device)\n sdf_nn = torch.zeros((N, 257), device=inputs.device)\n\n reg_l2 = torch.zeros((N, self.sdf_network.tensor4d.dims), device=inputs.device)\n grads[:, 0] = 1\n sdf_nn[:, 0] = -10\n\n mask = self.mask3d.valid_input(inputs, fid)\n if torch.sum(mask) == 0:\n results = {\n 'sdf_nn': sdf_nn,\n 'grads': grads[:, :3],\n 'time_grads': grads[:, 3:],\n 'pts_mask': mask,\n 'reg_l2': reg_l2\n }\n return results\n mask_mean = inputs[mask, :]\n mask_time_emb = time_emb[mask, :]\n mask_cov = cov[mask, :]\n \n if self.flow_network is not None:\n mask_cov = torch.zeros_like(mask_mean) # flow mode, disable mip_render\n if fid != 0:\n pred_flow = self.flow_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=False)\n mask_mean = mask_mean + pred_flow\n elif not self.mip_render:\n mask_cov = torch.zeros_like(mask_mean)\n\n if (not only_sdf) and self.reg_l2:\n pred_sdf_nn, pred_reg_l2 = self.sdf_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=True)\n reg_l2[mask] = pred_reg_l2\n else:\n pred_sdf_nn = self.sdf_network(mask_mean, mask_cov, fid, mask_time_emb, reg_l2=False)\n\n if not only_sdf:\n pred_sdf = pred_sdf_nn[:, :1]\n d_output = torch.ones_like(pred_sdf, requires_grad=False, device=pred_sdf.device)\n gradients = torch.autograd.grad(\n outputs=pred_sdf,\n inputs=space_time_input,\n grad_outputs=d_output,\n create_graph=True,\n retain_graph=True,\n only_inputs=True)[0]\n grads[mask] = gradients.reshape(-1, 4)[mask]\n \n sdf_nn[mask] = pred_sdf_nn\n results = {\n 'sdf_nn': sdf_nn,\n 'grads': grads[:, :3],\n 'time_grads': grads[:, 3:],\n 'pts_mask': mask,\n 'reg_l2': reg_l2\n }\n return results\n\n def mask_query_color(self, pts, mask, normals, view_dirs, features):\n N, _ = pts.shape\n out = torch.zeros((N, 3), device=pts.device)\n if torch.sum(mask) > 0:\n x = self.color_network(pts[mask], normals[mask], view_dirs[mask], features[mask])\n out[mask] = x\n return out\n else:\n return torch.zeros((N, 3), device=pts.device)\n\n def up_sample(self, rays_o, rays_d, z_vals, sdf, n_importance, inv_s, pts_mask):\n \"\"\"\n Up sampling give a fixed inv_s\n \"\"\"\n batch_size, n_samples = z_vals.shape\n pts = rays_o[:, None, :] + rays_d[:, None, :] * z_vals[..., :, None] # n_rays, n_samples, 3\n radius = torch.linalg.norm(pts, ord=2, dim=-1, keepdim=False)\n inside_sphere = (radius[:, :-1] < 1.0) | (radius[:, 1:] < 1.0)\n sdf = sdf.reshape(batch_size, n_samples)\n prev_sdf, next_sdf = sdf[:, :-1], sdf[:, 1:]\n prev_mask, next_mask = pts_mask[:, :-1], pts_mask[:, 1:]\n mid_mask = torch.logical_and(prev_mask, next_mask)\n prev_z_vals, next_z_vals = z_vals[:, :-1], z_vals[:, 1:]\n mid_sdf = (prev_sdf + next_sdf) * 0.5\n cos_val = (next_sdf - prev_sdf) / (next_z_vals - prev_z_vals + 1e-5)\n\n # ----------------------------------------------------------------------------------------------------------\n # Use min value of [ cos, prev_cos ]\n # 
Though it makes the sampling (not rendering) a little bit biased, this strategy can make the sampling more\n # robust when meeting situations like below:\n #\n # SDF\n # ^\n # |\\ -----x----...\n # | \\ /\n # | x x\n # |---\\----/-------------> 0 level\n # | \\ /\n # | \\/\n # |\n # ----------------------------------------------------------------------------------------------------------\n prev_cos_val = torch.cat([torch.zeros([batch_size, 1], device=sdf.device), cos_val[:, :-1]], dim=-1)\n cos_val = torch.stack([prev_cos_val, cos_val], dim=-1)\n cos_val, _ = torch.min(cos_val, dim=-1, keepdim=False)\n cos_val = cos_val.clip(-1e3, 0.0) * inside_sphere\n\n dist = (next_z_vals - prev_z_vals)\n prev_esti_sdf = mid_sdf - cos_val * dist * 0.5\n next_esti_sdf = mid_sdf + cos_val * dist * 0.5\n prev_cdf = torch.sigmoid(prev_esti_sdf * inv_s)\n next_cdf = torch.sigmoid(next_esti_sdf * inv_s)\n\n alpha = (prev_cdf - next_cdf + 1e-5) / (prev_cdf + 1e-5)\n alpha[~mid_mask] = 0\n alpha = alpha.clamp(0.0, 1.0)\n \n alpha = torch.cat([alpha, torch.zeros([batch_size, 1], device=alpha.device)], dim=-1)\n weights = alpha * torch.cumprod(\n torch.cat([torch.ones([batch_size, 1], device=alpha.device), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n\n z_samples = sample_pdf(z_vals, weights, n_importance, det=True).detach()\n return z_samples\n\n def cat_z_vals(self, rays_o, rays_d, z_vals, new_z_vals, sdf, pts_mask, last=False):\n batch_size, n_samples = z_vals.shape\n _, n_importance = new_z_vals.shape\n pts = rays_o[:, None, :] + rays_d[:, None, :] * new_z_vals[..., :, None]\n z_vals = torch.cat([z_vals, new_z_vals], dim=-1)\n z_vals, index = torch.sort(z_vals, dim=-1)\n if not last:\n new_sdf, new_pts_mask = self.sdf_network.sdf(pts.reshape(-1, 3), rt_mask=True)\n new_sdf = new_sdf.reshape(batch_size, n_importance)\n new_pts_mask = new_pts_mask.reshape(batch_size, n_importance)\n sdf = torch.cat([sdf, new_sdf], dim=-1)\n pts_mask = torch.cat([pts_mask, new_pts_mask], dim=-1)\n xx = torch.arange(batch_size)[:, None].expand(batch_size, n_samples + n_importance).reshape(-1)\n index = index.reshape(-1)\n sdf = sdf[(xx, index)].reshape(batch_size, n_samples + n_importance)\n pts_mask = pts_mask[(xx, index)].reshape(batch_size, n_samples + n_importance)\n\n return z_vals, sdf, pts_mask\n\n def render_core(self,\n rays_o,\n rays_d,\n rays_r,\n z_vals,\n sample_dist,\n background_alpha=None,\n background_sampled_color=None,\n background_rgb=None,\n cos_anneal_ratio=0.0):\n batch_size, n_samples = z_vals[:, :-1].shape\n\n # Section length\n dists = z_vals[..., 1:] - z_vals[..., :-1]\n cat_dists = torch.cat([dists, torch.Tensor([sample_dist]).to(dists.device).expand(dists[..., :1].shape)], -1)\n mid_z_vals = z_vals + cat_dists * 0.5\n\n cones = cast_rays(z_vals, rays_o, rays_d, rays_r, 'cone', diagonal=True)\n dirs = rays_d[:, None, :].expand(cones[0].shape)\n dirs = dirs.reshape(-1, 3)\n\n results = self.mask_query_geometry(cones[0].reshape(-1, 3), cones[1].reshape(-1, 3))\n sdf_nn_output, gradients, t_grads, pts_mask = results['sdf_nn'], results['grads'], results['time_grads'], results['pts_mask']\n sdf = sdf_nn_output[:, :1]\n feature_vector = sdf_nn_output[:, 1:]\n\n gradients = gradients.squeeze()\n sampled_color = self.mask_query_color(cones[0].reshape(-1, 3), pts_mask, gradients, dirs, feature_vector).reshape(batch_size, n_samples, 3)\n \n inv_s = self.deviation_network(torch.zeros([1, 3], device=sdf.device))[:, :1].clip(1e-6, 1e6) # Single parameter\n inv_s = inv_s.expand(batch_size * n_samples, 1)\n\n true_cos = 
(dirs * gradients).sum(-1, keepdim=True)\n\n # \"cos_anneal_ratio\" grows from 0 to 1 in the beginning training iterations. The anneal strategy below makes\n # the cos value \"not dead\" at the beginning training iterations, for better convergence.\n iter_cos = -(F.relu(-true_cos * 0.5 + 0.5) * (1.0 - cos_anneal_ratio) +\n F.relu(-true_cos) * cos_anneal_ratio) # always non-positive\n\n # Estimate signed distances at section points\n estimated_next_sdf = sdf + iter_cos * dists.reshape(-1, 1) * 0.5\n estimated_prev_sdf = sdf - iter_cos * dists.reshape(-1, 1) * 0.5\n\n prev_cdf = torch.sigmoid(estimated_prev_sdf * inv_s)\n next_cdf = torch.sigmoid(estimated_next_sdf * inv_s)\n\n p = prev_cdf - next_cdf\n c = prev_cdf\n\n alpha = ((p + 1e-5) / (c + 1e-5))\n \n alpha[~pts_mask] = 0\n alpha = alpha.reshape(batch_size, n_samples).clip(0.0, 1.0)\n \n weights = alpha * torch.cumprod(torch.cat([torch.ones([batch_size, 1], device=alpha.device), 1. - alpha + 1e-7], -1), -1)[:, :-1]\n weights_sum = weights.sum(dim=-1, keepdim=True)\n \n color = (sampled_color * weights[:, :, None]).sum(dim=1)\n if background_rgb is not None: # Fixed background, usually black\n color = color + background_rgb * (1.0 - weights_sum)\n\n # Eikonal loss\n gradient_error = torch.mean((torch.linalg.norm(gradients.reshape(batch_size, n_samples, 3), ord=2,\n dim=-1) - 1.0) ** 2)\n time_grad_error = torch.mean(t_grads**2)\n return {\n 'color': color,\n 'sdf': sdf,\n 'pts_mask': pts_mask,\n 'dists': dists,\n 'gradients': gradients.reshape(batch_size, n_samples, 3),\n 's_val': 1.0 / inv_s,\n 'mid_z_vals': mid_z_vals,\n 'weights': weights,\n 'gradient_error': gradient_error,\n 'time_grad_error': time_grad_error,\n 'reg_l2': results['reg_l2'].reshape(batch_size, n_samples, -1),\n }\n\n def render(self, rays_o, rays_d, rays_r, near, far, fid, time_emb, perturb_overwrite=-1, background_rgb=None, cos_anneal_ratio=0.0):\n self.fid = fid\n self.time_emb = time_emb\n self.mask3d.set_fid(fid)\n\n batch_size = len(rays_o)\n sample_dist = 2.0 / self.n_samples # Assuming the region of interest is a unit sphere\n z_vals = torch.linspace(0.0, 1.0, self.n_samples, device=rays_o.device)\n z_vals = near + (far - near) * z_vals[None, :]\n\n z_vals_outside = None\n \n n_samples = self.n_samples\n perturb = self.perturb\n\n if perturb_overwrite >= 0:\n perturb = perturb_overwrite\n if perturb > 0:\n t_rand = (torch.rand([batch_size, 1], device=z_vals.device) - 0.5)\n z_vals = z_vals + t_rand * 2.0 / self.n_samples\n\n background_alpha = None\n background_sampled_color = None\n\n # Up sample\n if self.n_importance > 0:\n with torch.no_grad():\n cast_z_vals = torch.cat([z_vals, z_vals[:, -1:]], dim=1)\n cones = cast_rays(cast_z_vals, rays_o, rays_d, rays_r, 'cone', diagonal=True)\n results = self.mask_query_geometry(cones[0].reshape(-1, 3), cones[1].reshape(-1, 3), only_sdf=True)\n sdf, pts_mask = results['sdf_nn'][:, :1], results['pts_mask']\n # sdf, pts_mask = self.sdf_network.sdf(pts.reshape(-1, 3), rt_mask=True)\n sdf = sdf.reshape(batch_size, self.n_samples)\n pts_mask = pts_mask.reshape(batch_size, self.n_samples)\n for i in range(self.up_sample_steps):\n new_z_vals = self.up_sample(rays_o,\n rays_d,\n z_vals,\n sdf,\n self.n_importance // self.up_sample_steps + 1,\n 64 * 2**i, pts_mask)\n z_vals, sdf, pts_mask = self.cat_z_vals(rays_o,\n rays_d,\n z_vals,\n new_z_vals,\n sdf, pts_mask,\n last=(i + 1 == self.up_sample_steps))\n\n n_samples = self.n_samples + self.n_importance\n\n background_alpha = None\n background_sampled_color = None\n 
sample_dist = 1e-2\n\n # Render core\n ret_fine = self.render_core(rays_o,\n rays_d,\n rays_r,\n z_vals,\n sample_dist,\n background_rgb=background_rgb,\n background_alpha=background_alpha,\n background_sampled_color=background_sampled_color,\n cos_anneal_ratio=cos_anneal_ratio)\n\n\n return {\n 'color_fine': ret_fine['color'],\n 's_val': ret_fine['s_val'].reshape(batch_size, n_samples).mean(dim=-1, keepdim=True),\n 'mid_z_vals': ret_fine['mid_z_vals'],\n 'weights': ret_fine['weights'],\n 'weight_sum': ret_fine['weights'].sum(dim=-1, keepdim=True),\n 'weight_max': torch.max(ret_fine['weights'], dim=-1, keepdim=True)[0],\n 'gradients': ret_fine['gradients'],\n 'gradient_error': ret_fine['gradient_error'],\n 'time_grad_error': ret_fine['time_grad_error'],\n 'reg_l2': ret_fine['reg_l2']\n }" }, { "identifier": "Mask3D", "path": "models/mask.py", "snippet": "class Mask3D:\n def __init__(self, mask_type, num_frames=None, mask_reso=None, device=None):\n self.mask_type = mask_type # 'bounding or visualhull'\n if mask_type == 'visualhull':\n self.R = mask_reso\n self.mask = torch.ones([num_frames, self.R, self.R, self.R]).float()\n self.device = device\n self.current_fid = -1\n self.current_mask = None\n\n def set_fid(self, fid):\n if fid != self.current_fid:\n self.current_fid = fid\n if self.mask_type == 'visualhull':\n self.current_mask = self.mask[fid.cpu()].to(self.device)\n \n def valid_input(self, pts, fid):\n with torch.no_grad():\n pts = pts.reshape(1, -1, 1, 1, 3)\n pts_max = torch.max(pts, dim=-1)[0]\n pts_min = torch.min(pts, dim=-1)[0]\n mask_max = (pts_max > 1).reshape(-1)\n mask_min = (pts_min < -1).reshape(-1)\n if self.mask_type == 'visualhull':\n R = self.R\n sigma = F.grid_sample(self.current_mask.view(1, 1, R, R, R), pts, mode='bilinear', padding_mode='border').reshape(-1)\n calc_mask = sigma < 0.05\n else:\n calc_mask = torch.ones_like(mask_max)\n calc_mask[mask_max] = 0\n calc_mask[mask_min] = 0\n return calc_mask\n\n def visualhull(self, pts_ori, projs, masks, g_nums):\n cam_nums = projs.shape[0]\n interval = 1\n pts_mask = torch.zeros(pts_ori.shape[0], g_nums)\n out_mask = torch.zeros(pts_ori.shape[0])\n N, H, W, C = masks.shape\n for gp in range(cam_nums // (g_nums*interval)):\n for j in range(g_nums):\n i = j + gp*(g_nums*interval)\n mask = masks[i, :, :, :1].permute(2, 0, 1).unsqueeze(0).clone()\n mask = torch.max_pool2d(mask, 7, 1, 3, 1)\n pts = torch.cat([pts_ori, torch.ones_like(pts_ori[:, :1])], dim=-1)\n pts = projs[i] @ pts.T\n pts = pts[:2] / pts[2:]\n pts[0] = pts[0] / W * 2 - 1\n pts[1] = pts[1] / H * 2 - 1\n pts = pts.T.reshape(1, -1, 1, 2)\n \n sample_mask = torch.nn.functional.grid_sample(mask, pts, mode='bilinear', padding_mode='zeros').reshape(-1)\n pts_mask[:, j] = sample_mask\n pts_mask_sum = torch.min(pts_mask, dim=1)[0]\n valid = pts_mask_sum > 0.1\n out_mask[valid] = -1\n if gp == 0:\n out_mask[~valid] = 1\n return out_mask\n\n def compute_image_mask(self, projs, masks, g_nums):\n N = 64\n R = self.R\n X = torch.linspace(-1, 1, R).split(N)\n Y = torch.linspace(-1, 1, R).split(N)\n Z = torch.linspace(-1, 1, R).split(N)\n cam_nums = projs.shape[0]\n \n self.mask = self.mask.to(self.device)\n for gp in tqdm(range(cam_nums // g_nums)):\n # for gp in range(1):\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = torch.meshgrid(xs, ys, zs)\n pts = torch.cat([zz.reshape(-1, 1), yy.reshape(-1, 1), xx.reshape(-1, 1)], dim=-1).to(self.device)\n val = self.visualhull(pts, 
projs[gp*g_nums:gp*g_nums+g_nums].to(self.device), masks[gp*g_nums:gp*g_nums+g_nums].to(self.device), g_nums).reshape(len(xs), len(ys), len(zs))\n self.mask[gp, xi * N: xi * N + len(xs),yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val\n self.mask = self.mask.unsqueeze(1)\n self.mask = -torch.max_pool3d(-self.mask, 7, 1, 3)\n self.mask[self.mask > -0.5] = 1\n self.mask = self.mask.detach().cpu()\n \n def compute_mask(self, fid, query_func, inv_s):\n N = 64\n R = 128\n X = torch.linspace(-1, 1, R).split(N)\n Y = torch.linspace(-1, 1, R).split(N)\n Z = torch.linspace(-1, 1, R).split(N)\n from .renderer import sigma_f\n mask = self.mask[fid].reshape(R, R, R).clone()\n self.triplane[0].flow(fid)\n with torch.no_grad():\n for xi, xs in enumerate(X):\n for yi, ys in enumerate(Y):\n for zi, zs in enumerate(Z):\n xx, yy, zz = torch.meshgrid(xs, ys, zs)\n pts = torch.cat([zz.reshape(-1, 1), yy.reshape(-1, 1), xx.reshape(-1, 1)], dim=-1)\n val = sigma_f(query_func(pts), inv_s).reshape(len(xs), len(ys), len(zs))\n mask[xi * N: xi * N + len(xs),yi * N: yi * N + len(ys), zi * N: zi * N + len(zs)] = val\n valid = mask > 0.02\n mask[valid] = 1\n mask[~valid] = -1\n mask = -torch.max_pool3d(mask.reshape(1, 1, 128, 128, 128), 7, 1, 3)\n self.mask[fid][mask[0] > -0.5] = 1" } ]
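The NeuSRenderer snippets in the context above (up_sample and render_core) both turn consecutive SDF samples along a ray into opacity through sigmoid CDFs. A minimal standalone sketch of that conversion follows; the function name, tensor shapes, and the inv_s sharpness value are illustrative assumptions, not part of the repository:

import torch

def sdf_to_alpha(prev_sdf: torch.Tensor, next_sdf: torch.Tensor,
                 inv_s: float = 64.0, eps: float = 1e-5) -> torch.Tensor:
    # Phi = sigmoid(s * sdf) acts as a CDF; alpha is the normalized CDF drop
    # between section endpoints, clamped to [0, 1] as in up_sample/render_core.
    prev_cdf = torch.sigmoid(prev_sdf * inv_s)
    next_cdf = torch.sigmoid(next_sdf * inv_s)
    alpha = (prev_cdf - next_cdf + eps) / (prev_cdf + eps)
    return alpha.clamp(0.0, 1.0)

# A sign change across the section (a surface crossing) yields high opacity,
# while a section that stays outside the surface yields near-zero opacity:
print(sdf_to_alpha(torch.tensor([0.1, 0.5]), torch.tensor([-0.1, 0.4])))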
import os import time import logging import argparse import numpy as np import cv2 as cv import torch import torch.nn.functional as F from torch.utils.tensorboard import SummaryWriter from shutil import copyfile from tqdm import tqdm from pyhocon import ConfigFactory from models.dataset import Dataset, BlenderDataset from models.fields import RenderingNetwork, FieldNetwork, SingleVarianceNetwork from models.tensor4d import Tensor4D from models.renderer import NeuSRenderer from models.mask import Mask3D from metrics import *
15,678
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False)
os.environ['KMP_DUPLICATE_LIB_OK'] = 'True' class Runner: def __init__(self, conf_path, mode='train', case='CASE_NAME', is_continue=False): self.device = torch.device('cuda') # Configuration self.conf_path = conf_path f = open(self.conf_path) conf_text = f.read() conf_text = conf_text.replace('CASE_NAME', case) f.close() self.conf = ConfigFactory.parse_string(conf_text) self.conf['dataset.data_dir'] = self.conf['dataset.data_dir'].replace('CASE_NAME', case) self.base_exp_dir = self.conf['general.base_exp_dir'] os.makedirs(self.base_exp_dir, exist_ok=True) self.is_blender = self.conf['dataset'].get_bool('is_blender', default=False)
self.dataset = BlenderDataset(self.conf['dataset']) if self.is_blender else Dataset(self.conf['dataset'])
0
2023-11-07 10:16:33+00:00
24k
Giftify-Bot/Giftify-Bot
bot.py
[ { "identifier": "GuildConfig", "path": "models/giveaway_settings.py", "snippet": "class GuildConfig:\n \"\"\"Represents the configuration settings for a guild.\n\n Parameters\n ----------\n guild: discord.Guild\n The guild associated with the configuration.\n logging: Optional[discord.TextChannel]\n The logging text channel for the guild.\n ping: Optional[discord.Role]\n The role to ping for notifications.\n reaction: str\n The reaction used for giveaways.\n participants_reaction,: str\n The reaction used for giveaways participants button.\n required_roles: List[discord.Role]\n The default roles required to join giveaway.\n blacklisted_roles: List[discord.Role]\n The default roles blacklisted from joining a giveaway.\n bypass_roles: List[discord.Role]\n The roles that bypass_roles certain restrictions.\n multiplier_roles: Dict[discord.Role, int]\n The multiplier_roles points assigned to each role.\n managers: List[discord.Role]\n The roles with manager permissions.\n dm_winner: bool\n Whether to send a direct message to the winner.\n dm_host: bool\n Whether to send a direct message to the host.\n channel_settings: List[ChannelConfig]\n The settings for each channel.\n color: discord.Colour\n The color used for messages.\n button_style: discord.ButtonStyle\n The style of the button.\n end_message: str\n The message sent when a giveaway ends.\n reroll_message: str\n The message sent when a giveaway rerolls.\n dm_message: str\n The direct message sent to winner.\n dm_host_message: str\n The direct message sent to host.\n gw_header: str\n The header for the giveaway message.\n gw_end_header: str\n The header for the giveaway end.\n \"\"\"\n\n __slots__: Tuple[str, ...] = (\n \"guild\",\n \"logging\",\n \"ping\",\n \"reaction\",\n \"participants_reaction\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"managers\",\n \"dm_winner\",\n \"dm_host\",\n \"channel_settings\",\n \"color\",\n \"button_style\",\n \"end_message\",\n \"reroll_message\",\n \"dm_message\",\n \"dm_host_message\",\n \"gw_header\",\n \"gw_end_header\",\n )\n\n def __init__(\n self,\n guild: discord.Guild,\n *,\n logging: Optional[discord.TextChannel],\n ping: Optional[discord.Role],\n reaction: str,\n participants_reaction: str,\n required_roles: List[discord.Role],\n blacklisted_roles: List[discord.Role],\n bypass_roles: List[discord.Role],\n multiplier_roles: Dict[discord.Role, int],\n managers: List[discord.Role],\n dm_winner: bool,\n dm_host: bool,\n channel_settings: List[ChannelConfig],\n color: discord.Colour,\n button_style: discord.ButtonStyle,\n end_message: str,\n reroll_message: str,\n dm_message: str,\n dm_host_message: str,\n gw_header: str,\n gw_end_header: str,\n ):\n self.guild = guild\n self.logging = logging\n self.ping = ping\n self.reaction = reaction\n self.participants_reaction = participants_reaction\n self.required_roles = required_roles\n self.blacklisted_roles = blacklisted_roles\n self.bypass_roles = bypass_roles\n self.multiplier_roles = multiplier_roles\n self.managers = managers\n self.dm_winner = dm_winner\n self.dm_host = dm_host\n self.channel_settings = channel_settings\n self.color = color\n self.button_style = button_style\n self.end_message = end_message\n self.reroll_message = reroll_message\n self.dm_host_message = dm_host_message\n self.dm_message = dm_message\n self.gw_header = gw_header\n self.gw_end_header = gw_end_header\n\n def __repr__(self):\n return f\"<GuildConfig guild={self.guild!r}>\"\n\n @staticmethod\n async def 
_create_config(guild_id: int, pool: asyncpg.Pool) -> asyncpg.Record:\n return await pool.fetchrow(\n \"INSERT INTO configs (guild) VALUES ($1) RETURNING *\",\n guild_id,\n )\n\n @classmethod\n def _from_data(\n cls,\n guild: discord.Guild,\n data: asyncpg.Record,\n channel_data: List[asyncpg.Record],\n ) -> \"GuildConfig\":\n data = dict(data)\n data[\"color\"] = discord.Colour(data[\"color\"])\n\n data[\"logging\"] = guild.get_channel(data[\"logging\"])\n data[\"ping\"] = guild.get_role(data[\"ping\"])\n data[\"required_roles\"] = [\n guild.get_role(role) for role in data[\"required_roles\"] if role is not None\n ]\n data[\"blacklisted_roles\"] = [\n guild.get_role(role)\n for role in data[\"blacklisted_roles\"]\n if role is not None\n ]\n data[\"bypass_roles\"] = [\n guild.get_role(role) for role in data[\"bypass_roles\"] if role is not None\n ]\n data[\"multiplier_roles\"] = {\n guild.get_role(role): multiplier\n for role, multiplier in data[\"multiplier_roles\"].items()\n if role is not None and multiplier > 1\n }\n data[\"managers\"] = [\n guild.get_role(role) for role in data[\"managers\"] if role is not None\n ]\n\n data[\"button_style\"] = discord.utils.get(\n discord.ButtonStyle, value=data[\"button_style\"]\n )\n\n data[\"channel_settings\"] = [\n channel_setting\n for record in channel_data\n if (channel_setting := ChannelConfig.from_data(guild, record))\n ]\n\n data.pop(\"guild\") # We do not need this.\n\n return cls(guild, **data)\n\n def to_dict(self) -> GuildConfigData:\n \"\"\"Converts this GuildConfig object into a dict.\"\"\"\n\n data = GuildConfigData(\n guild=self.guild.id,\n reaction=self.reaction,\n participants_reaction=self.participants_reaction,\n required_roles=[\n role.id for role in self.required_roles if role is not None\n ],\n blacklisted_roles=[\n role.id for role in self.blacklisted_roles if role is not None\n ],\n bypass_roles=[role.id for role in self.bypass_roles if role is not None],\n multiplier_roles={\n role.id: multiplier_roles\n for role, multiplier_roles in self.multiplier_roles.items()\n if role is not None\n },\n managers=[role.id for role in self.managers if role is not None],\n dm_winner=self.dm_winner,\n dm_host=self.dm_host,\n color=int(self.color),\n button_style=self.button_style.value,\n end_message=self.end_message,\n reroll_message=self.reroll_message,\n dm_message=self.dm_message,\n dm_host_message=self.dm_host_message,\n gw_header=self.gw_header,\n gw_end_header=self.gw_end_header,\n ) # type: ignore\n if self.logging:\n data[\"logging\"] = self.logging.id\n if self.ping:\n data[\"ping\"] = self.ping.id\n return data\n\n @classmethod\n async def fetch(cls, guild: discord.Guild, pool: asyncpg.Pool) -> \"GuildConfig\":\n \"\"\"Create a GuildConfig instance from data retrieved from a database.\n\n Parameters\n ----------\n guild: discord.Guild\n The discord guild.\n pool: asyncpg.Pool\n The database connection pool.\n\n Returns\n -------\n GuildConfig\n An instance of GuildConfig populated with the retrieved data.\n \"\"\"\n\n data = await pool.fetchrow(\"SELECT * FROM configs WHERE guild = $1\", guild.id)\n channel_data: List[asyncpg.Record] = await pool.fetch(\n \"SELECT * FROM channel_configs WHERE guild = $1\", guild.id\n )\n\n if not data:\n data: asyncpg.Record = await cls._create_config(guild.id, pool)\n\n return cls._from_data(guild, data, channel_data)\n\n async def update(\n self, column: str, value: Any, pool: asyncpg.Pool\n ) -> \"GuildConfig\":\n \"\"\"Update the specified column with the provided value in the database.\n\n 
Parameters\n ----------\n column: str\n The column to be updated.\n value: Any\n The new value for the column.\n pool: asyncpg.Pool\n The database connection pool.\n\n Raises\n ------\n ValueError\n If the provided column is not a valid column name in `self.__slots__`.\n\n Returns\n -------\n GuildConfig\n The updated `GuildConfig` instance.\n \"\"\"\n if column not in self.__slots__:\n raise ValueError(f\"Invalid column: {column}\")\n\n setattr(self, column, value)\n\n data = self.to_dict()\n\n columns = \", \".join(data.keys())\n placeholders = \", \".join([f\"${i+1}\" for i in range(len(data))])\n update_clause = \", \".join(\n [f\"{key} = EXCLUDED.{key}\" for key in data.keys() if key != \"guild\"]\n )\n\n query = f\"\"\"\n INSERT INTO configs ({columns}) \n VALUES ({placeholders})\n ON CONFLICT (guild) DO \n UPDATE SET {update_clause}\n \"\"\"\n\n values = list(data.values())\n await pool.execute(query, *values)\n return self\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> ChannelConfig:\n ...\n\n @overload\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = False,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n ...\n\n async def get_channel_config(\n self,\n channel: Union[discord.TextChannel, discord.CategoryChannel],\n create_if_not_exists: bool = True,\n pool: Optional[asyncpg.Pool] = None,\n ) -> Optional[ChannelConfig]:\n \"\"\"\n Retrieves the configuration for a specific channel.\n\n Parameters\n ----------\n channel: Union[discord.TextChannel, discord.CategoryChannel]\n The channel for which to retrieve the configuration.\n create_if_not_exists: Optional[bool]\n Whether to create a new configuration if it doesn't exist. 
Default is True.\n pool: Optional[asyncpg.Pool]\n The connection pool for interacting with the database.\n\n Returns\n -------\n Optional[ChannelConfig]\n The ChannelConfig object if it exists, or None if it doesn't exist and create_if_not_exists is set to False.\n\n Raises\n ------\n MaxChannelConfigCreationError\n If create_if_not_exists is True and the maximum number of channel configurations has already been reached.\n \"\"\"\n\n config = discord.utils.get(self.channel_settings, channel=channel)\n if config is not None:\n return config\n\n if create_if_not_exists:\n if len(self.channel_settings) >= 25:\n raise MaxChannelConfigCreationError()\n else:\n if pool:\n config = await ChannelConfig.create(channel.guild, channel, pool)\n self.channel_settings.append(config)\n return config\n\n return None" }, { "identifier": "Giveaway", "path": "models/giveaways.py", "snippet": "class Giveaway:\n \"\"\"\n Represents a giveaway object.\n\n Attributes\n ----------\n bot: Giftify\n The bot instance to handle the giveaway.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the giveaway message.\n extra_message_id: int\n The ID of the extra message sent with the giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: Optional[int]\n The ID of the user donating for the giveaway.\n prize: str\n The prize of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n winners: List[int]\n The winners of the giveaway.\n participants: List[int]\n The IDs of the participants of the giveaway.\n ended: bool\n Indicates whether the giveaway has ended.\n ends: datetime.datetime\n The timestamp when the giveaway will end.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of role IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing role multiplier criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[List[int]]\n The IDs of the channels where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n \"\"\"\n\n __slots__ = (\n \"bot\",\n \"guild_id\",\n \"channel_id\",\n \"message_id\",\n \"extra_message_id\",\n \"prize\",\n \"host_id\",\n \"donor_id\",\n \"winner_count\",\n \"winners\",\n \"participants\",\n \"ended\",\n \"ends\",\n \"required_roles\",\n \"blacklisted_roles\",\n \"bypass_roles\",\n \"multiplier_roles\",\n \"messages\",\n \"messages_required\",\n \"allowed_message_channels\",\n \"amari\",\n \"weekly_amari\",\n )\n\n def __init__(self, *, bot: Giftify, record: asyncpg.Record):\n self.bot = bot\n self.guild_id: int = record[\"guild\"]\n self.channel_id: int = record[\"channel\"]\n self.message_id: int = record[\"message\"]\n self.extra_message_id: int = record[\"extra_message\"]\n self.prize: str = record[\"prize\"]\n self.host_id: int = record[\"host\"]\n self.donor_id: Optional[int] = record[\"donor\"]\n self.winner_count: int = record[\"winner_count\"]\n self.winners: List[int] = 
record[\"winners\"]\n self.participants: List[int] = record[\"participants\"]\n self.ended: bool = record[\"ended\"]\n self.ends: datetime.datetime = record[\"ends\"]\n self.required_roles: List[int] = record[\"required_roles\"] or []\n self.blacklisted_roles: List[int] = record[\"blacklisted_roles\"] or []\n self.bypass_roles: List[int] = record[\"bypass_roles\"] or []\n self.multiplier_roles: Dict[int, int] = {\n int(role): entries\n for role, entries in record[\"multiplier_roles\"].items()\n if entries > 1\n }\n self.messages: Dict[int, int] = {\n int(member): messages for member, messages in record[\"messages\"].items()\n }\n self.messages_required: Optional[int] = record[\"messages_required\"]\n self.allowed_message_channels: Optional[List[int]] = record[\"messages_channel\"]\n self.amari: Optional[int] = record[\"amari\"]\n self.weekly_amari: Optional[int] = record[\"weekly_amari\"]\n\n def __eq__(self, other: \"Giveaway\") -> bool:\n try:\n return (\n self.guild_id == other.guild_id\n and self.channel_id == other.channel_id\n and self.message_id == other.message_id\n )\n except AttributeError:\n return False\n\n def __hash__(self) -> int:\n return hash((self.guild_id, self.channel_id, self.message_id))\n\n def __repr__(self) -> str:\n return f\"<Giveaway guild_id={self.guild_id} channel_id={self.channel_id} message_id={self.message_id}>\"\n\n @property\n def jump_to_giveaway(self) -> discord.ui.View:\n url = f\"https://discord.com/channels/{self.guild_id}/{self.channel_id}/{self.message_id}\"\n view = BaseView(timeout=None)\n button = discord.ui.Button(label=\"Jump To Giveaway\", url=url)\n view.add_item(button)\n return view\n\n @staticmethod\n def create_embed(\n interaction: Interaction,\n config: GuildConfig,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n donor: Optional[discord.Member] = None,\n ) -> discord.Embed:\n assert interaction.guild is not None\n\n description = f\"Click the {config.reaction} button to join the giveaway!\\n\"\n description += f\"Hosted By: {interaction.user.mention}\\n\"\n\n if donor:\n description += f\"Donor: {donor.mention}\\n\"\n\n description += f\"Ends: {discord.utils.format_dt(duration, style='R')} ({discord.utils.format_dt(duration, style='f')})\\n\"\n\n embed = discord.Embed(\n title=prize,\n description=description,\n colour=config.color,\n timestamp=duration,\n )\n embed.set_footer(\n text=f\"{winners} winner(s) • Ends\",\n icon_url=interaction.guild.icon or interaction.client.user.display_avatar,\n )\n requirements = \"\"\n if required_roles:\n requirements += f\"Required Roles: {', '.join(role.mention for role in required_roles if role is not None)}\\n\"\n if bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(role.mention for role in bypass_roles if role is not None)}\\n\"\n\n if blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(role.mention for role in blacklisted_roles if role is not None)}\\n\"\n if messages_required:\n requirements += (\n f\"Messages Required: **{messages_required}** message(s) (5s cooldown)\\n\"\n )\n if allowed_message_channels:\n requirements += f\"Allowed Channels: 
{', '.join(f'<#{c.id}>' for c in allowed_message_channels)}\\n\"\n\n if amari:\n requirements += f\"Amari Level: {amari}\\n\"\n if weekly_amari:\n requirements += f\"Weekly Amari: {weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if multiplier_roles:\n multiplier_roles_mention = \"\\n\".join(\n [\n f\"- {entry}x ・ {role.mention}\"\n for role, entry in multiplier_roles.items()\n if role is not None\n ]\n )\n embed.add_field(\n name=\"Bonus Entries\", value=multiplier_roles_mention, inline=False\n )\n\n return embed\n\n @classmethod\n async def start(\n cls,\n interaction: Interaction,\n duration: datetime.datetime,\n winners: int,\n prize: str,\n config: GuildConfig,\n channel_config: Optional[ChannelConfig],\n required_roles: Optional[List[discord.Role]] = None,\n blacklisted_roles: Optional[List[discord.Role]] = None,\n bypass_roles: Optional[List[discord.Role]] = None,\n multiplier_roles: Optional[Dict[discord.Role, int]] = None,\n messages_required: Optional[int] = None,\n allowed_message_channels: Optional[List[discord.TextChannel]] = None,\n amari: Optional[int] = None,\n weekly_amari: Optional[int] = None,\n image: Optional[discord.Attachment] = None,\n donor: Optional[discord.Member] = None,\n ping: bool = False,\n message: Optional[str] = None,\n ):\n assert isinstance(interaction.channel, discord.TextChannel)\n assert interaction.guild is not None\n\n embed = cls.create_embed(\n interaction=interaction,\n config=config,\n duration=duration,\n winners=winners,\n prize=prize,\n required_roles=required_roles,\n blacklisted_roles=blacklisted_roles,\n bypass_roles=bypass_roles,\n multiplier_roles=multiplier_roles,\n messages_required=messages_required,\n allowed_message_channels=allowed_message_channels,\n amari=amari,\n weekly_amari=weekly_amari,\n donor=donor,\n )\n view = GiveawayView(\n config.reaction, config.participants_reaction, config.button_style\n )\n giveaway_message = await interaction.channel.send(\n config.gw_header, embed=embed, view=view\n )\n\n message_embed = discord.Embed(\n title=f\"{GIFT_EMOJI} Giveaway\",\n description=f\"**Message・** {message}\" if message else None,\n color=config.color,\n )\n\n if image:\n message_embed.set_image(url=image)\n\n extra_message = None\n\n if ping or image:\n ping_role = (\n channel_config.ping\n if channel_config and channel_config.ping\n else config.ping\n )\n extra_message = await interaction.channel.send(\n ping_role.mention if ping_role else \"\",\n embed=message_embed if message or image else None, # type: ignore\n allowed_mentions=discord.AllowedMentions(roles=True),\n )\n\n if extra_message is None and message is not None:\n extra_message = await interaction.channel.send(embed=message_embed)\n\n await interaction.client.timer_cog.create_timer(\n message_id=giveaway_message.id,\n channel_id=interaction.channel.id,\n guild_id=interaction.guild.id,\n author_id=interaction.user.id,\n title=\"Giveaway\",\n event=\"giveaway\",\n expires=duration,\n pool=interaction.client.pool,\n )\n\n return await cls.create_entry(\n bot=interaction.client,\n guild_id=interaction.guild.id,\n channel_id=interaction.channel.id,\n message_id=giveaway_message.id,\n prize=prize,\n host_id=interaction.user.id,\n donor_id=donor.id if donor else None,\n winner_count=winners,\n ends=duration,\n required_roles=[role.id for role in required_roles if role is not None]\n if required_roles\n else [],\n blacklisted_roles=[\n role.id for role in blacklisted_roles if role is not 
None\n ]\n if blacklisted_roles\n else [],\n bypass_roles=[role.id for role in bypass_roles if role is not None]\n if bypass_roles\n else [],\n multiplier_roles={\n role.id: entries\n for role, entries in multiplier_roles.items()\n if role is not None\n }\n if multiplier_roles\n else {},\n messages={},\n messages_required=messages_required,\n allowed_message_channels=[c.id for c in allowed_message_channels]\n if allowed_message_channels\n else [],\n extra_message_id=extra_message.id if extra_message else None,\n amari=amari,\n weekly_amari=weekly_amari,\n )\n\n @classmethod\n async def create_entry(\n cls,\n bot: Giftify,\n guild_id: int,\n channel_id: int,\n message_id: int,\n prize: str,\n host_id: int,\n winner_count: int,\n ends: datetime.datetime,\n required_roles: List[int],\n blacklisted_roles: List[int],\n bypass_roles: List[int],\n donor_id: Optional[int],\n multiplier_roles: Optional[dict],\n messages: Optional[dict],\n messages_required: Optional[int],\n allowed_message_channels: Optional[List[int]],\n extra_message_id: Optional[int],\n amari: Optional[int],\n weekly_amari: Optional[int],\n ) -> \"Giveaway\":\n \"\"\"\n Create a new Giveaway object and insert it into the database.\n\n Parameters\n ----------\n bot: Giftify\n The bot instance.\n guild_id: int\n The ID of the guild (server) where the giveaway is hosted.\n channel_id: int\n The ID of the channel where the giveaway is hosted.\n message_id: int\n The ID of the message having the giveaway view.\n prize: str\n The prize of the giveaway.\n host_id: int\n The ID of the user hosting the giveaway.\n donor_id: int\n The ID of the donor of the giveaway.\n winner_count: int\n The number of winners for the giveaway.\n ends: datetime.datetime\n The time when the giveaway ends.\n required_roles: List[int]\n The list of role IDs required to participate in the giveaway.\n blacklisted_roles: List[int]\n The list of role IDs excluded from participating in the giveaway.\n bypass_roles: List[int]\n The list of user IDs exempted from giveaway restrictions.\n multiplier_roles: Optional[dict]\n A dictionary containing multiplier_roles criteria for the giveaway.\n messages: Optional[dict]\n A dictionary containing message-based criteria for the giveaway.\n messages_required: Optional[int]\n The number of messages required to participate in the giveaway.\n allowed_message_channels: Optional[int]\n The ID of the channel where the message count is tracked.\n amari: Optional[int]\n The required Amari XP to participate in the giveaway.\n weekly_amari: Optional[int]\n The required weekly Amari XP to participate in the giveaway.\n\n Returns\n -------\n Giveaway\n The created Giveaway object.\n \"\"\"\n record = await bot.pool.fetchrow(\n \"INSERT INTO giveaways (guild, channel, message, extra_message, host, donor, prize, winner_count, ends, required_roles, blacklisted_roles, bypass_roles, multiplier_roles, messages, messages_required, messages_channel, amari, weekly_amari) \"\n \"VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18) \"\n \"RETURNING *\",\n guild_id,\n channel_id,\n message_id,\n extra_message_id,\n host_id,\n donor_id,\n prize,\n winner_count,\n ends,\n required_roles,\n blacklisted_roles,\n bypass_roles,\n multiplier_roles,\n messages,\n messages_required,\n allowed_message_channels,\n amari,\n weekly_amari,\n )\n return cls(bot=bot, record=record)\n\n async def check_requirements(self, member: discord.Member) -> None:\n missing_roles = [\n role.mention\n for role_id in self.required_roles\n 
if (role := member.guild.get_role(role_id)) and role not in member.roles\n ]\n if missing_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you are missing the following required roles: {', '.join(missing_roles)}\"\n )\n\n blacklisted_roles = [\n role.mention\n for role_id in self.blacklisted_roles\n if (role := member.guild.get_role(role_id)) and role in member.roles\n ]\n if blacklisted_roles:\n raise GiveawayError(\n f\"You cannot join this giveaway as you have the following blacklisted roles: {', '.join(blacklisted_roles)}\"\n )\n\n if self.amari:\n if (user_level := await self.bot.fetch_level(member)) < self.amari:\n raise GiveawayError(\n f\"Your amari level is less than the required level, you need `{self.amari - user_level}` more level(s) to join the giveaway.\"\n )\n\n if self.weekly_amari:\n if (\n weekly_exp := await self.bot.fetch_weekly_experience(member)\n ) < self.weekly_amari:\n raise GiveawayError(\n f\"Your weekly amari experience is less than the required weekly amari experience, you need `{self.weekly_amari - weekly_exp}` more experience point(s) to join the giveaway.\"\n )\n\n if self.messages_required and self.messages_required > 0:\n if (\n user_messages := self.messages.get(member.id, 0)\n ) < self.messages_required:\n raise GiveawayError(\n f\"You have sent less messages than the required messages, you need to send `{self.messages_required - user_messages}` more messages to join the giveaway.\"\n )\n\n def can_bypass(self, member: discord.Member) -> bool:\n return any(\n member.guild.get_role(role_id) in member.roles\n for role_id in self.bypass_roles\n )\n\n def get_multiplier_entries(self, member: discord.Member) -> int:\n entries = 0\n for role_id, multiplier_roles_entries in self.multiplier_roles.items():\n if member.get_role(int(role_id)):\n entries += multiplier_roles_entries\n\n return entries or 1\n\n async def join(self, member: discord.Member) -> int:\n try:\n await self.check_requirements(member)\n except GiveawayError as error:\n if not self.can_bypass(member):\n raise error\n\n if member.id in self.participants:\n raise GiveawayError(\"You have already joined the giveaway.\")\n\n number_of_entries = self.get_multiplier_entries(member)\n entries = [member.id] * number_of_entries\n\n self.participants += entries\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def leave(self, member: discord.Member) -> int:\n if member.id not in self.participants:\n raise GiveawayError(\"You are not a participant of this giveaway.\")\n\n self.participants = [\n participant for participant in self.participants if participant != member.id\n ]\n\n query = \"\"\"UPDATE giveaways SET participants = $1 \n WHERE guild = $2 AND channel = $3 AND message = $4\"\"\"\n\n await self.bot.pool.execute(\n query, self.participants, self.guild_id, self.channel_id, self.message_id\n )\n\n return len(set(self.participants))\n\n async def _end(self):\n await self.bot.pool.execute(\n \"UPDATE giveaways SET ended = $1, winners = $2 WHERE guild = $3 AND channel = $4 AND message = $5\",\n True,\n self.winners,\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n\n async def end(self):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return await self._end()\n\n config = await self.bot.fetch_config(guild)\n winners = await 
self.pick_winners(self.winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_host:\n await self.dm_host(guild, winners, config.dm_host_message)\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.end_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def reroll(self, winner_count: int):\n guild = self.bot.get_guild(self.guild_id)\n if not guild:\n return\n\n config = await self.bot.fetch_config(guild)\n winners = await self.pick_winners(winner_count, guild)\n self.winners = [winner.id for winner in winners]\n\n await self._end()\n\n if config.dm_winner:\n await self.dm_winners(config.dm_message, winners)\n\n channel = guild.get_channel(self.channel_id)\n if not channel or not isinstance(channel, discord.TextChannel):\n return\n\n gw_message = channel.get_partial_message(self.message_id)\n message = (\n safe_format(\n config.reroll_message,\n winners=\", \".join(winner.mention for winner in winners),\n prize=bold(self.prize),\n )\n if winners\n else f\"Could not pick any winners for the giveaway of {bold(self.prize)}!\"\n )\n embed = self.get_end_embed(guild, config)\n\n view = GiveawayView(\n config.reaction,\n config.participants_reaction,\n config.button_style,\n participant_count=len(set(self.participants)),\n disabled=True,\n )\n\n with contextlib.suppress(discord.HTTPException):\n await gw_message.edit(content=config.gw_end_header, embed=embed, view=view)\n await gw_message.reply(message, view=self.jump_to_giveaway)\n\n async def cancel(self):\n await self.bot.pool.execute(\n \"\"\"DELETE FROM giveaways WHERE guild = $1 AND channel = $2 AND message = $3\"\"\",\n self.guild_id,\n self.channel_id,\n self.message_id,\n )\n if self.extra_message_id is not None:\n channel = self.bot.get_channel(self.channel_id)\n if channel is not None:\n await channel.get_partial_message(self.extra_message_id).delete() # type: ignore\n\n async def dm_host(\n self, guild: discord.Guild, winners: List[discord.Member], message: str\n ) -> None:\n host = await self.bot.get_or_fetch_member(guild, self.host_id)\n if not host:\n return\n\n description = safe_format(\n message,\n winners=\", \".join(winner.mention for winner in winners)\n if winners\n else \"No Winners\",\n prize=bold(self.prize),\n )\n\n embed = discord.Embed(\n title=f\"Your giveaway for {self.prize} has ended!\"[:256],\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await host.send(embed=embed, view=view)\n\n async def dm_winners(self, message: str, winners: List[discord.Member]) -> None:\n for winner in winners:\n description = safe_format(\n message, winner=winner.mention, prize=bold(self.prize)\n 
)\n\n embed = discord.Embed(\n title=\"You won!\",\n description=description,\n colour=self.bot.colour,\n )\n view = self.jump_to_giveaway\n\n with contextlib.suppress(discord.HTTPException):\n await winner.send(embed=embed, view=view)\n\n async def pick_winners(\n self, count: int, guild: discord.Guild\n ) -> List[discord.Member]:\n winners = []\n\n participants = self.participants.copy()\n\n while count > 0 and participants:\n member_id = random.choice(participants)\n member = await self.bot.get_or_fetch_member(guild, member_id)\n if member is not None and member not in winners:\n try:\n await self.check_requirements(member)\n except GiveawayError:\n pass\n else:\n winners.append(member)\n count -= 1\n\n participants.remove(member_id)\n\n return winners\n\n def get_end_embed(self, guild: discord.Guild, config: GuildConfig) -> discord.Embed:\n description = (\n f\"This giveaway has ended!\\n\"\n f\"Hosted By: <@!{self.host_id}>\\n\"\n f\"Winners: {', '.join(f'<@!{winner_id}>' for winner_id in self.winners) if self.winners else 'No Winners'}\\n\"\n f\"Ended: {discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='R')} ({discord.utils.format_dt(datetime.datetime.now(datetime.timezone.utc), style='f')})\\n\"\n )\n if self.donor_id:\n description += f\"Donor: <@!{self.donor_id}>\\n\"\n embed = discord.Embed(\n title=self.prize,\n description=description,\n colour=config.color,\n timestamp=self.ends,\n )\n embed.set_footer(\n text=f\"{self.winner_count} winner(s) • Ended\",\n icon_url=guild.icon or self.bot.user.display_avatar,\n )\n\n requirements = \"\"\n if self.required_roles:\n requirements += f\"Required Roles: {', '.join(f'<@&{role_id}>' for role_id in self.required_roles)}\\n\"\n if self.bypass_roles:\n requirements += f\"Bypass Roles: {', '.join(f'<@&{role_id}>' for role_id in self.bypass_roles)}\\n\"\n if self.blacklisted_roles:\n requirements += f\"Blacklisted Roles: {', '.join(f'<@&{role_id}>' for role_id in self.blacklisted_roles)}\\n\"\n if self.messages_required:\n requirements += f\"Messages Required: **{self.messages_required}** message(s) (5s cooldown)\\n\"\n if self.allowed_message_channels:\n requirements += f\"Allowed Channels: {', '.join(f'<#{cid}>' for cid in self.allowed_message_channels)}\\n\"\n if self.amari:\n requirements += f\"Amari Level: {self.amari}\\n\"\n if self.weekly_amari:\n requirements += f\"Weekly Amari: {self.weekly_amari} XP Points\\n\"\n\n if requirements:\n embed.add_field(name=\"Requirements\", value=requirements, inline=False)\n\n if self.multiplier_roles:\n multiplier_roles = \"\\n\".join(\n [\n f\"- {multiplier_entries}x ・ <@&{multiplier_role}>\"\n for multiplier_role, multiplier_entries in self.multiplier_roles.items()\n ]\n )\n embed.add_field(name=\"Bonus Entries\", value=multiplier_roles, inline=False)\n\n return embed" }, { "identifier": "Raffle", "path": "models/raffles.py", "snippet": "class Raffle:\n \"\"\"\n Represents a raffle object.\n\n Attributes\n ----------\n pool: asyncpg.Pool\n The PostgreSQL connection pool instance.\n guild: discord.Guild\n The guild (server) where the raffle is hosted.\n name: str\n The name of the raffle.\n winner: Optional[discord.Member]\n The member instance of the winner, or None if the raffle hasn't ended yet.\n deputy_roles: List[discord.Role]\n A list of roles associated with the raffle.\n deputy_members: List[discord.Member]\n A list of members associated with the raffle.\n tickets: Dict[discord.Member, int]\n A mapping of members to the number of tickets they have.\n \"\"\"\n\n 
def __init__(\n self,\n pool: asyncpg.Pool,\n *,\n guild: discord.Guild,\n name: str,\n winner: Optional[discord.Member],\n deputy_roles: List[discord.Role],\n deputy_members: List[discord.Member],\n tickets: Dict[discord.Member, int],\n ):\n self.pool = pool\n\n self.guild = guild\n self.name = name\n self.winner = winner\n self.deputy_roles = deputy_roles\n self.deputy_members = deputy_members\n self.tickets = tickets\n\n def __str__(self):\n return self.name\n\n def __repr__(self) -> str:\n return f\"<Raffle name={self.name} guild={self.guild} winner={self.winner}>\"\n\n def __hash__(self) -> int:\n return hash((self.name, self.guild))\n\n def __eq__(self, other: Raffle) -> bool:\n return self.name == other.name and self.guild == other.guild\n\n @classmethod\n async def from_record(cls, bot: Giftify, *, record: asyncpg.Record) -> Raffle:\n name = record[\"name\"]\n guild = bot.get_guild(record[\"guild\"])\n if guild is None:\n raise RaffleError(\"The guild having the raffle was not found.\")\n\n winner_id = record[\"winner\"]\n winner: Optional[discord.Member] = (\n (await bot.get_or_fetch_member(guild, winner_id) or FakeMember(winner_id))\n if winner_id\n else None\n ) # type: ignore\n\n deputy_roles = [guild.get_role(role_id) for role_id in record[\"deputy_roles\"]]\n deputy_members = [\n await bot.get_or_fetch_member(guild, member_id)\n for member_id in record[\"deputy_members\"]\n ]\n\n tickets = {\n await bot.get_or_fetch_member(guild, int(member_id)): num_tickets\n for member_id, num_tickets in record[\"tickets\"].items()\n }\n\n return cls(\n bot.pool,\n guild=guild,\n name=name,\n winner=winner,\n deputy_roles=filter_none(deputy_roles),\n deputy_members=filter_none(deputy_members),\n tickets=filter_none(tickets),\n )\n\n async def roll(self) -> discord.Member:\n \"\"\"\n End the raffle and set the winner.\n \"\"\"\n members = list(self.tickets.keys())\n weights = list(self.tickets.values())\n\n self.winner = random.choices(members, weights, k=1)[0]\n\n await self.save()\n\n return self.winner\n\n async def add_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Add a deputy to the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be added.\n \"\"\"\n if isinstance(obj, discord.Member):\n if len(self.deputy_members) >= 25:\n raise RaffleError(\"You cannot add more than 25 deputy members.\")\n self.deputy_members.append(obj)\n elif isinstance(obj, discord.Role):\n if len(self.deputy_roles) >= 10:\n raise RaffleError(\"You cannot add more than 10 deputy roles.\")\n self.deputy_roles.append(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def remove_deputy(self, obj: Union[discord.Member, discord.Role]) -> None:\n \"\"\"\n Remove a deputy from the raffle.\n\n Parameters\n ----------\n obj: Union[discord.Member, discord.Role]\n The instance of deputy member or role to be removed.\n \"\"\"\n if isinstance(obj, discord.Member):\n if obj not in self.deputy_members:\n raise RaffleError(\"That member is not a deputy.\")\n self.deputy_members.remove(obj)\n elif isinstance(obj, discord.Role):\n if obj not in self.deputy_roles:\n raise RaffleError(\"That role is not a deputy.\")\n self.deputy_roles.remove(obj)\n else:\n raise RaffleError(\"Invalid obj type.\")\n\n await self.save()\n\n async def add_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Add tickets to a member.\n\n Parameters\n ----------\n member: discord.Member\n The 
instance of the member.\n num_tickets: int\n The number of tickets to add.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] += num_tickets\n else:\n self.tickets[member] = num_tickets\n\n await self.save()\n\n async def remove_tickets(self, member: discord.Member, num_tickets: int) -> None:\n \"\"\"\n Remove tickets from a member.\n\n Parameters\n ----------\n member: discord.Member\n The instance of the member.\n num_tickets: int\n The number of tickets to remove.\n \"\"\"\n if member in self.tickets:\n self.tickets[member] -= num_tickets\n if self.tickets[member] <= 0:\n del self.tickets[member]\n\n await self.save()\n else:\n raise RaffleError(\n f\"That member does not have any tickets in {self.name} raffle.\"\n )\n\n async def save(self) -> None:\n \"\"\"\n Update raffle attributes in the database.\n \"\"\"\n query = \"\"\"\n INSERT INTO raffles (guild, name, winner, deputy_roles, deputy_members, tickets)\n VALUES ($1, $2, $3, $4, $5, $6)\n ON CONFLICT (guild, name)\n DO UPDATE SET winner = EXCLUDED.winner, deputy_roles = EXCLUDED.deputy_roles,\n deputy_members = EXCLUDED.deputy_members, tickets = EXCLUDED.tickets;\n \"\"\"\n await self.pool.execute(\n query,\n self.guild.id,\n self.name,\n self.winner.id if self.winner else None,\n [role.id for role in self.deputy_roles],\n [member.id for member in self.deputy_members],\n {\n str(member.id): num_tickets\n for member, num_tickets in self.tickets.items()\n },\n )\n\n async def delete(self):\n \"\"\"\n Delete the raffle from the database.\n \"\"\"\n query = \"\"\"DELETE FROM raffles WHERE guild = $1 AND name = $2\"\"\"\n await self.pool.execute(query, self.guild.id, self.name)" }, { "identifier": "ERROR_EMOJI", "path": "utils/constants.py", "snippet": "ERROR_EMOJI = \"<:GiftifyError:1117842868057423914>\"" }, { "identifier": "SUCCESS_EMOJI", "path": "utils/constants.py", "snippet": "SUCCESS_EMOJI = \"<:GiftifySuccess:1100674526318166048>\"" }, { "identifier": "WARN_EMOJI", "path": "utils/constants.py", "snippet": "WARN_EMOJI = \"<:GiftifyWarn:1098498926564356106>\"" }, { "identifier": "db_init", "path": "utils/db.py", "snippet": "async def db_init(connection: asyncpg.Connection) -> None:\n await connection.set_type_codec(\n \"jsonb\", schema=\"pg_catalog\", encoder=_encode_jsonb, decoder=_decode_jsonb\n )" }, { "identifier": "CommandTree", "path": "utils/tree.py", "snippet": "class CommandTree(app_commands.CommandTree):\r\n client: \"Giftify\"\r\n\r\n async def on_error(\r\n self,\r\n interaction: Interaction,\r\n error: app_commands.AppCommandError,\r\n ) -> None:\r\n view = discord.ui.View()\r\n\r\n button = discord.ui.Button(label=\"Support\", url=\"https://discord.gg/GQSGChbEKz\")\r\n\r\n view.add_item(button)\r\n\r\n if not interaction.response.is_done():\r\n await interaction.response.defer(thinking=True, ephemeral=True)\r\n\r\n embed = discord.Embed(\r\n title=\"An error was raised while executing this command!\",\r\n color=discord.Colour.red(),\r\n )\r\n\r\n if isinstance(error, app_commands.CommandInvokeError):\r\n if isinstance(error, MaxChannelConfigCreationError):\r\n embed.description = (\r\n f\"{WARN_EMOJI} You cannot set up configuration for more than 25 channels, please try removing some.\"\r\n )\r\n elif isinstance(error, discord.HTTPException):\r\n embed.description = f\"{WARN_EMOJI} Unknown HTTP error occurred!\"\r\n else:\r\n embed.description = (\r\n f\"{WARN_EMOJI} An unknown error occurred, my developers have been notified about this error.\"\r\n )\r\n self.client.log_handler.log.exception(\"Exception 
occurred in the CommandTree:\n\", exc_info=error)\r\n sentry_sdk.capture_exception(error)\r\n elif isinstance(error, app_commands.TransformerError):\r\n if isinstance(error, TransformerError):\r\n embed.description = f\"{WARN_EMOJI} {error.message}\"\r\n else:\r\n embed.description = f\"{WARN_EMOJI} {str(error)}\"\r\n\r\n elif isinstance(error, app_commands.MissingPermissions):\r\n missing = [perm.replace(\"_\", \" \").replace(\"guild\", \"server\").title() for perm in error.missing_permissions]\r\n\r\n format = \"\\n> \".join(missing)\r\n\r\n embed.description = (\r\n f\"{WARN_EMOJI} You are missing the following permission(s) to run this command: \\n\\n> {format}\"\r\n )\r\n\r\n elif isinstance(error, app_commands.BotMissingPermissions):\r\n missing = [perm.replace(\"_\", \" \").replace(\"guild\", \"server\").title() for perm in error.missing_permissions]\r\n\r\n format = \"\\n> \".join(missing)\r\n\r\n embed.description = f\"{WARN_EMOJI} I am missing the following permission(s) to run this command: \\n\\n> {format}\"\r\n\r\n elif isinstance(error, app_commands.CommandOnCooldown):\r\n cooldown = int(error.cooldown.per)\r\n retry_after = int(error.retry_after)\r\n embed.description = f\"{WARN_EMOJI} The cooldown for this command is **{cooldown}s**. Try running the command again after **{retry_after}s**.\"\r\n\r\n elif isinstance(error, app_commands.CommandNotFound):\r\n embed.description = f'{WARN_EMOJI} The command \"{error.name}\" was not found.'\r\n elif isinstance(error, DonationError):\r\n embed.description = f\"{WARN_EMOJI} {str(error)}\"\r\n elif isinstance(error, app_commands.CheckFailure):\r\n if isinstance(error, (DonationCategoryError, DonationPermissionsError)):\r\n embed.description = f\"{WARN_EMOJI} {str(error.message)}\"\r\n else:\r\n return\r\n else:\r\n embed.description = (\r\n f\"{WARN_EMOJI} An unknown error occurred, my developers have been notified about this error.\"\r\n )\r\n await interaction.followup.send(embed=embed, ephemeral=True)\r\n sentry_sdk.capture_exception(error)\r\n return self.client.log_handler.log.exception(\"Exception occurred in the CommandTree:\\n\", exc_info=error)\r\n\r\n return await interaction.followup.send(embed=embed, ephemeral=True)\r" }, { "identifier": "ConfirmationView", "path": "utils/view.py", "snippet": "class ConfirmationView(BaseView):\r\n def __init__(\r\n self,\r\n *,\r\n timeout: float,\r\n interaction: Interaction,\r\n success_message: str,\r\n cancel_message: str,\r\n ) -> None:\r\n super().__init__(timeout=timeout)\r\n self.interaction = interaction\r\n self.success_message = success_message\r\n self.cancel_message = cancel_message\r\n self.value: Optional[bool] = None\r\n\r\n @property\r\n def success_embed(self) -> discord.Embed:\r\n return discord.Embed(\r\n description=f\"{SUCCESS_EMOJI} {self.success_message}\",\r\n colour=discord.Colour.green(),\r\n )\r\n\r\n @property\r\n def cancel_embed(self) -> discord.Embed:\r\n return discord.Embed(\r\n description=f\"{SUCCESS_EMOJI} {self.cancel_message}\",\r\n colour=discord.Colour.green(),\r\n )\r\n\r\n async def interaction_check(self, interaction: Interaction) -> bool:\r\n if interaction.user and interaction.user.id == self.interaction.user.id:\r\n return True\r\n else:\r\n await interaction.response.send_message(\r\n \"This confirmation dialog is not for you.\", ephemeral=True\r\n )\r\n return False\r\n\r\n async def on_timeout(self) -> None:\r\n with contextlib.suppress(discord.HTTPException):\r\n for item in self.children:\r\n item.disabled = True\r\n await 
self.interaction.edit_original_response(view=self)\r\n\r\n @discord.ui.button(label=\"Confirm\", style=discord.ButtonStyle.green)\r\n async def confirm(self, interaction: Interaction, button: discord.ui.Button):\r\n self.value = True\r\n await interaction.response.edit_message(embed=self.success_embed, view=None)\r\n self.stop()\r\n\r\n @discord.ui.button(label=\"Cancel\", style=discord.ButtonStyle.red)\r\n async def cancel(self, interaction: Interaction, button: discord.ui.Button):\r\n self.value = False\r\n await interaction.response.edit_message(embed=self.cancel_embed, view=None)\r\n\r\n self.stop()\r" } ]
import asyncio import datetime import logging import os import pathlib import sys import traceback import aiohttp import asyncpg import discord import dotenv import jishaku import sentry_sdk import uvloop from logging.handlers import RotatingFileHandler from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple from amari import AmariClient from discord.ext import commands from discord.utils import MISSING from discord.utils import _ColourFormatter as ColourFormatter from expiringdict import ExpiringDict from sentry_sdk.integrations.logging import LoggingIntegration from models.giveaway_settings import GuildConfig from models.giveaways import Giveaway from models.raffles import Raffle from utils.constants import ERROR_EMOJI, SUCCESS_EMOJI, WARN_EMOJI from utils.db import db_init from utils.tree import CommandTree from utils.view import ConfirmationView from cogs.timer_manager import TimerManager from models.donation_settings import GuildDonationConfig
14,886
async def fetch_level(self, member: discord.Member, /) -> int: """Fetches user level from Amari Bot API. Parameters ----------- member: discord.Member The member whose level is to be fetched. Returns --------- int The retrieved level. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.level or 0 async def fetch_weekly_experience(self, member: discord.Member, /) -> int: """Fetches user's weekly experience from Amari Bot API. Parameters ----------- member: discord.Member The member whose weekly experience is to be fetched. Returns --------- int The retrieved weekly experience. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.weeklyexp or 0 async def prompt( self, message: str, *, interaction: discord.Interaction[Giftify], success_message: str, cancel_message: str, timeout: float = 60.0, ) -> Optional[bool]: """An interactive reaction confirmation dialog. Parameters ----------- message: str The message to show along with the prompt. timeout: float How long to wait before returning. interaction: Interaction The interaction object to handle the confirmation dialog. success_message: str The message to show when the user clicks Confirm. cancel_message: str The message to show when the user clicks Cancel. Returns -------- Optional[bool] ``True`` if explicit confirm, ``False`` if explicit deny, ``None`` if deny due to timeout """ view = ConfirmationView( timeout=timeout, interaction=interaction, success_message=success_message, cancel_message=cancel_message, ) view.message = await self.send(interaction, message, view=view, reason="warn") await view.wait() return view.value class Giftify(GiftifyHelper, commands.AutoShardedBot): user: discord.ClientUser colour: int = 0xCB3045 __version_info__ = "1.1.4" def __init__( self, *, log_handler: LogHandler, pool: asyncpg.Pool, session: aiohttp.ClientSession, amari_client: AmariClient, ) -> None: self._log_handler = log_handler self._pool = pool self._session = session self._amari_client = amari_client intents = discord.Intents(messages=True, emojis=True, guilds=True) allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False) member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents) sentry_sdk.init( dsn=os.environ["SENTRY_DSN"], integrations=[ LoggingIntegration( level=logging.INFO, event_level=logging.ERROR, ) ], traces_sample_rate=1.0, ) super().__init__( command_prefix=commands.when_mentioned,
from __future__ import annotations if TYPE_CHECKING: dotenv.load_dotenv() try: except ImportError: # Windows pass else: uvloop.install() jishaku.Flags.HIDE = True jishaku.Flags.RETAIN = True jishaku.Flags.NO_UNDERSCORE = True jishaku.Flags.NO_DM_TRACEBACK = True OWNER_IDS = (747403406154399765,) EXTENSIONS: Tuple[str, ...] = ( "meta", "settings", "timers", "giveaways", "donations", "raffles", "logger", "webserver", ) class RemoveNoise(logging.Filter): def __init__(self) -> None: super().__init__(name="discord.state") def filter(self, record) -> bool: if record.levelname == "WARNING" and "referencing an unknown" in record.msg: return False return True class LogHandler: def __init__(self, stream: bool = True) -> None: self.log: logging.Logger = logging.getLogger() self.max_bytes: int = 32 * 1024 * 1024 self.logging_path = pathlib.Path("./logs/") self.logging_path.mkdir(exist_ok=True) self.stream = stream async def __aenter__(self) -> "LogHandler": return self.__enter__() def __enter__(self: "LogHandler") -> "LogHandler": logging.getLogger("discord").setLevel(logging.INFO) logging.getLogger("discord.http").setLevel(logging.INFO) logging.getLogger("discord.state").addFilter(RemoveNoise()) self.log.setLevel(logging.INFO) handler = RotatingFileHandler( filename=self.logging_path / "Giftify.log", encoding="utf-8", mode="w", maxBytes=self.max_bytes, backupCount=5, ) dt_fmt = "%Y-%m-%d %H:%M:%S" fmt = logging.Formatter("[{asctime}] [{levelname:<7}] {name}: {message}", dt_fmt, style="{") handler.setFormatter(fmt) self.log.addHandler(handler) if self.stream: stream_handler = logging.StreamHandler() stream_handler.setFormatter(ColourFormatter()) self.log.addHandler(stream_handler) return self async def __aexit__(self, *args: Any) -> None: return self.__exit__(*args) def __exit__(self, *args: Any) -> None: handlers = self.log.handlers[:] for handler in handlers: handler.close() self.log.removeHandler(handler) class GiftifyHelper: configs: List[GuildConfig] = [] donation_configs: List[GuildDonationConfig] = [] cached_giveaways: List["Giveaway"] = [] webhook_cache: Dict[discord.TextChannel, discord.Webhook] = {} raffles_cache: Dict[discord.Guild, List[Raffle]] = ExpiringDict(max_len=100, max_age_seconds=300) pool: asyncpg.Pool user: discord.ClientUser amari_client: AmariClient """A helper class for Giftify's operations. This class provides methods to send interaction messages with embeds, fetch webhooks for a channel, and retrieve or fetch guild configuration. """ async def send( self, interaction: discord.Interaction, message: str, reason: str = "success", ephemeral: bool = True, view: discord.ui.View = MISSING, ) -> None: """Sends an interaction message with embed. Parameters ----------- interaction: discord.Interaction The interaction to respond to. message: str The response message to send. reason: str The reason to send the message, can be "warn", "error" or "success". ephemeral: bool If the response should be sent ephemerally. 
""" emoji = WARN_EMOJI if reason == "warn" else ERROR_EMOJI if reason == "error" else SUCCESS_EMOJI colour = ( discord.Colour.orange() if reason == "warn" else discord.Colour.red() if reason == "error" else discord.Colour.green() ) embed = discord.Embed(description=f"> {emoji} {message}", colour=colour) if interaction.response.is_done(): await interaction.followup.send(embed=embed, view=view, ephemeral=ephemeral) else: await interaction.response.send_message(embed=embed, view=view, ephemeral=ephemeral) async def _get_webhook(self, channel: discord.TextChannel, force_create: bool = False) -> discord.Webhook: if not force_create and (webhook := self.webhook_cache.get(channel)): return webhook webhook_list = await channel.webhooks() if webhook_list: for hook in webhook_list: if hook.token: if hook.user and hook.user.id == self.user.id: self.webhook_cache[channel] = hook return hook # If no suitable webhook is found, create a new one hook = await channel.create_webhook(name="Giftify Logging", avatar=await channel.guild.me.display_avatar.read()) self.webhook_cache[channel] = hook return hook async def send_to_webhook(self, channel: discord.TextChannel, embed: discord.Embed): """Sends an embed to a webhook associated with the provided channel. Parameters ----------- channel: discord.TextChannel The channel to send message to. """ try: webhook = await self._get_webhook(channel) await webhook.send(embed=embed, username="Giftify Logging", avatar_url=self.user.display_avatar) except discord.NotFound: new_webhook = await self._get_webhook(channel, force_create=True) await new_webhook.send(embed=embed, username="Giftify Logging", avatar_url=self.user.display_avatar) except discord.HTTPException: return async def fetch_config(self, guild: discord.Guild) -> GuildConfig: """Looks up a guild config in cache or fetches if not found. Parameters ----------- guild: discord.Guild The guild to look for. Returns --------- GuildConfig The retrieved guild config object. """ config = discord.utils.get(self.configs, guild=guild) if not config: config = await GuildConfig.fetch(guild, self.pool) self.configs.append(config) return config def get_donation_config(self, guild: discord.Guild, category: str) -> Optional[GuildDonationConfig]: """Finds the donation config of a guild for some category. Parameters ----------- guild: Guild The guild to which the category belongs. category: str The name of the category. Returns -------- Optional[GuildDonationConfig] The fetched donation config. """ for config in self.donation_configs: if config.guild == guild and config.category == category: return config def get_guild_donation_categories(self, guild: discord.Guild) -> List[str]: """Finds the donation categories of a guild. Parameters ----------- guild: Guild The guild for which categories will be fetched. Returns -------- List[str] The of names of donation categories. """ return [config.category for config in self.donation_configs if config.guild == guild] async def fetch_raffle(self, guild: discord.Guild, name: str) -> Optional[Raffle]: """Finds a raffle in some guild. Parameters ----------- guild: Guild The guild to which the raffle belongs. name: str The name of the raffle. Returns -------- Optional[Raffle] The fetched raffle. 
""" record = await self.pool.fetchrow("SELECT * FROM raffles WHERE guild = $1 AND name = $2", guild.id, name) if record is not None: return await Raffle.from_record(self, record=record) # type: ignore async def fetch_raffles(self, guild: discord.Guild, use_cache: bool = True) -> List[Raffle]: """Fetch all the raffles in some guild Parameters ----------- guild: Guild The guild for which raffles will be fetched. use_cache: bool Indicates wheter the bot should fetch the raffles from database or use internal cache. Returns -------- List[Raffle] The of list of fetched raffles. """ if guild in self.raffles_cache and use_cache: return self.raffles_cache[guild] records = await self.pool.fetch("SELECT * FROM raffles WHERE guild = $1", guild.id) raffles = [await Raffle.from_record(self, record=record) for record in records] # type: ignore self.raffles_cache[guild] = raffles return raffles async def fetch_giveaway(self, *, guild_id: int, channel_id: int, message_id: int) -> Optional[Giveaway]: """Looks up a for a giveaway object in database. Parameters ----------- message_id: int The ID of the giveaway message. Returns -------- Optional[Giveaway] The retrieved giveaway object. """ giveaway = discord.utils.get( self.cached_giveaways, guild_id=guild_id, channel_id=channel_id, message_id=message_id, ) if giveaway is not None: return giveaway record = await self.pool.fetchrow( "SELECT * FROM giveaways WHERE guild = $1 AND channel = $2 AND message = $3", guild_id, channel_id, message_id, ) if record is not None: giveaway = Giveaway(bot=self, record=record) # type: ignore if giveaway.messages: self.cached_giveaways.append(giveaway) return giveaway async def running_giveaways(self, *, guild_id: Optional[int] = None, sort_by_ends: bool = True) -> List[Giveaway]: """Looks up a list of active giveaways in the database. Parameters ----------- guild_id: Optional[int] The ID of the guild. If provided, fetches giveaways only for that guild. sort_by_ends: bool If True, the results will be sorted by the 'ends' column in ascending order. Returns -------- List[Giveaway] The list of fetched active giveaways. """ query = "SELECT * FROM giveaways WHERE ended = FALSE" if guild_id is not None: query += " AND guild = $1" if sort_by_ends: query += " ORDER BY ends ASC" if guild_id is not None: records = await self.pool.fetch(query, guild_id) else: records = await self.pool.fetch(query) return [Giveaway(bot=self, record=record) for record in records] # type: ignore async def fetch_level(self, member: discord.Member, /) -> int: """Fetches user level from Amari Bot API. Parameters ----------- member: discord.Member The member whose level is to be fetched. Returns --------- int The retrieved level. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.level or 0 async def fetch_weekly_experience(self, member: discord.Member, /) -> int: """Fetches user's weekly experience from Amari Bot API. Parameters ----------- member: discord.Member The member whose weekly experience is to be fetched. Returns --------- int The retrieved weekly experience. """ try: user = await self.amari_client.fetch_user(member.guild.id, member.id) except Exception: return 0 else: return user.weeklyexp or 0 async def prompt( self, message: str, *, interaction: discord.Interaction[Giftify], success_message: str, cancel_message: str, timeout: float = 60.0, ) -> Optional[bool]: """An interactive reaction confirmation dialog. 
Parameters ----------- message: str The message to show along with the prompt. timeout: float How long to wait before returning. interaction: Interaction The interaction object to handle the confirmation dialog. success_message: str The message to show when the user clicks Confirm. cancel_message: str The message to show when the user clicks Cancel. Returns -------- Optional[bool] ``True`` if explicit confirm, ``False`` if explicit deny, ``None`` if deny due to timeout """ view = ConfirmationView( timeout=timeout, interaction=interaction, success_message=success_message, cancel_message=cancel_message, ) view.message = await self.send(interaction, message, view=view, reason="warn") await view.wait() return view.value class Giftify(GiftifyHelper, commands.AutoShardedBot): user: discord.ClientUser colour: int = 0xCB3045 __version_info__ = "1.1.4" def __init__( self, *, log_handler: LogHandler, pool: asyncpg.Pool, session: aiohttp.ClientSession, amari_client: AmariClient, ) -> None: self._log_handler = log_handler self._pool = pool self._session = session self._amari_client = amari_client intents = discord.Intents(messages=True, emojis=True, guilds=True) allowed_mentions = discord.AllowedMentions(everyone=False, roles=False, users=True, replied_user=False) member_cache_flags = discord.MemberCacheFlags.from_intents(intents=intents) sentry_sdk.init( dsn=os.environ["SENTRY_DSN"], integrations=[ LoggingIntegration( level=logging.INFO, event_level=logging.ERROR, ) ], traces_sample_rate=1.0, ) super().__init__( command_prefix=commands.when_mentioned,
tree_cls=CommandTree,
7
2023-11-09 15:00:15+00:00
24k
Kushalhk/AutoFilter
plugins/p_ttishow.py
[ { "identifier": "ADMINS", "path": "info.py", "snippet": "ADMINS = [int(admin) if id_pattern.search(admin) else admin for admin in environ.get('ADMINS', '').split()]" }, { "identifier": "LOG_CHANNEL", "path": "info.py", "snippet": "LOG_CHANNEL = int(environ.get('LOG_CHANNEL', ''))" }, { "identifier": "SUPPORT_CHAT", "path": "info.py", "snippet": "SUPPORT_CHAT = environ.get('SUPPORT_CHAT', '')" }, { "identifier": "MELCOW_NEW_USERS", "path": "info.py", "snippet": "MELCOW_NEW_USERS = is_enabled((environ.get('MELCOW_NEW_USERS', \"True\")), True)" }, { "identifier": "MELCOW_VID", "path": "info.py", "snippet": "MELCOW_VID = environ.get(\"MELCOW_VID\", \"https://te.legra.ph/file/6f55d902f9bf2d0afd4bb.mp4\")" }, { "identifier": "CHNL_LNK", "path": "info.py", "snippet": "CHNL_LNK = environ.get('CHNL_LNK', 'https://t.me/TG_LINKS_CHANNEL')" }, { "identifier": "GRP_LNK", "path": "info.py", "snippet": "GRP_LNK = environ.get('GRP_LNK', 'https://t.me/TG_SUPPORT_GROUP')" }, { "identifier": "db", "path": "database/users_chats_db.py", "snippet": "class Database:\n def __init__(self, uri, database_name):\n def new_user(self, id, name):\n def new_group(self, id, title):\n async def add_user(self, id, name):\n async def is_user_exist(self, id):\n async def total_users_count(self):\n async def remove_ban(self, id):\n async def ban_user(self, user_id, ban_reason=\"No Reason\"):\n async def get_ban_status(self, id):\n async def get_all_users(self):\n async def delete_user(self, user_id):\n async def get_banned(self):\n async def add_chat(self, chat, title):\n async def get_chat(self, chat):\n async def re_enable_chat(self, id):\n async def update_settings(self, id, settings):\n async def get_settings(self, id):\n async def disable_chat(self, chat, reason=\"No Reason\"):\n async def total_chat_count(self):\n async def get_all_chats(self):\n async def get_db_size(self):" }, { "identifier": "Media", "path": "database/ia_filterdb.py", "snippet": "class Media(Document):\n file_id = fields.StrField(attribute='_id')\n file_ref = fields.StrField(allow_none=True)\n file_name = fields.StrField(required=True)\n file_size = fields.IntField(required=True)\n file_type = fields.StrField(allow_none=True)\n mime_type = fields.StrField(allow_none=True)\n caption = fields.StrField(allow_none=True)\n\n class Meta:\n indexes = ('$file_name', )\n collection_name = COLLECTION_NAME" }, { "identifier": "get_size", "path": "utils.py", "snippet": "def get_size(size):\n \"\"\"Get size in readable format\"\"\"\n\n units = [\"Bytes\", \"KB\", \"MB\", \"GB\", \"TB\", \"PB\", \"EB\"]\n size = float(size)\n i = 0\n while size >= 1024.0 and i < len(units):\n i += 1\n size /= 1024.0\n return \"%.2f %s\" % (size, units[i])" }, { "identifier": "temp", "path": "utils.py", "snippet": "class temp(object):\n BANNED_USERS = []\n BANNED_CHATS = []\n ME = None\n CURRENT=int(os.environ.get(\"SKIP\", 2))\n CANCEL = False\n MELCOW = {}\n U_NAME = None\n B_NAME = None\n GETALL = {}\n SHORT = {}\n SETTINGS = {}" }, { "identifier": "get_settings", "path": "utils.py", "snippet": "async def get_settings(group_id):\n settings = temp.SETTINGS.get(group_id)\n if not settings:\n settings = await db.get_settings(group_id)\n temp.SETTINGS[group_id] = settings\n return settings" }, { "identifier": "script", "path": "Script.py", "snippet": "class script(object):\r\n START_TXT = \"\"\"<b>Hᴇʟʟᴏ 👋 {}</b>\r\n\r\n<b>Mʏ Nᴀᴍᴇ Is <a href=\"https://t.me/{}\">{}</a>, I Cᴀɴ Pʀᴏᴠɪᴅᴇ Mᴏᴠɪᴇs, Sᴇʀɪᴇs, Aɴɪᴍᴀᴛɪᴏɴ, Cᴀʀᴛᴏᴏɴ, Aɴɪᴍᴇ, K-Dʀᴀᴍᴀ & Mᴀɴʏ Mᴏʀᴇ ☺ Jᴜsᴛ Aᴅᴅ Mᴇ Tᴏ Yᴏᴜʀ Gʀᴏᴜᴘ As Aᴅᴍɪɴ 
EɴJᴏʏ 😍</b>\"\"\"\r\n\r\n HELP_TXT = \"\"\"<b>Hᴇʀᴇ Is Tʜᴇ Hᴇʟᴘ Fᴏʀ Mʏ Cᴏᴍᴍᴀɴᴅs.</b>\"\"\"\r\n \r\n ABOUT_TXT = \"\"\"\r\n<b>‣ ᴍʏ ɴᴀᴍᴇ : <a href=\"https://t.me/{}\">ʙᴏᴛ</a>\r\n‣ ᴄʀᴇᴀᴛᴏʀ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a>\r\n‣ ʟɪʙʀᴀʀʏ : <a href=\"https://pyrogram.org/\">ᴘʏʀᴏɢʀᴀᴍ</a>\r\n‣ ʟᴀɴɢᴜᴀɢᴇ : <a href=\"https://www.python.org/\">ᴘʏᴛʜᴏɴ</a>\r\n‣ ᴅᴀᴛᴀʙᴀꜱᴇ : <a href=\"https://www.mongodb.com/\">ᴍᴏɴɢᴏ ᴅʙ</a>\r\n‣ ʜᴏꜱᴛᴇᴅ ᴏɴ : <a href=\"https://render.com/\">Render</a>\r\n‣ ʙᴜɪʟᴅ ꜱᴛᴀᴛᴜꜱ : ᴠ.𝟹.𝟶 [ꜱᴛᴀʙʟᴇ]</b>\"\"\"\r\n \r\n DISCLAIMER_TXT = \"\"\"<b>ᴛʜɪꜱ ɪꜱ ᴀɴ ᴏᴘᴇɴ ꜱᴏᴜʀᴄᴇ ᴘʀᴏᴊᴇᴄᴛ.\r\n\r\nᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜɪꜱ ʙᴏᴛ ᴀʀᴇ ꜰʀᴇᴇʟʏ ᴀᴠᴀɪʟᴀʙʟᴇ ᴏɴ ᴛʜᴇ ɪɴᴛᴇʀɴᴇᴛ ᴏʀ ᴘᴏꜱᴛᴇᴅ ʙʏ ꜱᴏᴍᴇʙᴏᴅʏ ᴇʟꜱᴇ. ᴊᴜꜱᴛ ꜰᴏʀ ᴇᴀꜱʏ ꜱᴇᴀʀᴄʜɪɴɢ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ɪɴᴅᴇxɪɴɢ ꜰɪʟᴇꜱ ᴡʜɪᴄʜ ᴀʀᴇ ᴀʟʀᴇᴀᴅʏ ᴜᴘʟᴏᴀᴅᴇᴅ ᴏɴ ᴛᴇʟᴇɢʀᴀᴍ. ᴡᴇ ʀᴇꜱᴘᴇᴄᴛ ᴀʟʟ ᴛʜᴇ ᴄᴏᴘʏʀɪɢʜᴛ ʟᴀᴡꜱ ᴀɴᴅ ᴡᴏʀᴋꜱ ɪɴ ᴄᴏᴍᴘʟɪᴀɴᴄᴇ ᴡɪᴛʜ ᴅᴍᴄᴀ ᴀɴᴅ ᴇᴜᴄᴅ. ɪꜰ ᴀɴʏᴛʜɪɴɢ ɪꜱ ᴀɢᴀɪɴꜱᴛ ʟᴀᴡ ᴘʟᴇᴀꜱᴇ ᴄᴏɴᴛᴀᴄᴛ ᴍᴇ ꜱᴏ ᴛʜᴀᴛ ɪᴛ ᴄᴀɴ ʙᴇ ʀᴇᴍᴏᴠᴇᴅ ᴀꜱᴀᴘ. ɪᴛ ɪꜱ ꜰᴏʀʙɪᴅᴅᴇɴ ᴛᴏ ᴅᴏᴡɴʟᴏᴀᴅ, ꜱᴛʀᴇᴀᴍ, ʀᴇᴘʀᴏᴅᴜᴄᴇ, ꜱʜᴀʀᴇ ᴏʀ ᴄᴏɴꜱᴜᴍᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴡɪᴛʜᴏᴜᴛ ᴇxᴘʟɪᴄɪᴛ ᴘᴇʀᴍɪꜱꜱɪᴏɴ ꜰʀᴏᴍ ᴛʜᴇ ᴄᴏɴᴛᴇɴᴛ ᴄʀᴇᴀᴛᴏʀ ᴏʀ ʟᴇɢᴀʟ ᴄᴏᴘʏʀɪɢʜᴛ ʜᴏʟᴅᴇʀ. ɪꜰ ʏᴏᴜ ʙᴇʟɪᴇᴠᴇ ᴛʜɪꜱ ʙᴏᴛ ɪꜱ ᴠɪᴏʟᴀᴛɪɴɢ ʏᴏᴜʀ ɪɴᴛᴇʟʟᴇᴄᴛᴜᴀʟ ᴘʀᴏᴘᴇʀᴛʏ, ᴄᴏɴᴛᴀᴄᴛ ᴛʜᴇ ʀᴇꜱᴘᴇᴄᴛɪᴠᴇ ᴄʜᴀɴɴᴇʟꜱ ꜰᴏʀ ʀᴇᴍᴏᴠᴀʟ. ᴛʜᴇ ʙᴏᴛ ᴅᴏᴇꜱ ɴᴏᴛ ᴏᴡɴ ᴀɴʏ ᴏꜰ ᴛʜᴇꜱᴇ ᴄᴏɴᴛᴇɴᴛꜱ, ɪᴛ ᴏɴʟʏ ɪɴᴅᴇx ᴛʜᴇ ꜰɪʟᴇꜱ ꜰʀᴏᴍ ᴛᴇʟᴇɢʀᴀᴍ.\r\n\r\nᴍᴀɪɴᴛᴀɪɴᴇᴅ ʙʏ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a></b>\"\"\"\r\n\r\n SOURCE_TXT = \"\"\"\r\n<b>Hᴇʏ, Tʜɪs ɪs ᴀ Oᴘᴇɴ Sᴏᴜʀᴄᴇ Pʀᴏᴊᴇᴄᴛ.\r\n\r\nTʜɪs Bᴏᴛ ʜᴀs Lᴀᴛᴇsᴛ ᴀɴᴅ Aᴅᴠᴀɴᴄᴇᴅ Fᴇᴀᴛᴜʀᴇs⚡️\r\n\r\nFork our repository and give star ⭐- <a href='https://github.com/Kushalhk/AutoFilter'>📥 ᴄʟɪᴄᴋ ʜᴇʀᴇ 📥</a></b>\r\n\"\"\"\r\n \r\n KUSHAL_TXT = \"\"\" \r\n<b>🔥 ᴘʀᴇᴍɪᴜᴍ ғᴇᴀᴛᴜʀᴇs 🔥\r\n\r\n➻ ɴᴏ ɴᴇᴇᴅ ᴛᴏ ᴠᴇʀɪғʏ\r\n➻ ᴅɪʀᴇᴄᴛ ғɪʟᴇs\r\n➻ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ\r\n➻ ʜɪɢʜ-sᴘᴇᴇᴅ ᴅᴏᴡɴʟᴏᴀᴅ ʟɪɴᴋ\r\n➻ ᴜɴʟɪᴍɪᴛᴇᴅ ᴍᴏᴠɪᴇs ᴀɴᴅ sᴇʀɪᴇs\r\n➻ ғᴜʟʟ ᴀᴅᴍɪɴ sᴜᴘᴘᴏʀᴛ \r\n➻ ʀᴇǫᴜᴇsᴛ ᴡɪʟʟ ʙᴇ ᴄᴏᴍᴘʟᴇᴛᴇᴅ ɪɴ 𝟷ʜ ɪғ ᴀᴠᴀɪʟᴀʙʟᴇ\r\n\r\n‼️ ᴄʟɪᴄᴋ ᴏɴ ʙᴇʟᴏᴡ ʙᴜᴛᴛᴏɴ ᴛᴏ ᴄʜᴇᴄᴋ ᴀʟʟ ᴀᴠᴀɪʟᴀʙʟᴇ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴs ᴀɴᴅ ɪᴛ's ᴘʀɪᴄᴇs.</b>\"\"\"\r\n\r\n \r\n SETTINGS_TXT = \"\"\"\r\nHᴇʟᴘ : <b>Sᴇᴛᴛɪɴɢꜱ</b>\r\n \r\n◈ sᴇᴛᴛɪɴɢs ɪs ᴍᴏsᴛ ɪᴍᴘᴏʀᴛᴀɴᴛ ғᴇᴀᴛᴜʀᴇ ɪɴ ᴛʜɪs ʙᴏᴛ.\r\n◈ ʏᴏᴜ ᴄᴀɴ ᴇᴀsɪʟʏ ᴄᴜsᴛᴏᴍɪᴢᴇ ᴛʜɪs ʙᴏᴛ ғᴏʀ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴏɴʟʏ ɢʀᴏᴜᴘ ᴀᴅᴍɪɴ ᴄᴀɴ ᴜsᴇ ᴛʜɪs ᴄᴏᴍᴍᴀɴᴅ ᴀɴᴅ ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs.\r\n2. ɪᴛ ᴡᴏʀᴋs ᴏɴʟʏ ᴡʜᴇɴ ʙᴏᴛ ᴀʟʀᴇᴀᴅʏ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴛᴏ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /connect - ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ʙᴏᴛ\r\n• /settings - ᴄʜᴀɴɢᴇ sᴇᴛᴛɪɴɢs ᴀs ʏᴏᴜʀ ᴡɪsʜ \"\"\"\r\n\r\n TELEGRAPH_TXT = \"\"\" Hᴇʟᴘ : <b>Tᴇʟᴇɢʀᴀᴘʜ</b>\r\n\r\n<b>Nᴏᴛᴇ</b>: ᴛʜɪꜱ ᴄᴏᴍᴍᴀɴᴅ ɪꜱ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ɢʀᴏᴜᴘꜱ ᴀɴᴅ ᴘᴍꜱ. ᴀʟꜱᴏ ᴄᴀɴ ʙᴇ ᴜꜱᴇ ʙʏ ᴇᴠᴇʀʏᴏɴᴇ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs & Usᴀɢᴇ :</b>\r\n• /telegraph - sᴇɴᴅ ᴍᴇ ᴘɪᴄᴛᴜʀᴇ ᴏʀ ᴠɪᴅᴇᴏ ᴜɴᴅᴇʀ 𝟻ᴍʙ\"\"\"\r\n\r\n FONT_TXT = \"\"\"Hᴇʟᴘ : <b>Fᴏɴᴛ</b>\r\n\r\n<b>Nᴏᴛᴇ</b>: ʏᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ᴍᴏᴅᴇ ᴛᴏ ᴄʜᴀɴɢᴇ ʏᴏᴜʀ ꜰᴏɴᴛꜱ ꜱᴛʏʟᴇ, ᴊᴜꜱᴛ ꜱᴇɴᴅ ᴍᴇ ʟɪᴋᴇ ᴛʜɪꜱ ꜰᴏʀᴍᴀᴛ. \r\n\r\n<code>/font TG_LINKS_CHANNEL</code>\"\"\"\r\n\r\n MANUELFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Fɪʟᴛᴇʀꜱ</b>\r\n \r\n◈ ꜰɪʟᴛᴇʀ ɪꜱ ᴀ ꜰᴇᴀᴛᴜʀᴇ ᴡᴇʀᴇ ᴜꜱᴇʀꜱ ᴄᴀɴ ꜱᴇᴛ ᴀᴜᴛᴏᴍᴀᴛᴇᴅ ʀᴇᴘʟɪᴇꜱ ꜰᴏʀ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴋᴇʏᴡᴏʀᴅ ᴀɴᴅ ɪ ᴡɪʟʟ ʀᴇꜱᴘᴏɴᴅ ᴡʜᴇɴᴇᴠᴇʀ ᴀ ᴋᴇʏᴡᴏʀᴅ ɪꜱ ꜰᴏᴜɴᴅ ɪɴ ᴛʜᴇ ᴍᴇꜱꜱᴀɢᴇ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴛʜɪꜱ ʙᴏᴛ ꜱʜᴏᴜʟᴅ ʜᴀᴠᴇ ᴀᴅᴍɪɴ ᴘʀɪᴠɪʟᴇɢᴇ.\r\n2. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ꜰɪʟᴛᴇʀꜱ ɪɴ ᴀ ᴄʜᴀᴛ.\r\n3. 
ᴀʟᴇʀᴛ ʙᴜᴛᴛᴏɴꜱ ʜᴀᴠᴇ ᴀ ʟɪᴍɪᴛ ᴏꜰ 64 ᴄʜᴀʀᴀᴄᴛᴇʀꜱ.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /filter - ᴀᴅᴅ ᴀ ꜰɪʟᴛᴇʀ ɪɴ ᴀ ᴄʜᴀᴛ\r\n• /filters - ʟɪꜱᴛ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴛᴇʀꜱ ᴏꜰ ᴀ ᴄʜᴀᴛ\r\n• /del - ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴛᴇʀ ɪɴ ᴀ ᴄʜᴀᴛ\r\n• /delall - ᴅᴇʟᴇᴛᴇ ᴛʜᴇ ᴡʜᴏʟᴇ ꜰɪʟᴛᴇʀꜱ ɪɴ ᴀ ᴄʜᴀᴛ (ᴄʜᴀᴛ ᴏᴡɴᴇʀ ᴏɴʟʏ)\"\"\"\r\n\r\n BUTTON_TXT = \"\"\"Hᴇʟᴘ : <b>Bᴜᴛᴛᴏɴꜱ</b>\r\n \r\n◈ ᴛʜɪꜱ ʙᴏᴛ ꜱᴜᴘᴘᴏʀᴛꜱ ʙᴏᴛʜ ᴜʀʟ ᴀɴᴅ ᴀʟᴇʀᴛ ɪɴʟɪɴᴇ ʙᴜᴛᴛᴏɴꜱ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n𝟷. ᴛᴇʟᴇɢʀᴀᴍ ᴡɪʟʟ ɴᴏᴛ ᴀʟʟᴏᴡꜱ ʏᴏᴜ ᴛᴏ ꜱᴇɴᴅ ʙᴜᴛᴛᴏɴꜱ ᴡɪᴛʜᴏᴜᴛ ᴀɴʏ ᴄᴏɴᴛᴇɴᴛ, ꜱᴏ ᴄᴏɴᴛᴇɴᴛ ɪꜱ ᴍᴀɴᴅᴀᴛᴏʀʏ.\r\n𝟸. ᴛʜɪꜱ ʙᴏᴛ ꜱᴜᴘᴘᴏʀᴛꜱ ʙᴜᴛᴛᴏɴꜱ ᴡɪᴛʜ ᴀɴʏ ᴛᴇʟᴇɢʀᴀᴍ ᴍᴇᴅɪᴀ ᴛʏᴘᴇ.\r\n𝟹. ʙᴜᴛᴛᴏɴꜱ ꜱʜᴏᴜʟᴅ ʙᴇ ᴘʀᴏᴘᴇʀʟʏ ᴘᴀʀꜱᴇᴅ ᴀꜱ ᴍᴀʀᴋᴅᴏᴡɴ ꜰᴏʀᴍᴀᴛ\r\n\r\nᴜʀʟ ʙᴜᴛᴛᴏɴꜱ :\r\n<code>[Button Text](buttonurl:https://t.me/TG_LINKS_CHANNEL)</code>\r\n\r\nᴀʟᴇʀᴛ ʙᴜᴛᴛᴏɴꜱ :\r\n<code>[Button Text](buttonalert:ᴛʜɪꜱ ɪꜱ ᴀɴ ᴀʟᴇʀᴛ ᴍᴇꜱꜱᴀɢᴇ)</code>\"\"\"\r\n\r\n AUTOFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Aᴜᴛᴏ Fɪʟᴛᴇʀ</b>\r\n    \r\n<b>Nᴏᴛᴇ :</b> Fɪʟᴇ Iɴᴅᴇx\r\n𝟷. ᴍᴀᴋᴇ ᴍᴇ ᴛʜᴇ ᴀᴅᴍɪɴ ᴏꜰ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ɪꜰ ɪᴛ'ꜱ ᴘʀɪᴠᴀᴛᴇ.\r\n𝟸. ᴍᴀᴋᴇ ꜱᴜʀᴇ ᴛʜᴀᴛ ʏᴏᴜʀ ᴄʜᴀɴɴᴇʟ ᴅᴏᴇꜱ ɴᴏᴛ ᴄᴏɴᴛᴀɪɴꜱ ᴄᴀᴍʀɪᴘꜱ, ᴘᴏʀɴ ᴀɴᴅ ꜰᴀᴋᴇ ꜰɪʟᴇꜱ.\r\n𝟹. ꜰᴏʀᴡᴀʀᴅ ᴛʜᴇ ʟᴀꜱᴛ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴍᴇ ᴡɪᴛʜ ǫᴜᴏᴛᴇꜱ. ɪ'ʟʟ ᴀᴅᴅ ᴀʟʟ ᴛʜᴇ ꜰɪʟᴇꜱ ɪɴ ᴛʜᴀᴛ ᴄʜᴀɴɴᴇʟ ᴛᴏ ᴍʏ ᴅʙ.\r\n\r\n<b>Nᴏᴛᴇ :</b> Aᴜᴛᴏ Fɪʟᴛᴇʀ\r\n𝟷. Aᴅᴅ ᴛʜᴇ ʙᴏᴛ ᴀs ᴀᴅᴍɪɴ ᴏɴ ʏᴏᴜʀ ɢʀᴏᴜᴘ.\r\n𝟸. Usᴇ /connect ᴀɴᴅ ᴄᴏɴɴᴇᴄᴛ ʏᴏᴜʀ ɢʀᴏᴜᴘ ᴛᴏ ᴛʜᴇ ʙᴏᴛ.\r\n𝟹. Usᴇ /settings ᴏɴ ʙᴏᴛ's ᴘᴍ ᴀɴᴅ ᴛᴜʀɴ ᴏɴ AᴜᴛᴏFɪʟᴛᴇʀ ᴏɴ ᴛʜᴇ sᴇᴛᴛɪɴɢs ᴍᴇɴᴜ.\"\"\"\r\n\r\n \r\n RULE_TXT = \"\"\"♦ 𝗚𝗿𝗼𝘂𝗽 𝗥𝘂𝗹𝗲𝘀 ♦\r\n\r\n◈ <b>Sᴇᴀʀᴄʜ Mᴏᴠɪᴇ Wɪᴛʜ Cᴏʀʀᴇᴄᴛ Sᴘᴇʟʟɪɴɢ:</b>\r\n• ᴀᴠᴀᴛᴀʀ 𝟸𝟶𝟶𝟿 ✅\r\n• ᴀᴠᴀᴛᴀʀ ʜɪɴᴅɪ ✅\r\n• ᴀᴠᴀᴛᴀʀ ᴍᴏᴠɪᴇ ❌\r\n• ᴀᴠᴀᴛᴀʀ ʜɪɴᴅɪ ᴅᴜʙʙᴇᴅ..❌\r\n\r\n◈ <b>Sᴇᴀʀᴄʜ Wᴇʙ Sᴇʀɪᴇs Iɴ ᴛʜɪs Fᴏʀᴍᴀᴛ:</b>\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷E𝟶𝟷 ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ʜɪɴᴅɪ ✅\r\n• ᴠɪᴋɪɴɢs S𝟶𝟷 ʜɪɴᴅɪ ᴅᴜʙʙ... ❌\r\n• ᴠɪᴋɪɴɢs sᴇᴀsᴏɴ 𝟷 ❌\r\n• ᴠɪᴋɪɴɢs ᴡᴇʙ sᴇʀɪᴇs ❌\r\n\r\n<b>➙ ᴅᴏɴ'ᴛ ᴅᴏ ᴀɴʏ ꜱᴇʟꜰ ᴘʀᴏᴍᴏᴛɪᴏɴ. \r\n➙ ᴅᴏɴ'ᴛ ꜱᴇɴᴅ ᴀɴʏ ᴋɪɴᴅ ᴏꜰ ᴘʜᴏᴛᴏ, ᴠɪᴅᴇᴏ, ᴅᴏᴄᴜᴍᴇɴᴛꜱ, ᴜʀʟ, ᴇᴛᴄ...\r\n➙ ᴅᴏɴ'ᴛ ʀᴇǫᴜᴇꜱᴛ ᴀɴʏ ᴛʜɪɴɢꜱ ᴏᴛʜᴇʀ ᴛʜᴀɴ ᴍᴏᴠɪᴇꜱ, ꜱᴇʀɪᴇꜱ, ᴀɴɪᴍᴀᴛɪᴏɴ, ᴄᴀʀᴛᴏᴏɴ, ᴀɴɪᴍᴇ, ᴋ-ᴅʀᴀᴍᴀ ᴍᴀɴʏ ᴍᴏʀᴇ.</b>\r\n\r\n🔰 <b>Nᴏᴛᴇ :</b> ᴀʟʟ ᴍᴇꜱꜱᴀɢᴇꜱ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏ-ᴅᴇʟᴇᴛᴇᴅ ᴀꜰᴛᴇʀ 𝟷𝟶 ᴍɪɴᴜᴛᴇꜱ ᴛᴏ ᴀᴠᴏɪᴅ ᴄᴏᴘʏʀɪɢʜᴛ ɪꜱꜱᴜᴇꜱ.\"\"\"\r\n\r\n CONNECTION_TXT = \"\"\"Hᴇʟᴘ : <b>Cᴏɴɴᴇᴄᴛɪᴏɴꜱ</b>\r\n \r\n◈ ᴜꜱᴇᴅ ᴛᴏ ᴄᴏɴɴᴇᴄᴛ ʙᴏᴛ ᴛᴏ ᴘᴍ ꜰᴏʀ ᴍᴀɴᴀɢɪɴɢ ꜰɪʟᴛᴇʀꜱ \r\n◈ ɪᴛ ʜᴇʟᴘꜱ ᴛᴏ ᴀᴠᴏɪᴅ ꜱᴘᴀᴍᴍɪɴɢ ɪɴ ɢʀᴏᴜᴘꜱ.\r\n\r\n<b>Nᴏᴛᴇ :</b>\r\n1. ᴏɴʟʏ ᴀᴅᴍɪɴꜱ ᴄᴀɴ ᴀᴅᴅ ᴀ ᴄᴏɴɴᴇᴄᴛɪᴏɴ.\r\n2. ꜱᴇɴᴅ /ᴄᴏɴɴᴇᴄᴛ ꜰᴏʀ ᴄᴏɴɴᴇᴄᴛɪɴɢ ᴍᴇ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /connect - ᴄᴏɴɴᴇᴄᴛ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ᴄʜᴀᴛ ᴛᴏ ʏᴏᴜʀ ᴘᴍ\r\n• /disconnect - ᴅɪꜱᴄᴏɴɴᴇᴄᴛ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ\r\n• /connections - ʟɪꜱᴛ ᴀʟʟ ʏᴏᴜʀ ᴄᴏɴɴᴇᴄᴛɪᴏɴꜱ\"\"\"\r\n\r\n EXTRAMOD_TXT = \"\"\"Hᴇʟᴘ : <b>Exᴛʀᴀ Mᴏᴅᴜʟᴇs</b>\r\n \r\n<b>Nᴏᴛᴇ :</b>\r\nᴛʜᴇꜱᴇ ᴀʀᴇ ᴛʜᴇ ᴇxᴛʀᴀ ꜰᴇᴀᴛᴜʀᴇꜱ ᴏꜰ ᴛʜɪꜱ ʙᴏᴛ\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /id - ɢᴇᴛ ɪᴅ ᴏꜰ ᴀ ꜱᴘᴇᴄɪꜰɪᴇᴅ ᴜꜱᴇʀ.\r\n• /info - ɢᴇᴛ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ᴀʙᴏᴜᴛ ᴀ ᴜꜱᴇʀ.\r\n• /imdb - ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ɪᴍᴅʙ ꜱᴏᴜʀᴄᴇ.\r\n• /search - ɢᴇᴛ ᴛʜᴇ ꜰɪʟᴍ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ꜰʀᴏᴍ ᴠᴀʀɪᴏᴜꜱ ꜱᴏᴜʀᴄᴇꜱ.\"\"\"\r\n\r\n ADMIN_TXT = \"\"\"<b>Nᴏᴛᴇ :</b> Tʜɪs Mᴏᴅᴜʟᴇ Oɴʟʏ Wᴏʀᴋs Fᴏʀ Mʏ Aᴅᴍɪɴs.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /logs - ᴛᴏ ɢᴇᴛ ᴛʜᴇ ʀᴇᴄᴇɴᴛ ᴇʀʀᴏʀꜱ\r\n• /stats - ᴛᴏ ɢᴇᴛ ꜱᴛᴀᴛᴜꜱ ᴏꜰ ꜰɪʟᴇꜱ ɪɴ ᴅʙ. <b>[Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</b>\r\n• /delete - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ ꜱᴘᴇᴄɪꜰɪᴄ ꜰɪʟᴇ ꜰʀᴏᴍ ᴅʙ.\r\n• /users - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴜꜱᴇʀꜱ ᴀɴᴅ ɪᴅꜱ.\r\n• /chats - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴍʏ ᴄʜᴀᴛꜱ ᴀɴᴅ ɪᴅꜱ\r\n• /leave - ᴛᴏ ʟᴇᴀᴠᴇ ꜰʀᴏᴍ ᴀ ᴄʜᴀᴛ.\r\n• /disable - ᴛᴏ ᴅɪꜱᴀʙʟᴇ ᴀ ᴄʜᴀᴛ.\r\n• /ban - ᴛᴏ ʙᴀɴ ᴀ ᴜꜱᴇʀ.\r\n• /unban - ᴛᴏ ᴜɴʙᴀɴ ᴀ ᴜꜱᴇʀ.\r\n• /channel - ᴛᴏ ɢᴇᴛ ʟɪꜱᴛ ᴏꜰ ᴛᴏᴛᴀʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ᴄʜᴀɴɴᴇʟꜱ. \r\n• /broadcast - ᴛᴏ ʙʀᴏᴀᴅᴄᴀꜱᴛ ᴀ ᴍᴇꜱꜱᴀɢᴇ ᴛᴏ ᴀʟʟ ᴜꜱᴇʀꜱ. 
\r\n• /grp_broadcast - Tᴏ ʙʀᴏᴀᴅᴄᴀsᴛ ᴀ ᴍᴇssᴀɢᴇ ᴛᴏ ᴀʟʟ ᴄᴏɴɴᴇᴄᴛᴇᴅ ɢʀᴏᴜᴘs.\r\n• /gfilter - ᴛᴏ ᴀᴅᴅ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs. \r\n• /gfilters - ᴛᴏ ᴠɪᴇᴡ ʟɪsᴛ ᴏғ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs. \r\n• /delg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀ sᴘᴇᴄɪғɪᴄ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ. \r\n• /request - ᴛᴏ sᴇɴᴅ ᴀ ᴍᴏᴠɪᴇ/sᴇʀɪᴇs ʀᴇᴏ̨ᴜᴇsᴛ ᴛᴏ ʙᴏᴛ ᴀᴅᴍɪɴs. ᴏɴʟʏ ᴡᴏʀᴋs ᴏɴ sᴜᴘᴘᴏʀᴛ ɢʀᴏᴜᴘ. <b>[Tʜɪs Cᴏᴍᴍᴀɴᴅ Cᴀɴ Bᴇ Usᴇᴅ Bʏ Aɴʏᴏɴᴇ]</b>\r\n• /delallg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢғɪʟᴛᴇʀs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.\r\n• /deletefiles - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴄᴀᴍʀɪᴘ ᴀɴᴅ ᴘʀᴇ-ᴅᴠᴅ ғɪʟᴇs ғʀᴏᴍ ᴛʜᴇ ʙᴏᴛ's ᴅᴀᴛᴀʙᴀsᴇ.\"\"\"\r\n\r\n STICKER_TXT = \"\"\"<b>yᴏᴜ ᴄᴀɴ ᴜꜱᴇ ᴛʜɪꜱ ᴍᴏᴅᴜʟᴇ ᴛᴏ ꜰɪɴᴅᴀɴy ꜱᴛɪᴄᴋᴇʀꜱ ɪᴅ.\r\n• ᴜꜱᴀɢᴇ :ᴛᴏ ɢᴇᴛ ꜱᴛɪᴄᴋᴇʀ\r\n \r\n⭕ ʜᴏᴡ ᴛᴏ ᴜꜱᴇ\r\n◉ Reply To Any Sticker [/stickerid]\r\n\r\n/𝐬𝐭𝐢𝐜𝐤𝐞𝐫𝐢𝐝 𝐬𝐭𝐢𝐜𝐤𝐞𝐫 𝐢𝐝\r\n\r\n</b>\"\"\"\r\n \r\n STATUS_TXT = \"\"\"<b>⍟─────[ <b>Bᴏᴛ Sᴛᴀᴛᴜs</b> ]─────⍟\r\n    \r\n★ ᴛᴏᴛᴀʟ ꜰɪʟᴇꜱ : <code>{}</code>\r\n★ ᴛᴏᴛᴀʟ ᴜꜱᴇʀꜱ : <code>{}</code>\r\n★ ᴛᴏᴛᴀʟ ɢʀᴏᴜᴘꜱ : <code>{}</code>\r\n★ ᴜꜱᴇᴅ ꜱᴛᴏʀᴀɢᴇ: <code>{}</code>\r\n★ ꜰʀᴇᴇ ꜱᴛᴏʀᴀɢᴇ : <code>{}</code>\r\n\r\n•❅──────✧❅✦❅✧──────❅•</b>\"\"\"\r\n\r\n\r\n LOG_TEXT_G = \"\"\"<b>#NewGroup\r\nGʀᴏᴜᴘ = {}(<code>{}</code>)\r\nTᴏᴛᴀʟ Mᴇᴍʙᴇʀs = <code>{}</code>\r\nAᴅᴅᴇᴅ Bʏ - {}</b>\"\"\"\r\n\r\n LOG_TEXT_P = \"\"\"<b>#NewUser\r\nID - <code>{}</code>\r\nNᴀᴍᴇ - {}</b>\"\"\"\r\n\r\n ALRT_TXT = \"\"\"<b>ʜᴇʟʟᴏ {},\r\nᴛʜɪꜱ ɪꜱ ɴᴏᴛ ʏᴏᴜʀ ᴍᴏᴠɪᴇ ʀᴇQᴜᴇꜱᴛ,\r\nʀᴇǫᴜᴇꜱᴛ ʏᴏᴜʀ'ꜱ...</b>\"\"\"\r\n\r\n OLD_ALRT_TXT = \"\"\"<b>ʜᴇʏ {},\r\nʏᴏᴜ ᴀʀᴇ ᴜꜱɪɴɢ ᴏɴᴇ ᴏꜰ ᴍʏ ᴏʟᴅ ᴍᴇꜱꜱᴀɢᴇꜱ, \r\nᴘʟᴇᴀꜱᴇ ꜱᴇɴᴅ ᴛʜᴇ ʀᴇǫᴜᴇꜱᴛ ᴀɢᴀɪɴ.</b>\"\"\"\r\n\r\n CUDNT_FND = \"\"\"<b>ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏᴛʜɪɴɢ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}\r\nᴅɪᴅ ʏᴏᴜ ᴍᴇᴀɴ ᴀɴʏ ᴏɴᴇ ᴏꜰ ᴛʜᴇꜱᴇ?</b>\"\"\"\r\n\r\n I_CUDNT = \"\"\"<b>sᴏʀʀʏ ɴᴏ ꜰɪʟᴇs ᴡᴇʀᴇ ꜰᴏᴜɴᴅ ꜰᴏʀ ʏᴏᴜʀ ʀᴇǫᴜᴇꜱᴛ {} 😕\r\n\r\nMᴏᴠɪᴇs Nᴏᴛ Aᴠᴀɪʟᴀʙʟᴇ Rᴇᴀsᴏɴ:\r\n𝟷. ᴏ.ᴛ.ᴛ ᴏʀ ᴅᴠᴅ ɴᴏᴛ ʀᴇʟᴇᴀsᴇᴅ\r\n𝟸. ᴛʏᴘᴇ ɴᴀᴍᴇ ᴡɪᴛʜ ʏᴇᴀʀ\r\n𝟹. ᴍᴏᴠɪᴇ ɪs ɴᴏᴛ ᴀᴠᴀɪʟᴀʙʟᴇ ɪɴ ᴛʜᴇ ᴅᴀᴛᴀʙᴀsᴇ ʀᴇᴘᴏʀᴛ ᴛᴏ ᴀᴅᴍɪɴs @TG_Bots_Supporter</b>\"\"\"\r\n\r\n I_CUD_NT = \"\"\"<b>ɪ ᴄᴏᴜʟᴅɴ'ᴛ ꜰɪɴᴅ ᴀɴʏ ᴍᴏᴠɪᴇ ʀᴇʟᴀᴛᴇᴅ ᴛᴏ {}.\r\nᴘʟᴇᴀꜱᴇ ᴄʜᴇᴄᴋ ᴛʜᴇ ꜱᴘᴇʟʟɪɴɢ ᴏɴ ɢᴏᴏɢʟᴇ ᴏʀ ɪᴍᴅʙ...</b>\"\"\"\r\n\r\n MVE_NT_FND = \"\"\"<b>ᴍᴏᴠɪᴇ ɴᴏᴛ ꜰᴏᴜɴᴅ ɪɴ ᴅᴀᴛᴀʙᴀꜱᴇ...</b>\"\"\"\r\n\r\n TOP_ALRT_MSG = \"\"\"<b>Cʜᴇᴄᴋɪɴɢ Fᴏʀ Mᴏᴠɪᴇ Iɴ Dᴀᴛᴀʙᴀsᴇ...</b>\"\"\"\r\n\r\n MELCOW_ENG = \"\"\"<b>Hᴇʟʟᴏ {} 😍, Aɴᴅ Wᴇʟᴄᴏᴍᴇ Tᴏ {} Gʀᴏᴜᴘ ❤️\r\n\r\n➻ ʜᴇʀᴇ ʏᴏᴜ ᴄᴀɴ ꜱᴇᴀʀᴄʜ ʏᴏᴜʀ ꜰᴀᴠᴏᴜʀɪᴛᴇ ᴍᴏᴠɪᴇꜱ ᴏʀ ꜱᴇʀɪᴇꜱ ʙʏ ᴊᴜꜱᴛ ᴛʏᴘɪɴɢ ɪᴛ'ꜱ ɴᴀᴍᴇ. 
\r\n\r\n⚠️ ɪꜰ ʏᴏᴜ ᴀʀᴇ ʜᴀᴠɪɴɢ ᴀɴʏ ᴘʀᴏʙʟᴇᴍ ʀᴇɢᴀʀᴅɪɴɢ ᴅᴏᴡɴʟᴏᴀᴅɪɴɢ ᴏʀ ꜱᴏᴍᴇᴛʜɪɴɢ ᴇʟꜱᴇ ᴛʜᴇɴ ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>\"\"\"\r\n \r\n REQINFO = \"\"\"\r\n⚠ ɪɴꜰᴏʀᴍᴀᴛɪᴏɴ ⚠\r\n\r\nᴀꜰᴛᴇʀ 5 ᴍɪɴᴜᴛᴇꜱ ᴛʜɪꜱ ᴍᴇꜱꜱᴀɢᴇ ᴡɪʟʟ ʙᴇ ᴀᴜᴛᴏᴍᴀᴛɪᴄᴀʟʟʏ ᴅᴇʟᴇᴛᴇᴅ\r\n\r\nɪꜰ ʏᴏᴜ ᴅᴏ ɴᴏᴛ ꜱᴇᴇ ᴛʜᴇ ʀᴇǫᴜᴇsᴛᴇᴅ ᴍᴏᴠɪᴇ / sᴇʀɪᴇs ꜰɪʟᴇ, ʟᴏᴏᴋ ᴀᴛ ᴛʜᴇ ɴᴇxᴛ ᴘᴀɢᴇ\"\"\"\r\n\r\n \r\n\r\n SINFO = \"\"\"\r\n⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯\r\nꜱᴇʀɪᴇꜱ ʀᴇǫᴜᴇꜱᴛ ꜰᴏʀᴍᴀᴛ\r\n⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯⋯\r\n\r\nɢᴏ ᴛᴏ ɢᴏᴏɢʟᴇ ➠ ᴛʏᴘᴇ ꜱᴇʀɪᴇꜱ ɴᴀᴍᴇ ➠ ᴄᴏᴘʏ ᴄᴏʀʀᴇᴄᴛ ɴᴀᴍᴇ ➠ ᴘᴀꜱᴛᴇ ᴛʜɪꜱ ɢʀᴏᴜᴘ\r\n\r\nᴇxᴀᴍᴘʟᴇ : Loki S01E01\r\n\r\n🚯 ᴅᴏɴᴛ ᴜꜱᴇ ➠ ':(!,./)\"\"\"\r\n\r\n NORSLTS = \"\"\"\r\n★ #𝗡𝗼𝗥𝗲𝘀𝘂𝗹𝘁𝘀 ★\r\n\r\n𝗜𝗗 <b>: {}</b>\r\n\r\n𝗡𝗮𝗺𝗲 <b>: {}</b>\r\n\r\n𝗠𝗲𝘀𝘀𝗮𝗴𝗲 <b>: {}</b>🥲\"\"\"\r\n\r\n CAPTION = \"\"\" \r\n🗂 𝗙𝗶𝗹𝗲: <b><font class=smcp>{file_name}</font></b>\r\n📀 𝗦𝗶𝘇𝗲: <b><font class=smcp>{file_size}</font></b>\r\n\r\n<b>🔰 Cʀᴇᴀᴛᴏʀ : <a href=\"https://t.me/KUSHALHK\">𝐊𝐔𝐒𝐇𝐀𝐋</a>\r\n🔰 Cʜᴀɴɴᴇʟ : <a href=\"https://t.me/TG_LINKS_CHANNEL\">𝐌𝐎𝐕𝐈𝐄𝐒 𝐂𝐇𝐀𝐍𝐍𝐄𝐋</a>\r\n🔰 Gʀᴏᴜᴘ : <a href=\"https://t.me/movies_hub_official1\">𝐌𝐎𝐕𝐈𝐄 𝐑𝐄𝐐𝐔𝐄𝐒𝐓 𝐆𝐑𝐎𝐔𝐏</a></b>\"\"\"\r\n \r\n IMDB_TEMPLATE_TXT = \"\"\"\r\n<b>Query: {query}\r\nIMDb Data:\r\n\r\n🧿 𝐓𝐈𝐓𝐋𝐄: <a href={url}>{title}</a>\r\n🎭 𝐆𝐄𝐍𝐑𝐄𝐒: {genres}\r\n📆 𝐘𝐄𝐀𝐑: <a href={url}/releaseinfo>{year}</a>\r\n🌟 𝐑𝐀𝐓𝐈𝐍𝐆: <a href={url}/ratings>{rating}</a> / 10 (Based on {votes} user ratings)</b>\r\n☀️ 𝐋𝐀𝐍𝐆𝐔𝐀𝐆𝐄 : <code>{languages}</code></a>\r\n📀 𝐑𝐔𝐍𝐓𝐈𝐌𝐄: {runtime} Minutes</a>\r\n\r\n<b>👨‍💼 Requested by : {message.from_user.mention}</b>\"\"\"\r\n\r\n \r\n ALL_FILTERS = \"\"\"\r\n<b>Hᴇʏ {}, Tʜᴇsᴇ ᴀʀᴇ ᴍʏ ᴛʜʀᴇᴇ ᴛʏᴘᴇs ᴏғ ғɪʟᴛᴇʀs.</b>\"\"\"\r\n \r\n GFILTER_TXT = \"\"\"Hᴇʟᴘ : <b>Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs</b>\r\n \r\n◈ Gʟᴏʙᴀʟ Fɪʟᴛᴇʀs ᴀʀᴇ ᴛʜᴇ ғɪʟᴛᴇʀs sᴇᴛ ʙʏ ʙᴏᴛ ᴀᴅᴍɪɴs ᴡʜɪᴄʜ ᴡɪʟʟ ᴡᴏʀᴋ ᴏɴ ᴀʟʟ ɢʀᴏᴜᴘs.\r\n \r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /gfilter - Tᴏ ᴄʀᴇᴀᴛᴇ ᴀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.\r\n• /gfilters - Tᴏ ᴠɪᴇᴡ ᴀʟʟ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀs.\r\n• /delg - Tᴏ ᴅᴇʟᴇᴛᴇ ᴀ ᴘᴀʀᴛɪᴄᴜʟᴀʀ ɢʟᴏʙᴀʟ ғɪʟᴛᴇʀ.\r\n• /delallg - ᴛᴏ ᴅᴇʟᴇᴛᴇ ᴀʟʟ ɢʟᴏʙᴀʟ ꜰɪʟᴛᴇʀꜱ.\"\"\"\r\n \r\n FILE_STORE_TXT = \"\"\"Hᴇʟᴘ : <b>Fɪʟᴇ Sᴛᴏʀᴇ</b>\r\n \r\n◈ Fɪʟᴇ sᴛᴏʀᴇ ɪs ᴛʜᴇ ғᴇᴀᴛᴜʀᴇ ᴡʜɪᴄʜ ᴡɪʟʟ ᴄʀᴇᴀᴛᴇ ᴀ sʜᴀʀᴇᴀʙʟᴇ ʟɪɴᴋ ᴏғ ᴀ sɪɴɢʟᴇ ᴏʀ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.\r\n\r\n<b>Cᴏᴍᴍᴀɴᴅs Aɴᴅ Usᴀɢᴇ :</b>\r\n• /batch - ᴛᴏ ᴄʀᴇᴀᴛᴇ ᴀ ʙᴀᴛᴄʜ ʟɪɴᴋ ᴏғ ᴍᴜʟᴛɪᴘʟᴇ ғɪʟᴇs.\r\n• /link - ᴛᴏ ᴄʀᴇᴀᴛᴇ ᴀ sɪɴɢʟᴇ ғɪʟᴇ sᴛᴏʀᴇ ʟɪɴᴋ.\r\n• /pbatch - ᴊᴜsᴛ ʟɪᴋᴇ <code>/batch</code>, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇs ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴs.\r\n• /plink - ᴊᴜsᴛ ʟɪᴋᴇ <code>/link</code>, ʙᴜᴛ ᴛʜᴇ ғɪʟᴇ ᴡɪʟʟ ʙᴇ sᴇɴᴅ ᴡɪᴛʜ ғᴏʀᴡᴀʀᴅ ʀᴇsᴛʀɪᴄᴛɪᴏɴ.\"\"\"\r\n\r\n CHECK_TXT = \"\"\"\r\n<b>🔥 ᴄʜᴏᴏsᴇ ʏᴏᴜʀ sᴜɪᴛᴀʙʟᴇ ᴘʟᴀɴ ᴀɴᴅ ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ғᴇᴇs ᴜsɪɴɢ ᴀɴʏ ᴜᴘɪ ᴀᴘᴘ. \r\n\r\nᴘʟᴀɴ ᴀ : 𝟷 ᴡᴇᴇᴋ / ₹𝟷𝟻\r\nᴘʟᴀɴ ʙ : 𝟷 ᴍᴏɴᴛʜ / ₹𝟹𝟿\r\nᴘʟᴀɴ ᴄ : 𝟷 ʏᴇᴀʀ / ₹𝟹𝟼𝟶\r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN1_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟷𝟻 ғᴏʀ 𝟷 ᴡᴇᴇᴋ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. \r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN2_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟹𝟿 ғᴏʀ 𝟷 ᴍᴏɴᴛʜ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. \r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n PLAN3_TXT = \"\"\"\r\n<b>🔥 ᴘᴀʏ ʏᴏᴜʀ ᴘʀᴇᴍɪᴜᴍ ᴘʟᴀɴ ғᴇᴇs ₹𝟹𝟼𝟶 ғᴏʀ 𝟷 ʏᴇᴀʀ ᴘʀᴇᴍɪᴜᴍ ᴀᴄᴄᴇss ᴡɪᴛʜ ᴀᴅ-ғʀᴇᴇ ᴇxᴘᴇʀɪᴇɴᴄᴇ ᴀɴᴅ ᴍᴀɴʏ ᴍᴏʀᴇ. 
\r\n\r\n➻ ᴜᴘɪ ɪᴅ : harikushal234@paytm\r\n\r\n‼️ ᴍᴜsᴛ sᴇɴᴅ sᴄʀᴇᴇɴsʜᴏᴛ ᴀғᴛᴇʀ ᴘᴀʏᴍᴇɴᴛ ᴀɴᴅ ɢɪᴠᴇ ᴍᴇ sᴏᴍᴇ ᴛɪᴍᴇ ᴛᴏ ᴀᴅᴅ ʏᴏᴜ ɪɴ ᴛʜᴇ ᴘʀᴇᴍɪᴜᴍ ʟɪsᴛ.</b>\"\"\"\r\n\r\n RESTART_TXT = \"\"\"\r\n<b>Bᴏᴛ Rᴇsᴛᴀʀᴛᴇᴅ !\r\n\r\n📅 Dᴀᴛᴇ : <code>{}</code>\r\n⏰ Tɪᴍᴇ : <code>{}</code>\r\n🌐 Tɪᴍᴇᴢᴏɴᴇ : <code>Asia/Kolkata</code>\r\n🛠️ Bᴜɪʟᴅ Sᴛᴀᴛᴜs: <code>ᴠ𝟹.𝟶 [ Sᴛᴀʙʟᴇ ]</code></b>\"\"\"\r\n\r\n LOGO = \"\"\"\r\n ____ ___ ____ __ ____ ____ \r\n(_ _)/ __) ( _ \\ / \\(_ _)(__ )\r\n )( ( (_ \\ ) _ (( O ) )( / _/ \r\n (__) \\___/ (____/ \\__/ (__) (____)\"\"\"\r" } ]
from pyrogram import Client, filters, enums from pyrogram.types import InlineKeyboardButton, InlineKeyboardMarkup, CallbackQuery from pyrogram.errors.exceptions.bad_request_400 import MessageTooLong, PeerIdInvalid from info import ADMINS, LOG_CHANNEL, SUPPORT_CHAT, MELCOW_NEW_USERS, MELCOW_VID, CHNL_LNK, GRP_LNK from database.users_chats_db import db from database.ia_filterdb import Media from utils import get_size, temp, get_settings from Script import script from pyrogram.errors import ChatAdminRequired import asyncio
20,001
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK), InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK) ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title} ❣️\n\nᴅᴏɴ'ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ. ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ǫᴜᴇꜱᴛɪᴏɴꜱ & ᴅᴏᴜʙᴛꜱ ᴀʙᴏᴜᴛ ᴜꜱɪɴɢ ᴍᴇ ᴄᴏɴᴛᴀᴄᴛ ꜰʀᴏᴍ ᴀᴅᴍɪɴ & ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>", reply_markup=reply_markup) else:
"""-----------------------------------------https://t.me/TG_LINKS_CHANNEL--------------------------------------""" @Client.on_message(filters.new_chat_members & filters.group) async def save_group(bot, message): r_j_check = [u.id for u in message.new_chat_members] if temp.ME in r_j_check: if not await db.get_chat(message.chat.id): total=await bot.get_chat_members_count(message.chat.id) r_j = message.from_user.mention if message.from_user else "Anonymous" await bot.send_message(LOG_CHANNEL, script.LOG_TEXT_G.format(message.chat.title, message.chat.id, total, r_j)) await db.add_chat(message.chat.id, message.chat.title) if message.chat.id in temp.BANNED_CHATS: # Inspired from a boat of a banana tree buttons = [[ InlineKeyboardButton('Support', url=f'https://t.me/{SUPPORT_CHAT}') ]] reply_markup=InlineKeyboardMarkup(buttons) k = await message.reply( text='<b>CHAT NOT ALLOWED 🐞\n\nMy admins has restricted me from working here ! If you want to know more about it contact support..</b>', reply_markup=reply_markup, ) try: await k.pin() except: pass await bot.leave_chat(message.chat.id) return buttons = [[ InlineKeyboardButton('🔸 ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 🔹', url="https://t.me/TG_Bots_Supporter") ],[ InlineKeyboardButton('ᴄʜᴀɴɴᴇʟ', url=CHNL_LNK), InlineKeyboardButton('ɢʀᴏᴜᴘ', url=GRP_LNK) ]] reply_markup=InlineKeyboardMarkup(buttons) await message.reply_text( text=f"<b>ᴛʜᴀɴᴋ ʏᴏᴜ ꜰᴏʀ ᴀᴅᴅɪɴɢ ᴍᴇ ɪɴ {message.chat.title} ❣️\n\nᴅᴏɴ'ᴛ ꜰᴏʀɢᴇᴛ ᴛᴏ ᴍᴀᴋᴇ ᴍᴇ ᴀᴅᴍɪɴ. ɪꜰ ʏᴏᴜ ʜᴀᴠᴇ ᴀɴʏ ǫᴜᴇꜱᴛɪᴏɴꜱ & ᴅᴏᴜʙᴛꜱ ᴀʙᴏᴜᴛ ᴜꜱɪɴɢ ᴍᴇ ᴄᴏɴᴛᴀᴄᴛ ꜰʀᴏᴍ ᴀᴅᴍɪɴ & ᴍᴇꜱꜱᴀɢᴇ ʜᴇʀᴇ 👇</b>", reply_markup=reply_markup) else:
settings = await get_settings(message.chat.id)
11
2023-11-03 12:21:26+00:00
24k
apple/ml-reed
reed/algorithms/pebble.py
[ { "identifier": "utils", "path": "BPref/utils.py", "snippet": "def make_env(cfg):\ndef ppo_make_env(env_id, seed):\ndef tie_weights(src, trg):\ndef make_metaworld_env(cfg):\ndef ppo_make_metaworld_env(env_id, seed):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *args):\n def __init__(self, *models):\n def __enter__(self):\n def __exit__(self, *args):\ndef soft_update_params(net, target_net, tau):\ndef set_seed_everywhere(seed):\ndef make_dir(*path_parts):\ndef weight_init(m):\n def __init__(self,\n input_dim,\n hidden_dim,\n output_dim,\n hidden_depth,\n output_mod=None):\n def forward(self, x):\n def __init__(self, cache_size=1):\n def atanh(x):\n def __eq__(self, other):\n def _call(self, x):\n def _inverse(self, y):\n def log_abs_det_jacobian(self, x, y):\n def __init__(self, loc, scale):\n def mean(self):\n def __init__(self, epsilon=1e-4, shape=(), device=None):\n def update(self, x):\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n def std(self):\ndef update_mean_var_count_from_moments(\n mean, var, count, batch_mean, batch_var, batch_count\n):\ndef mlp(input_dim, hidden_dim, output_dim, hidden_depth, output_mod=None):\ndef to_np(t):\nclass eval_mode(object):\nclass train_mode(object):\nclass MLP(nn.Module):\nclass TanhTransform(pyd.transforms.Transform):\nclass SquashedNormal(pyd.transformed_distribution.TransformedDistribution):\nclass TorchRunningMeanStd:\n M2 = m_a + m_b + torch.pow(delta, 2) * count * batch_count / tot_count" }, { "identifier": "Logger", "path": "BPref/logger.py", "snippet": "class Logger(object):\n def __init__(self,\n log_dir,\n save_tb=False,\n log_frequency=10000,\n agent='sac'):\n self._log_dir = log_dir\n self._log_frequency = log_frequency\n if save_tb:\n tb_dir = os.path.join(log_dir, 'tb')\n if os.path.exists(tb_dir):\n try:\n shutil.rmtree(tb_dir)\n except:\n print(\"logger.py warning: Unable to remove tb directory\")\n pass\n self._sw = SummaryWriter(tb_dir)\n else:\n self._sw = None\n # each agent has specific output format for training\n assert agent in AGENT_TRAIN_FORMAT\n train_format = COMMON_TRAIN_FORMAT + AGENT_TRAIN_FORMAT[agent]\n self._train_mg = MetersGroup(os.path.join(log_dir, 'train'),\n formating=train_format)\n self._eval_mg = MetersGroup(os.path.join(log_dir, 'eval'),\n formating=COMMON_EVAL_FORMAT)\n\n def _should_log(self, step, log_frequency):\n log_frequency = log_frequency or self._log_frequency\n return step % log_frequency == 0\n\n def _try_sw_log(self, key, value, step):\n if self._sw is not None:\n self._sw.add_scalar(key, value, step)\n\n def _try_sw_log_video(self, key, frames, step):\n if self._sw is not None:\n frames = torch.from_numpy(np.array(frames))\n frames = frames.unsqueeze(0)\n self._sw.add_video(key, frames, step, fps=30)\n\n def _try_sw_log_histogram(self, key, histogram, step):\n if self._sw is not None:\n self._sw.add_histogram(key, histogram, step)\n\n def log(self, key, value, step, n=1, log_frequency=1):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n if type(value) == torch.Tensor:\n value = value.item()\n self._try_sw_log(key, value / n, step)\n mg = self._train_mg if key.startswith('train') else self._eval_mg\n mg.log(key, value, n)\n\n def log_param(self, key, param, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n self.log_histogram(key + '_w', param.weight.data, step)\n if hasattr(param.weight, 'grad') and param.weight.grad is not None:\n 
self.log_histogram(key + '_w_g', param.weight.grad.data, step)\n if hasattr(param, 'bias') and hasattr(param.bias, 'data'):\n self.log_histogram(key + '_b', param.bias.data, step)\n if hasattr(param.bias, 'grad') and param.bias.grad is not None:\n self.log_histogram(key + '_b_g', param.bias.grad.data, step)\n\n def log_video(self, key, frames, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_video(key, frames, step)\n\n def log_histogram(self, key, histogram, step, log_frequency=None):\n if not self._should_log(step, log_frequency):\n return\n assert key.startswith('train') or key.startswith('eval')\n self._try_sw_log_histogram(key, histogram, step)\n\n def dump(self, step, save=True, ty=None):\n if ty is None:\n self._train_mg.dump(step, 'train', save)\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'eval':\n self._eval_mg.dump(step, 'eval', save)\n elif ty == 'train':\n self._train_mg.dump(step, 'train', save)\n else:\n raise ValueError(f'invalid log type: {ty}')" }, { "identifier": "TrajectoryReplayBuffer", "path": "BPref/replay_buffer.py", "snippet": "class TrajectoryReplayBuffer:\n \"\"\"\n Buffer to store trajectories of environment transitions. Unlike ReplayBuffer, which stores all transitions in a\n flat manner, transitions are sorted by trajectory. Each trajectory corresponds to an episode.\n \"\"\"\n _RELABEL_BATCH_SIZE = 256\n\n def __init__(self, capacity: int, device: torch.device, window: int = 1, num_envs: t.Optional[int] = None,\n image_observations: t.Optional[t.Union[int, np.ndarray]] = None):\n \"\"\"\n Args:\n capacity: the number of trajectories to hold in memory\n device: the device sampled transitions should be put on\n window: no idea - part of the original code and is used in add_batch(...) which has not yet been refactored\n num_envs: the number of environment instances used to train the policy. Only needs to be specified when the\n number is >1. Some algorithms train on multiple instances of an environment at once, e.g. PPO.\n Not currently used, but not yet removed because we have not tested with an algorithm that needs\n multiple environment instances.\n image_observations: (default = false) whether to collect image observations in addition to state\n observations. 
This is helpful to use when the policy is trained on the state, but you\n want to visualize the trajectories or the reward model is trained on images.\n\n \"\"\"\n self.capacity = capacity\n self.device = device\n\n self.observations: t.Optional[np.ndarray] = None\n self.actions: t.Optional[np.ndarray] = None\n self.rewards: t.Optional[np.ndarray] = None\n self.not_dones: t.Optional[np.ndarray] = None\n self.not_dones_no_max: t.Optional[np.ndarray] = None\n self.trajectory_lengths: t.List = []\n self.window = window\n self.env_rewards: t.Optional[np.ndarray] = None\n self.image_observations: t.Optional[np.ndarray] = None\n # track whether to collect image observations - when not None, specifies the dimensions of the images\n self._collect_image_observations = image_observations\n\n # track the trajectories as a list of Trajectory\n self.trajectories: t.List[Trajectory] = []\n\n self.idx = 0\n self.last_save = 0\n self.full = False\n\n def __len__(self):\n return np.sum(self.trajectory_lengths) - len(self.trajectory_lengths)\n\n def __getitem__(self, flat_indx: t.Union[int, t.Tuple[int, int], t.List[int]]) -> TRANSITION:\n \"\"\"\n Get the transition at the given index\n\n Args:\n flat_indx: the index assuming transitions are stored flat instead of nested in trajectories\n - when an integer is specified, a single transition is retrieved\n - when a tuple of integers is given, a slice is retrieved as if the transitions are stored flat\n\n Returns:\n current observation\n action\n reward\n next observation\n whether the episode ended\n whether the episode ended without reaching max steps\n image version of current observation (optional)\n \"\"\"\n if isinstance(flat_indx, int) or isinstance(flat_indx, np.int64):\n traj_indx, trans_indx = self._flat_indx_to_trajectory_index(flat_indx)\n # check we are grabbing from a trajectory currently being accumulated\n # When the done signal is given, the current trajectory being accumulated is converted to a trajectory,\n # is added to the list of trajectories, and the values used to accumulate the next trajectory are set to\n # done. The next trajectory is not started until the call to add(...) after the done signal is received.\n # Therefore, we need to check whether the trajectory to pull from is actually the last completed trajectory\n # prior to starting a new trajectory. 
This is why we compare the length of the lists containing trajectory\n # lengths and the list containing the trajectories.\n if (traj_indx == len(self.trajectory_lengths) - 1\n and len(self.trajectory_lengths) > len(self.trajectories)):\n # we need to grab from the trajectory currently being populated\n return (self.observations[trans_indx].astype(np.float32), self.actions[trans_indx].astype(np.float32),\n self.rewards[trans_indx].astype(np.float32), self.observations[trans_indx + 1].astype(np.float32),\n self.not_dones[trans_indx].astype(np.float32),\n self.not_dones_no_max[trans_indx].astype(np.float32),\n (self.env_rewards[trans_indx].astype(np.float32)\n if self.env_rewards is not None\n else None),\n ((self.image_observations[trans_indx].astype(np.float32))\n if self.image_observations is not None\n else None),\n ((self.image_observations[trans_indx+1].astype(np.float32))\n if self.image_observations is not None\n else None))\n else:\n # grab from a previously completed trajectory\n transition: Transition = self.trajectories[traj_indx][trans_indx]\n return (transition.observation.astype(np.float32), transition.action.astype(np.float32),\n transition.reward.astype(np.float32), transition.next_observation.astype(np.float32),\n transition.not_done.astype(np.float32), transition.not_done_no_max.astype(np.float32),\n transition.env_reward.astype(np.float32),\n (transition.image_observation.astype(np.float32)\n if transition.image_observation is not None\n else None),\n (transition.next_image_observation.astype(np.float32)\n if transition.next_image_observation is not None\n else None))\n elif isinstance(flat_indx, t.List):\n observations = []\n actions = []\n rewards = []\n next_observations = []\n not_dones = []\n not_dones_no_max = []\n env_rewards = []\n image_observations = []\n next_image_observations = []\n for indx in flat_indx:\n observation, action, reward, next_observation, not_done, not_done_no_max, env_reward, image_observation, next_image_observation = self[indx]\n observations.append(observation)\n actions.append(action)\n rewards.append(reward)\n next_observations.append(next_observation)\n not_dones.append(not_done)\n not_dones_no_max.append(not_done_no_max)\n if env_reward is not None:\n env_rewards.append(env_reward)\n if image_observation is not None:\n image_observations.append(image_observation)\n if next_image_observation is not None:\n next_image_observations.append(next_image_observation)\n return (np.asarray(observations, dtype=np.float32), np.asarray(actions, dtype=np.float32),\n np.asarray(rewards, dtype=np.float32), np.asarray(next_observations, dtype=np.float32),\n np.asarray(not_dones, dtype=np.float32), np.asarray(not_dones_no_max, dtype=np.float32),\n (np.asarray(env_rewards, dtype=np.float32) if len(env_rewards) > 0 else None),\n (np.asarray(image_observations, dtype=np.float32) if self._collect_image_observations else None),\n (np.asarray(next_image_observations, dtype=np.float32) if self._collect_image_observations else None))\n else:\n # get the locations of the start and end transitions\n start_traj_indx, start_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[0])\n end_traj_indx, end_trans_indx = self._flat_indx_to_trajectory_index(flat_indx[1])\n # check that we are not spanning trajectories\n if start_traj_indx == end_traj_indx:\n # grab the sub-trajectory\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n else:\n # grab what remains of the trajectory\n end_trans_indx = 
len(self.trajectories[start_traj_indx]) - 1\n sub_trajectory = self.trajectories[start_traj_indx][tuple((start_trans_indx, end_trans_indx))]\n return (sub_trajectory.initial_observations,\n sub_trajectory.actions,\n sub_trajectory.rewards,\n sub_trajectory.next_observations,\n sub_trajectory.not_dones,\n sub_trajectory.not_dones_no_max,\n sub_trajectory.env_rewards,\n (sub_trajectory.initial_image_observations\n if sub_trajectory.initial_image_observations is not None\n else None),\n (sub_trajectory.next_image_observations\n if sub_trajectory.next_image_observations is not None\n else None))\n\n @property\n def trajectory_count(self) -> int:\n \"\"\"\n The number of trajectories in the buffer\n \"\"\"\n return len(self.trajectories)\n\n @property\n def all_not_dones(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.not_dones, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_rewards(self) -> np.ndarray:\n \"\"\"\n Rewards from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_environment_rewards(self) -> np.ndarray:\n \"\"\"\n Environment rewards from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.rewards, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_initial_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_next_image_observations(self) -> np.ndarray:\n \"\"\"\n Image observations from the state-action pairs from all trajectories and all transitions,\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_image_observations, axis=0)\n for traj in self.trajectories],\n axis=0)\n\n @property\n def all_initial_observations(self) -> np.ndarray:\n \"\"\"\n observations from the state-action pairs from all trajectories and all transitions, where the action was taken in the state\n \"\"\"\n return np.concatenate([np.expand_dims(traj.initial_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_next_observations(self) -> np.ndarray:\n \"\"\"\n Observations from the state-action pairs from all trajectories and all transitions\n\n The result of a transition\n \"\"\"\n return np.concatenate([np.expand_dims(traj.next_observations, axis=0) for traj in self.trajectories], axis=0)\n\n @property\n def all_actions(self) -> np.ndarray:\n \"\"\"\n Actions from the state-action pairs from all trajectories and all transitions\n \"\"\"\n return np.concatenate([np.expand_dims(traj.actions, axis=0) for traj in self.trajectories], axis=0)\n\n def _flat_indx_to_trajectory_index(self, flat_indx: int) -> t.Tuple[int, int]:\n \"\"\"\n Converts an index that assumes the transitions are flat to a trajectory and transition (w/in trajectory) index\n\n Args:\n flat_indx: the index assuming transitions are stored flat\n\n Returns:\n the index of the trajectory containing the transition\n the index of the transition within the trajectory\n 
\"\"\"\n # need to figure out which transition indices are stored in which trajectories\n transition_cumulative_sum = np.cumsum(self.trajectory_lengths)\n # the trajectory containing the transition is at the first index where the cumulative sum of transitions is\n # less than the transition index\n target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))\n # get the transition's index within the trajectory as the different between the flat index and the cumulative\n # sum at the previous trajectory - tells us how far into the target trajectory the transition is\n if target_trajectory_indx == 0:\n transition_trajectory_indx = flat_indx\n else:\n transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]\n return target_trajectory_indx, transition_trajectory_indx\n\n def _add_transition(self, observation: np.ndarray, action: np.ndarray, reward: float, done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None, image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Track the transition and update the length of the trajectory currently being accumulated\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given is observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.concatenate([self.observations, np.expand_dims(observation, axis=0)], axis=0)\n self.actions = np.concatenate([self.actions, np.expand_dims(action, axis=0)], axis=0)\n self.rewards = np.concatenate([self.rewards, np.asarray(reward).reshape(1, 1)], axis=0)\n if type(done) is float:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(not done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n else:\n self.not_dones = np.concatenate([self.not_dones,\n np.asarray(~done, dtype=np.float32).reshape(1, 1)], axis=0)\n self.not_dones_no_max = np.concatenate([self.not_dones_no_max,\n np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)],\n axis=0)\n\n self.trajectory_lengths[-1] += 1\n if env_reward is not None:\n self.env_rewards = np.concatenate([self.env_rewards,\n np.asarray(env_reward, dtype=np.float32).reshape(1, 1)], axis=0)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.concatenate([self.image_observations, np.expand_dims(image_observations, axis=0)], axis=0)\n\n def _start_trajectory(self, observation: np.ndarray,\n action: np.ndarray,\n reward: float,\n done: t.Union[float, bool],\n done_no_max: t.Union[float, bool],\n env_reward: t.Optional[float] = None,\n image_observations: t.Optional[np.ndarray] = None):\n \"\"\"\n Start a new trajectory and track the transition\n\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pait\n 
done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observations: (optional) image-based observation -> should not be given if observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n \"\"\"\n self.observations = np.expand_dims(observation, axis=0).astype(dtype=np.float32)\n self.actions = np.expand_dims(action, axis=0).astype(dtype=np.float32)\n self.rewards = np.asarray(reward, dtype=np.float32).reshape(1, 1)\n if type(done) is float:\n self.not_dones = np.asarray(not done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(not done_no_max, dtype=np.float32).reshape(1, 1)\n else:\n self.not_dones = np.asarray(~done, dtype=np.float32).reshape(1, 1)\n self.not_dones_no_max = np.asarray(~done_no_max, dtype=np.float32).reshape(1, 1)\n\n self.trajectory_lengths.append(1)\n\n if env_reward is not None:\n self.env_rewards = np.asarray(env_reward, dtype=np.float32).reshape(1, 1)\n\n if image_observations is not None and self._collect_image_observations:\n self.image_observations = np.expand_dims(image_observations, axis=0).astype(dtype=np.float32)\n\n def add(self, observation, action, reward, next_observation, done, done_no_max,\n env_reward: t.Optional[float] = None, image_observation: t.Optional[np.ndarray] = None,\n image_next_observation: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n observation: the current observation\n action: the action taken in the current state\n reward: the reward associated with the last state-action pair\n next_observation: only used when an episode is completed to ensure the last observation is captured\n done: whether the last action completed an episode\n done_no_max: whether the last action completed an episode without reaching the maximum allowed steps\n env_reward: (optional) the reward given by the environment - stored and used to train the preference-learned\n reward model when learning from synthetic feedback\n image_observation: (optional) image-based observation -> should not be given if observations is also an image. This\n should be used when you want to accumulate images separately from policy training.\n image_next_observation: (optional) the image-based next observation -> should not be given when next_observation is also\n an image. 
This should be used when you want to accumulate the images separately from the\n trained policy.\n \"\"\"\n if self.observations is None:\n self._start_trajectory(observation, action, reward, done, done_no_max, env_reward, image_observation)\n elif done:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n # the episode has ended, so we need to track the next observation\n self.observations = np.concatenate([self.observations, np.expand_dims(next_observation, axis=0)], axis=0)\n if image_next_observation is not None:\n self.image_observations = np.concatenate([self.image_observations,\n np.expand_dims(image_next_observation, axis=0)], axis=0)\n # create the trajectory\n self.trajectories.append(Trajectory(self.observations.astype(dtype=np.float32),\n (self.image_observations.astype(dtype=np.float32)\n if self.image_observations is not None\n else None),\n actions=self.actions.astype(dtype=np.float32),\n rewards=self.rewards.astype(dtype=np.float32),\n not_dones=self.not_dones.astype(dtype=np.float32),\n not_dones_no_max=self.not_dones_no_max.astype(dtype=np.float32),\n env_rewards=self.env_rewards.astype(dtype=np.float32)))\n # check if the inclusion of the just completed trajectory puts the buffer at capacity\n # if it does, remove the first trajectory as this is a FIFO buffer\n if np.sum(self.trajectory_lengths) >= self.capacity:\n self.trajectories = self.trajectories[1:]\n self.trajectory_lengths = self.trajectory_lengths[1:]\n self.observations = None\n self.actions = None\n self.rewards = None\n self.not_dones = None\n self.not_dones_no_max = None\n self.env_rewards = None\n self.image_observations = None\n else:\n self._add_transition(observation, action, reward, done, done_no_max, env_reward, image_observation)\n\n self.idx = (self.idx + 1) % self.capacity\n self.full = self.full or self.idx == 0\n\n def relabel_with_predictor(self, predictor, state_action_formatter: PreProcessInference):\n \"\"\"\n Relabel the rewards stored in the replay buffer using the given predictor\n\n Args:\n predictor: network that will consume state-action pairs and assign a reward\n state_action_formatter: formats the states and actions for consumption by the reward model\n \"\"\"\n print(\"Relabelling the replay buffer with the updated reward model.\")\n for trajectory in self.trajectories:\n # the number of batches to run through the model\n total_iter = int(len(trajectory) / self._RELABEL_BATCH_SIZE)\n # handle the case where we have more transitions than is evenly divisible by the batch size\n if len(trajectory) > self._RELABEL_BATCH_SIZE * total_iter:\n total_iter += 1\n # collect and process each batch to be passed through predictor\n for index in range(total_iter):\n start_indx = index * self._RELABEL_BATCH_SIZE\n # make sure we don't have an end index that is after the end of the trajectory\n end_indx = min((index + 1) * self._RELABEL_BATCH_SIZE, len(trajectory))\n\n # pull out the actions from the transitions that will be relabelled\n actions = trajectory.actions[start_indx:end_indx]\n # we need to handle the case where the reward model operates off of images\n if predictor.image_observations:\n observations = trajectory.all_image_observations[start_indx:end_indx]\n else:\n observations = trajectory.all_observations[start_indx:end_indx]\n formatted_state_action = state_action_formatter.format_state_action(observations, actions, batch_sa=True)\n pred_reward = predictor.r_hat_batch(formatted_state_action)\n # update the rewards assigned to the 
transitions\n trajectory.rewards[start_indx:end_indx] = pred_reward\n\n def sample(self, batch_size: int):\n indxs = list(np.random.randint(0, np.sum(self.trajectory_lengths) - 1, size=batch_size))\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations = self[indxs]\n observations = torch.as_tensor(observations, device=self.device).float()\n actions = torch.as_tensor(actions, device=self.device)\n rewards = torch.as_tensor(rewards, device=self.device)\n next_observations = torch.as_tensor(next_observations, device=self.device).float()\n not_dones = torch.as_tensor(not_dones, device=self.device)\n not_dones_no_max = torch.as_tensor(not_dones_no_max, device=self.device)\n env_rewards = torch.as_tensor(env_rewards, device=self.device)\n image_observations = (torch.as_tensor(image_observations, device=self.device).float() if self._collect_image_observations else None)\n next_image_observations = (torch.as_tensor(next_image_observations, device=self.device).float() if self._collect_image_observations else None)\n return observations, actions, rewards, next_observations, not_dones, not_dones_no_max, env_rewards, image_observations, next_image_observations\n\n def sample_state_ent(self, batch_size: int):\n observations, actions, rewards, next_observations, not_dones, not_dones_no_max, _, _, _ = self.sample(batch_size)\n full_observation = torch.as_tensor(np.concatenate([traj.all_observations for traj in self.trajectories], axis=0),\n device=self.device)\n return observations, full_observation, actions, rewards, next_observations, not_dones, not_dones_no_max\n\n def save(self, out_directory: Path, env_id: str, step: int):\n \"\"\"\n Save the replay buffer to disk as a npz archive\n Args:\n out_directory: location where replay buffer will be saved\n env_id: the environment within which the data was generated\n step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(out_directory / f\"{env_id}_replay_buffer_{step}.zip\", \"w\")\n\n # write each trajectory file to disk and to the zip archive\n for traj_id, trajectory in enumerate(self.trajectories):\n trajectory.save(out_directory / f\"{traj_id}.npz\")\n zip_obj.write(out_directory / f\"{traj_id}.npz\")\n # close the Zip File\n zip_obj.close()\n\n @staticmethod\n def from_directory(directory_path: Path,\n device: torch.device = 'cuda') -> \"TrajectoryReplayBuffer\":\n \"\"\"\n Create a TrajectoryReplay buffer from a directory of npz archive trajectories\n\n Args:\n directory_path: the location of the npz_archive on disk\n device: the device sampled transitions should be pushed to\n Returns:\n populated trajectory replay buffer\n \"\"\"\n # accumulate the trajectories\n trajectories = []\n trajectory_lengths = []\n # determine how many transitions are in the replay buffer\n capacity = 0\n # load each trajectory from disk\n for traj_filename in directory_path.iterdir():\n # we only load data from npz archives, so we need to skip anything else\n if not traj_filename.suffix == \".npz\": continue\n # load the trajectory from disk\n traj = Trajectory.from_npz(traj_filename)\n # track the trajectory\n trajectories.append(traj)\n # track the trajectory's length\n trajectory_lengths.append(len(traj))\n # track the trajectory's length\n capacity += len(traj)\n # create the buffer\n _buffer = TrajectoryReplayBuffer(capacity=capacity, device=device)\n # add the trajectories to the buffer\n 
_buffer.trajectories = trajectories\n _buffer.trajectory_lengths = trajectory_lengths\n\n return _buffer" }, { "identifier": "StateActionRewardModel", "path": "reed/models/reward_model.py", "snippet": "class StateActionRewardModel:\n \"\"\"\n Reward model that operates over state action pairs\n \"\"\"\n def __init__(self,\n in_dim: t.Union[int, t.List[int]],\n ensemble_size: int = 3,\n hidden_dim: int = 256,\n hidden_layers: int = 3,\n final_activation: str = 'tanh',\n lr: float = 3e-4,\n optimizer: str = \"adam\",\n reward_train_batch: int = 128,\n size_segment: int = 1,\n device: torch.device = \"cuda\",\n multi_gpu: bool = False,\n image_observations: bool = False,\n image_encoder_architecture: str = \"pixl2r\",\n image_hidden_num_channels: int = 32,\n grayscale_images: bool = True):\n # the device the model will be put on\n self.device = device\n # whether data parallelism should be used during model training\n self.multi_gpu = multi_gpu\n # reward model configuration\n self.in_dim = in_dim\n self.hidden_dim = hidden_dim\n self.hidden_layers = hidden_layers\n self.ensemble_size = ensemble_size\n self.lr = lr\n self.optimizer_type = optimizer\n self.ensemble = []\n self.paramlst = []\n self.optimizer = None\n self.model = None\n self.final_activation = final_activation\n self.size_segment = size_segment\n\n self.image_observations = image_observations\n self.image_encoder_architecture = image_encoder_architecture\n self.image_hidden_num_channels = image_hidden_num_channels\n self.grayscale_images = grayscale_images\n\n # construct the reward ensemble\n self.construct_ensemble()\n\n # parameters used to train the reward model on the preference labelled trajectories\n self.train_batch_size = reward_train_batch\n self.CEloss = nn.CrossEntropyLoss()\n\n def eval(self):\n \"\"\"Set each reward model in the ensemble to evaluation mode\"\"\"\n self.ensemble = [net.eval() for net in self.ensemble]\n\n def train(self):\n \"\"\"Set each reward model in the ensemble to train mode\"\"\"\n self.ensemble = [net.train() for net in self.ensemble]\n\n def softXEnt_loss(self, predicted: torch.Tensor, target: torch.Tensor):\n logprobs = F.log_softmax(predicted, dim=1)\n return -(target * logprobs).sum() / predicted.shape[0]\n\n def construct_ensemble(self):\n for _ in range(self.ensemble_size):\n if self.image_observations:\n model = ImageStateActionNetwork(self.in_dim,\n out_size=1,\n hidden_dim=self.hidden_dim,\n hidden_depth=self.hidden_layers,\n final_activation=self.final_activation,\n image_encoder_architecture=self.image_encoder_architecture,\n image_hidden_num_channels=self.image_hidden_num_channels).float()\n else:\n model = StateActionNetwork(self.in_dim,\n out_size=1,\n hidden_dim=self.hidden_dim,\n hidden_depth=self.hidden_layers,\n final_activation=self.final_activation).float()\n print(model)\n # check if the model will be run with Data Parallelism\n if self.multi_gpu:\n print(f\"There are {torch.cuda.device_count()} GPU devices, so the reward ensemble WILL be trained \"\n f\"using nn.DataParallel\")\n self.ensemble.append(nn.DataParallel(model).to(self.device))\n else:\n print(f\"There are {torch.cuda.device_count()} GPU devices, so the reward ensemble will NOT be trained \"\n f\"using nn.DataParallel\")\n self.ensemble.append(model.to(self.device))\n # track all model parameters\n self.paramlst.extend(model.parameters())\n # create a single optimizer applied to all ensemble members\n if self.optimizer_type == \"adam\":\n self.optimizer = torch.optim.Adam(self.paramlst, lr=self.lr)\n elif 
self.optimizer_type == \"sgd\":\n self.optimizer = torch.optim.SGD(self.paramlst, lr=self.lr)\n else:\n raise NotImplementedError(f\"{self.optimizer_type} is not implemented as a reward optimizer and must be \"\n f\"one of 'adam' or 'sgd'.\")\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False, by_trajectory: bool = False):\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n # check if the images needs to be converted to grayscale\n if self.grayscale_images:\n obs = _to_grayscale(obs, batch_states=batch_states)\n if batch_states:\n # permute the input so that the channels are in the first dimension\n if by_trajectory:\n obs = np.transpose(obs, (0, 1, 4, 2, 3))\n else:\n print(obs.shape)\n obs = np.transpose(obs, (0, 3, 1, 2))\n return obs\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(obs, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return obs.reshape(1, *obs.shape)\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: bool = False, by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n # check if the images needs to be converted to grayscale\n if self.grayscale_images:\n obs = _to_grayscale(obs, batch_states=batch_sa)\n if batch_sa:\n obs_dim = obs.shape[1:]\n # we concatenate the actions along channel dimension of the image\n if by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n # now concatenate the two\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n # permute the input so that the channels are in the first dimension\n if by_trajectory:\n sa_t = np.transpose(sa_t, (0, 1, 4, 2, 3))\n else:\n sa_t = np.transpose(sa_t, (0, 3, 1, 2))\n return sa_t\n else:\n obs_dim = obs.shape\n # we concatenate the actions along channel dimension of the image\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n # now concatenate the two\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n # permute the input so that the channels are in the first dimension\n sa_t = np.transpose(sa_t, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return sa_t.reshape(1, *self.in_dim)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)\n\n def p_hat_member(self, x_1: np.ndarray, x_2: np.ndarray, member: int = -1):\n # softmaxing to get the probabilities according to eqn 1\n with torch.no_grad():\n # if we are using image observations, we need to collapse along the batch and time dimensions to 
push\n # a forward pass through the network\n # to compute the probabilities when then need to re-construct the batch and time dimensions\n if self.image_observations:\n # we need to compute the probabilities in batches to avoid out of memory issues\n # we use the train batch size as it should be an amount safe to put on the GPU's memory without causing\n # issues\n mb_size = self.train_batch_size\n start_indx = 0\n r_hat1 = None\n r_hat2 = None\n while start_indx < x_1.shape[0]:\n # check if there is a mb_size worth of trajectories to still be processed\n if start_indx + mb_size <= x_1.shape[0]:\n mb_x_1 = x_1[start_indx:start_indx + mb_size].reshape((-1, *x_1.shape[2:]))\n mb_x_2 = x_1[start_indx:start_indx + mb_size].reshape((-1, *x_1.shape[2:]))\n else:\n # process the leftover trajectories in a batch smaller than mb_size\n mb_x_1 = x_1[start_indx:].reshape((-1, *x_1.shape[2:]))\n mb_x_2 = x_2[start_indx:].reshape((-1, *x_2.shape[2:]))\n # process the leftover trajectories in a batch smaller than mb_size\n mb_rhat1 = self.r_hat_member(torch.from_numpy(mb_x_1).float().to(self.device),\n member=member).detach().cpu().reshape((mb_size, x_1.shape[1], 1))\n mb_rhat2 = self.r_hat_member(torch.from_numpy(mb_x_2).float().to(self.device),\n member=member).detach().cpu().reshape((mb_size, x_2.shape[1], 1))\n start_indx += mb_size\n\n # accumulate the rhats\n if r_hat1 is None:\n r_hat1 = mb_rhat1\n r_hat2 = mb_rhat2\n else:\n r_hat1 = torch.concat((r_hat1, mb_rhat1), dim=0)\n r_hat2 = torch.concat((r_hat2, mb_rhat2))\n\n else:\n r_hat1 = self.r_hat_member(x_1, member=member).cpu()\n r_hat2 = self.r_hat_member(x_2, member=member).cpu()\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n r_hat = torch.cat([r_hat1, r_hat2], axis=-1)\n # taking 0 index for probability x_1 > x_2\n return F.softmax(r_hat, dim=-1)[:, 0]\n\n def p_hat_entropy(self, x_1: np.ndarray, x_2: np.ndarray, member: int = -1):\n # softmaxing to get the probabilities according to eqn 1\n with torch.no_grad():\n r_hat1 = self.r_hat_member(x_1, member=member)\n r_hat2 = self.r_hat_member(x_2, member=member)\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n r_hat = torch.cat([r_hat1, r_hat2], axis=-1)\n\n ent = F.softmax(r_hat, dim=-1) * F.log_softmax(r_hat, dim=-1)\n ent = ent.sum(axis=-1).abs()\n return ent\n\n def r_hat_member(self, x: torch.Tensor, member: int = -1) -> torch.Tensor:\n # the network parameterizes r hat in eqn 1 from the paper\n # return self.ensemble[member](torch.from_numpy(x).float().to(device))\n return self.ensemble[member](x)\n\n def r_hat(self, x: np.ndarray):\n # they say they average the rewards from each member of the ensemble, but I think this only makes sense if the\n # rewards are already normalized and I don't understand how the normalization should be happening right now :(\n r_hats = []\n for member in range(self.ensemble_size):\n r_hats.append(self.r_hat_member(torch.from_numpy(x).float().to(self.device), member=member).detach().cpu().numpy())\n r_hats = np.array(r_hats)\n return np.mean(r_hats)\n\n def r_hat_batch(self, x: np.ndarray):\n # they say they average the rewards from each member of the ensemble, but I think this only makes sense if the rewards are already normalized\n # but I don't understand how the normalization should be happening right now :(\n r_hats = []\n for member in range(self.ensemble_size):\n r_hats.append(self.r_hat_member(torch.from_numpy(x).float().to(self.device), member=member).detach().cpu().numpy())\n r_hats = np.array(r_hats)\n return 
np.mean(r_hats, axis=0)\n\n def save(self, model_dir: str, env_id: str, step: int):\n \"\"\"\n Save the reward ensemble to disk\n\n Args:\n model_dir: path where the ensemble is to be saved\n env_id: the environment on which the ensemble has been trained\n step: the number of policy training steps\n \"\"\"\n for member in range(self.ensemble_size):\n torch.save(\n self.ensemble[member].state_dict(), f'{model_dir}/{env_id}_reward_model_{step}_{member}.pt'\n )\n\n def train_reward(self,\n preference_data_loader: PreferenceTripletEnsembleDataLoader,\n num_epoch: int):\n \"\"\"\n Train the reward model on the given preference dataset.\n\n Args:\n preference_data_loader: loads batches of preference triplets. Separated handles different preference\n dataset permutations for each member of the reward's ensemble.\n num_epoch: the number of training epochs to execute\n \"\"\"\n # track the accuracy and loss by ensemble member per epoch\n ensemble_accuracies = np.zeros((num_epoch, self.ensemble_size))\n ensemble_losses = np.zeros((num_epoch, self.ensemble_size))\n\n # train the reward model for the specified number of epochs\n for epoch in range(num_epoch):\n if epoch % 10 == 0:\n print(f\"Running preference training epoch {epoch} of {num_epoch}\")\n epoch_ensemble_losses = np.zeros(self.ensemble_size)\n epoch_ensemble_acc = np.zeros(self.ensemble_size)\n # train on each batch\n for batch_indx, batch in enumerate(preference_data_loader):\n # confirm there is either a single batch to be shared by all networks in the reward ensemble or\n # a batch per network in the ensemble\n assert len(batch) == 1 or len(batch) == self.ensemble_size\n # we need to zero out the gradients before we begin to process this batch\n self.optimizer.zero_grad()\n # we will need to accumulate the loss across the ensemble members\n batch_loss = 0.0\n for member_indx, preference_triplet_batch in enumerate(batch):\n # the predicted reward per transition in each trajectory\n # check if we need to collapse the batch and time dimensions into one and then reconstruct the two\n if self.image_observations:\n # get the rewards for each transition in the trajectories one\n traj_one_shape = preference_triplet_batch.trajectories_one.shape\n formatted_trajectories_one = preference_triplet_batch.trajectories_one.reshape(\n (-1, *traj_one_shape[2:]))\n r_hat1 = self.r_hat_member(formatted_trajectories_one,\n member=member_indx).reshape((traj_one_shape[0],\n traj_one_shape[1], 1))\n # get the rewards for each transition in the trajectories two\n traj_two_shape = preference_triplet_batch.trajectories_two.shape\n formatted_trajectories_two = preference_triplet_batch.trajectories_two.reshape(\n (-1, *traj_two_shape[2:]))\n r_hat2 = self.r_hat_member(formatted_trajectories_two,\n member=member_indx).reshape((traj_two_shape[0],\n traj_two_shape[1], 1))\n else:\n r_hat1 = self.r_hat_member(preference_triplet_batch.trajectories_one,\n member=member_indx)\n r_hat2 = self.r_hat_member(preference_triplet_batch.trajectories_two,\n member=member_indx)\n # compute the return per trajectory\n r_hat1 = r_hat1.sum(axis=1)\n r_hat2 = r_hat2.sum(axis=1)\n\n r_hat = torch.cat([r_hat1, r_hat2], dim=-1)\n\n # compute the ensemble member's loss\n curr_loss = self.CEloss(r_hat, preference_triplet_batch.preference_labels.squeeze())\n # add the loss from the ensemble member to the batch loss\n batch_loss += curr_loss\n # track the loss for this ensemble member\n epoch_ensemble_losses[member_indx] += curr_loss.item()\n\n # compute the accuracy of the ensemble member's 
predictions\n _, predicted = torch.max(r_hat.data, 1)\n correct = (predicted == preference_triplet_batch.preference_labels.squeeze()).sum().item()\n epoch_ensemble_acc[member_indx] += correct\n # compute the gradients\n batch_loss.backward()\n # apply the gradients to the model\n self.optimizer.step()\n # compute the ensemble accuracy for this epoch\n ensemble_accuracies[epoch] = epoch_ensemble_acc / preference_data_loader.dataset_length()\n # compute the mean ensemble loss for this epoch\n ensemble_losses[epoch] = epoch_ensemble_losses / preference_data_loader.dataset_length()\n\n if epoch % 10 == 0:\n print(f\"Epoch {epoch} mean accuracy = {np.mean(ensemble_accuracies[:epoch + 1]):.2f}\")\n\n # check the current mean accuracy, if it is greater than 0.97 then terminate training\n if np.mean(ensemble_accuracies[epoch]) >= 0.97:\n print(f\"Epoch accuracy {np.mean(ensemble_accuracies[epoch]):.2f} \"\n f\"after {epoch} epochs triggered early stopping.\")\n return ensemble_accuracies[:epoch + 1], ensemble_losses[:epoch + 1]\n\n print(f\"Epoch {num_epoch} mean accuracy = {np.mean(ensemble_accuracies):.2f}\")\n\n return ensemble_accuracies, ensemble_losses" }, { "identifier": "PreferenceDataset", "path": "reed/data/preference_dataset.py", "snippet": "class PreferenceDataset:\n def __init__(self, observation_dim: t.Union[t.Tuple, int], action_dim: t.Union[t.Tuple, int], capacity: int,\n size_segment: int, out_path: Path, image_observations: bool, grayscale_images: bool,\n collect_image_pref_dataset: bool, state_action_formatter: PreProcessInference,\n teacher_beta: float = -1, teacher_gamma: float = 1,\n teacher_eps_mistake: float = 0, teacher_eps_skip: float = 0, teacher_eps_equal: float = 0):\n \"\"\"\n Args:\n observation_dim: the dimensionality of the observations\n action_dim: the dimensionality of the actions\n capacity: the maximum number of trajectory pairs to include in the action_dimtaset\n size_segment: the length of the trajectory segments\n out_path: the location where the preference action_dimtaset will be written to disk during training\n image_observations: whether the observations given to the reward model are images\n grayscale_images: whether the image observations should be converted to grayscale instead of color\n collect_image_pref_dataset: whether to collect the image preference dataset separate from the observations.\n Should NOT be set to true if the observations are images.\n state_action_formatter: function that maps states and actions to a single input\n teacher_beta\n teacher_gamma: used to determine how much influence each reward has on the preference label based on\n order within the trajectory. 
Used to compute the return\n teacher_eps_mistake: the frequency with which the teacher assigns an incorrect label\n teacher_eps_skip: the frequency with which the teacher does not assign a label\n teacher_eps_equal: the maximum difference between trajectory returns for the two trajectories to be labelled\n as equally preferred\n \"\"\"\n self.observation_dim = observation_dim\n self.action_dim = action_dim\n self.capacity = capacity\n self.size_segment = size_segment\n self.out_path = out_path\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n # whether to collect the preference dataset as images\n # only needs to be set to True if we are not learning the reward function from images\n # if we are learning the reward function from images then we have an image dataset\n self.collect_image_pref_dataset = collect_image_pref_dataset\n\n # formats the state-action pairs into a single input to the reward model\n self.state_action_formatter = state_action_formatter\n\n # track where each preference triplet is written to disk\n self._preference_triplet_tracker: t.List[Path] = []\n\n self.buffer_index = 0\n self.buffer_full = False\n\n # create the preference labeller\n self._preference_labeller = _PreferenceLabeller(teacher_beta=teacher_beta, teacher_gamma=teacher_gamma,\n teacher_eps_mistake=teacher_eps_mistake,\n teacher_eps_skip=teacher_eps_skip,\n teacher_eps_equal=teacher_eps_equal)\n\n # make sure the outpath where the trajectories will be written exist\n self.out_path.mkdir(parents=True, exist_ok=True)\n\n def __len__(self):\n return len(self._preference_triplet_tracker)\n\n def __getitem__(self, item: int) -> PREFERENCE_TRIPLET:\n \"\"\"\n Load and return the preference triplet at the specified index in the buffer\n\n Args:\n item: index of the triplet in the buffer\n Returns:\n trajectory one\n trajectory two\n preference label\n \"\"\"\n # get the location of the specified preference triplet and load it into memory\n npz_archive = np.load(self._preference_triplet_tracker[item].as_posix())\n\n # grab the trajectories and preference labels\n trajectory_one = npz_archive[\"trajectory_one\"]\n trajectory_two = npz_archive[\"trajectory_two\"]\n preference_label = npz_archive[\"preference_label\"]\n\n return trajectory_one, trajectory_two, preference_label\n\n def get_batch(self, indices: t.List[int]) -> PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Load and return the batch of preference triplets at the given indices in the buffer\n\n Args:\n indices: the buffer indices of the preference triplets to load into memory\n Returns:\n batch of trajectories one\n batch of trajectories two\n batch of preference labels\n \"\"\"\n # accumulate the trajectory pairs and preference labels\n trajectories_one = []\n trajectories_two = []\n preference_labels = []\n # grab each preference triplet\n for index in indices:\n trajectory_one, trajectory_two, preference_label = self[index]\n trajectories_one.append(np.expand_dims(trajectory_one, axis=0))\n trajectories_two.append(np.expand_dims(trajectory_two, axis=0))\n preference_labels.append(preference_label)\n\n return (np.concatenate(trajectories_one, axis=0), np.concatenate(trajectories_two, axis=0),\n np.concatenate(preference_labels, axis=0))\n\n def _sample_trajectory_segments_uniform(self,\n experience_buffer: TrajectoryReplayBuffer,\n trajectory_count: int,\n mini_batch_size: int) -> t.Tuple[np.ndarray, np.ndarray, t.Optional[np.ndarray]]:\n \"\"\"\n Uniformly sample trajectories and then uniformly sample a segment of the 
trajectory.\n\n Format and track the state-action pairs from each trajectory segment\n Format and track rewards from each trajectory segment\n\n Combine the formatted state-action pairs and the rewards across trajectory segments\n\n Args:\n experience_buffer: the replay buffer from which trajectory pairs will be drawn\n trajectory_count: the number of trajectories to be sampled from\n mini_batch_size: the number of trajectories to sample\n\n Returns:\n the formatted state-action pairs from random trajectory segments from trajectories\n the rewards from each random trajectory segment\n (optionally) the image observations from each random trajectory segment - only returned when the flag to\n collect image observations in the preference dataset is true and image observations are not\n used to train the reward model\n \"\"\"\n # select the trajectories to be included in this batch of trajectory segments\n trajectory_indices = np.random.choice(trajectory_count, size=mini_batch_size, replace=True)\n\n # accumulate the formatted state-action pairs and rewards from each trajectory segment\n state_action_pairs = []\n rewards = []\n # optionally accumulate image observations\n image_observations = ([] if self.collect_image_pref_dataset and not self.image_observations else None)\n # extract each trajectory and randomly sample a segment\n for traj_index in trajectory_indices:\n # grab the trajectory\n trajectory = experience_buffer.trajectories[traj_index]\n # select a random segment from the trajectory\n traj_segment = trajectory.random_segment(length=self.size_segment)\n # track the rewards associated with the random segment\n rewards.append(np.expand_dims(traj_segment.env_rewards, axis=0))\n # format the state and action based on whether image observations are being used\n if self.image_observations:\n formatted_pair = self.state_action_formatter.format_state_action(\n traj_segment.initial_image_observations,\n traj_segment.actions,\n batch_sa=True)\n else:\n formatted_pair = self.state_action_formatter.format_state_action(\n traj_segment.initial_observations,\n traj_segment.actions,\n batch_sa=True)\n if self.collect_image_pref_dataset:\n image_observations.append(np.expand_dims(traj_segment.initial_image_observations, axis=0))\n # add a dimension in the front so we can concatenate later and the track\n state_action_pairs.append(np.expand_dims(formatted_pair, axis=0))\n return (np.concatenate(state_action_pairs, axis=0),\n np.concatenate(rewards, axis=0),\n (np.concatenate(image_observations, axis=0) if image_observations is not None else None))\n\n @staticmethod\n def get_rank_probability(trajectories_one: np.ndarray, trajectories_two: np.ndarray,\n reward_model: torch.nn.Module):\n \"\"\"\n Compute the preference-prediction disagreement between the ensemble members for each trajectory pair\n\n Args:\n trajectories_one: the trajectories one to be evaluated for ensemble disagreement\n trajectories_two: the trajectories two to be evaluated for ensemble disagreement\n reward_model: the ensemble of networks that will be used to compute disagreement\n \"\"\"\n\n # get probability x_1 > x_2\n probs = []\n for member in range(len(reward_model.ensemble)):\n probs.append(reward_model.p_hat_member(trajectories_one,\n trajectories_two,\n member=member).cpu().numpy())\n probs = np.array(probs)\n\n return np.mean(probs, axis=0), np.std(probs, axis=0)\n\n def get_queries(self, experience_buffer: TrajectoryReplayBuffer, mb_size=20):\n len_traj, max_len = experience_buffer.trajectory_lengths[0], 
experience_buffer.trajectory_count\n\n # if len(self.experience_buffer.trajectory_lengths[0][-1]) < len_traj:\n # check that the last trajectory contains at least as many transitions as the target segment length\n # we check the last trajectory, because it may be incomplete\n # this is a carry over from the original code. The authors had an assumption that all \"completed\" trajectories\n # will be at least as long as the target segment length\n if experience_buffer.trajectory_lengths[-1] < self.size_segment:\n max_len = max_len - 1\n\n # grab each trajectory, select a random segment from each, format the state-action pairs, and concatenate\n # along the batch dimension\n state_action_pair_traj_one, r_t_1, images_traj_one = self._sample_trajectory_segments_uniform(\n experience_buffer=experience_buffer,\n trajectory_count=max_len,\n mini_batch_size=mb_size)\n state_action_pair_traj_two, r_t_2, images_traj_two = self._sample_trajectory_segments_uniform(\n experience_buffer=experience_buffer,\n trajectory_count=max_len,\n mini_batch_size=mb_size)\n # confirm the image-specific variables are only populated when they should be\n if not self.collect_image_pref_dataset and self.image_observations:\n assert images_traj_one is None and images_traj_two is None\n return state_action_pair_traj_one, state_action_pair_traj_two, r_t_1, r_t_2, images_traj_one, images_traj_two\n\n def put_queries(self, state_action_pair_traj_one: np.ndarray, state_action_pair_traj_two: np.ndarray,\n preference_labels: np.ndarray,\n images_traj_one: t.Optional[np.ndarray] = None, images_traj_two: t.Optional[np.ndarray] = None):\n \"\"\"\n Args:\n state_action_pair_traj_one: the state-action pairs that make up the trajectories one in the queries\n state_action_pair_traj_two: the state-action pairs that make up the trajectories two in the queries\n preference_labels: the preference labels for each pair of trajectories\n images_traj_one: the images for trajectories one\n images_traj_two: the images for trajectories two\n \"\"\"\n # get the number of triplets to be stored\n total_sample = state_action_pair_traj_one.shape[0]\n # write each preference_triplet to disk\n for batch_indx in range(total_sample):\n # get the index of the triplet in the \"buffer\"\n preference_triplet_index = self.buffer_index + batch_indx\n # check if we need to wrap the buffer\n if preference_triplet_index >= self.capacity:\n preference_triplet_index -= self.capacity\n elif not self.buffer_full:\n # this is a previously unseen preference triplet buffer index, so we need to track the triplet location\n self._preference_triplet_tracker.append(self.out_path / f\"preference_triplet_{preference_triplet_index}.npz\")\n # save the preference triplet\n np.savez((self.out_path / f\"preference_triplet_{preference_triplet_index}.npz\").as_posix(),\n trajectory_one=state_action_pair_traj_one[batch_indx],\n trajectory_two=state_action_pair_traj_two[batch_indx],\n preference_label=preference_labels[batch_indx],\n image_trajectory_one=(\n None if images_traj_one is None else images_traj_one[batch_indx]),\n image_trajectory_two=(\n None if images_traj_two is None else images_traj_two[batch_indx]))\n # set the new buffer index\n next_index = self.buffer_index + total_sample\n # check if the buffer has wrapped\n if next_index >= self.capacity:\n self.buffer_full = True\n # wrap the buffer index\n self.buffer_index = next_index - self.capacity\n else:\n self.buffer_index = next_index\n\n def uniform_sampling(self, experience_buffer: TrajectoryReplayBuffer, mb_size: 
int) -> int:\n \"\"\"\n Grow the preference dataset with preference triplets uniformly sampled from the experience buffer\n\n Args:\n experience_buffer: the replay buffer from which to sample trajectory pairs\n mb_size: target number of preference triplets to add to the preference dataset. Fewer than the target may\n be added depending on the whether labeller skips labelling some trajectories.\n Returns:\n number of preference triplets added to the dataset\n \"\"\"\n # get queries\n sa_t_1, sa_t_2, r_t_1, r_t_2, img_sa_t_1, img_sa_t_2 = self.get_queries(experience_buffer=experience_buffer,\n mb_size=mb_size)\n\n # get labels\n sa_t_1, sa_t_2, r_t_1, r_t_2, labels = self._preference_labeller.get_label(sa_t_1, sa_t_2, r_t_1, r_t_2)\n if len(labels) > 0:\n self.put_queries(sa_t_1, sa_t_2, labels, img_sa_t_1, img_sa_t_2)\n\n return len(labels)\n\n # TODO: refactor to break the circular import that would need to happen in order to specify that reward_model here\n # should be BPref.reward_model.RewardModel\n def disagreement_sampling(self, experience_buffer: TrajectoryReplayBuffer, mb_size: int, large_batch: int,\n reward_model: torch.nn.Module) -> int:\n \"\"\"\n Grow the preference dataset with preference triplets from the experience buffer that the reward ensemble\n disagrees about\n\n Args:\n experience_buffer: the replay buffer from which to sample trajectory pairs\n mb_size: target number of preference triplets to add to the preference dataset. Fewer than the target may\n be added depending on the whether labeller skips labelling some trajectories.\n large_batch: scales up the number of triplets to add to the preference dataset to uniformly select a large\n number of trajectory pairs, which are then pruned based on which ones the reward ensemble\n has the most disagreement over\n reward_model: the ensemble of reward networks that will be used to assess disagreement.\n Should be BPref.reward_model.RewardModel, but cannot import and reference from here right now\n as it would lead to circular imports\n Returns:\n number of preference triplets added to the dataset\n \"\"\"\n # get queries\n sa_t_1, sa_t_2, r_t_1, r_t_2, img_sa_t_1, img_sa_t_2 = self.get_queries(\n experience_buffer=experience_buffer, mb_size=mb_size * large_batch)\n\n # get final queries based on ensemble member disagreement\n _, disagree = self.get_rank_probability(sa_t_1, sa_t_2, reward_model=reward_model)\n top_k_index = (-disagree).argsort()[:mb_size]\n r_t_1, sa_t_1 = r_t_1[top_k_index], sa_t_1[top_k_index]\n r_t_2, sa_t_2 = r_t_2[top_k_index], sa_t_2[top_k_index]\n if img_sa_t_1 is not None:\n img_sa_t_1 = img_sa_t_1[top_k_index]\n img_sa_t_2 = img_sa_t_2[top_k_index]\n\n # get labels\n sa_t_1, sa_t_2, r_t_1, r_t_2, labels = self._preference_labeller.get_label(\n sa_t_1, sa_t_2, r_t_1, r_t_2)\n if len(labels) > 0:\n self.put_queries(sa_t_1, sa_t_2, labels, img_sa_t_1, img_sa_t_2)\n\n return len(labels)\n\n def set_teacher_thres_skip(self, new_margin):\n self._preference_labeller.teacher_thres_skip = new_margin * self._preference_labeller.teacher_eps_skip\n\n def set_teacher_thres_equal(self, new_margin):\n self._preference_labeller.teacher_eps_equal = new_margin * self._preference_labeller.teacher_eps_equal\n\n def save(self, dataset_dir: Path, env_id: str, step: int):\n \"\"\"\n Saves the preference dataset as a zip archive and the labeller configuration as a yaml to the specified location\n\n Args:\n dataset_dir: path where the dataset is to be saved\n env_id: the environment/task within which the data was generated\n 
step: the number of policy training steps taken to produce this dataset\n \"\"\"\n # create the ZipFile object\n zip_obj = ZipFile(dataset_dir / f\"{env_id}_preference_dataset_{step}.zip\", \"w\")\n # the configuration for the online preference dataset\n config = {\"teacher_params\": {\"teacher_beta\": self._preference_labeller.teacher_beta,\n \"teacher_gamma\": self._preference_labeller.teacher_gamma,\n \"teacher_eps_mistake\": self._preference_labeller.teacher_eps_mistake,\n \"teacher_eps_equal\": self._preference_labeller.teacher_eps_equal,\n \"teacher_eps_skip\": self._preference_labeller.teacher_eps_skip,\n \"teacher_thres_skip\": self._preference_labeller.teacher_thres_skip,\n \"teacher_thres_equal\": self._preference_labeller.teacher_thres_equal,\n \"label_margin\": self._preference_labeller.label_margin,\n \"label_target\": self._preference_labeller.label_target}}\n with open((dataset_dir / f\"preference_dataset_config.yaml\").as_posix(), \"w+\") as f:\n yaml.dump(config, f)\n # write the labeller config to the preference dataset's zip archive\n zip_obj.write(dataset_dir / f\"preference_dataset_config.yaml\")\n\n # add each preference triplet to the zip archive\n for pref_triplet_path in self._preference_triplet_tracker:\n zip_obj.write(pref_triplet_path)\n # move the file from it temp location to the artifact directory\n file_dest_path = dataset_dir / pref_triplet_path.name\n shutil.move(pref_triplet_path, file_dest_path)\n # close the Zip File\n zip_obj.close()" }, { "identifier": "PreferenceTripletEnsembleDataLoader", "path": "reed/data/preference_data_loader.py", "snippet": "class PreferenceTripletEnsembleDataLoader:\n \"\"\"\n Handles loading and generating batches of preference triplets.\n\n The special logic needed is to handle different batch orderings for different networks in the reward ensemble\n \"\"\"\n def __init__(self, dataset: PreferenceDataset, ensemble_size: int,\n batch_size: int = 64, num_workers: int = 0, shuffle: bool = True, device: torch.device = \"cuda\"):\n \"\"\"\n Args:\n\n \"\"\"\n # create a data loader per ensemble network\n self.loader_ensemble = [DataLoader(dataset=dataset,\n batch_size=batch_size,\n shuffle=shuffle,\n num_workers=num_workers)\n for _ in range(ensemble_size)]\n\n self.device = device\n\n def _format_batch(self, batch: UNFORMATTED_PREFERENCE_TRIPLET_BATCH) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Format the preference batch so that the tensors are longs and on the correct device\n \"\"\"\n return [PreferenceTripletBatch(trajectories_one=member[0].float().to(self.device),\n trajectories_two=member[1].float().to(self.device),\n preference_labels=member[2].long().to(self.device))\n for member in batch]\n\n def dataset_length(self) -> int:\n return len(self.loader_ensemble[0].dataset)\n\n def __iter__(self) -> FORMATTED_PREFERENCE_TRIPLET_BATCH:\n \"\"\"\n Iterate through the preference triplet data loaders and return the batch per ensemble member\n\n Returns:\n list of PreferenceTripletBatch\n \"\"\"\n # set up each loader as an iterator\n iter_loader_ensemble = [iter(loader) for loader in self.loader_ensemble]\n # for each data loader grab the next batch until there are no more batches to grab\n while True:\n # check if there is a next batch to return\n try:\n yield self._format_batch([next(dataloader_iterator) for dataloader_iterator in iter_loader_ensemble])\n except StopIteration:\n break" }, { "identifier": "PreProcessInference", "path": "reed/data/preprocess_images.py", "snippet": "class PreProcessInference:\n \"\"\"\n 
Preprocess the data for inference by the reward, SSC, and SFC models\n \"\"\"\n def __init__(self,\n image_observations: bool = False,\n grayscale_images: bool = True,\n normalize_images: bool = True,\n environment_id: str = \"dmc\"):\n \"\"\"\n Args:\n image_observations: whether the observations are images\n grayscale_images: whether images observations should be in grayscale\n normalize_images: whether the image observations should be normalized\n environment_id: the environment from which the data is coming\n \"\"\"\n self.image_observations = image_observations\n self.grayscale_images = grayscale_images\n self.normalize_images = normalize_images\n self.environment_id = environment_id\n\n @staticmethod\n def _channel_first_to_last(observation: np.ndarray,\n batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the first dimension to the last dimension\n \"\"\"\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 3, 4, 2))\n elif batch_states:\n return np.transpose(observation, (0, 2, 3, 1))\n else:\n return np.transpose(observation, (1, 2, 0))\n\n @staticmethod\n def _channel_last_to_first(observation: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False) -> np.ndarray:\n \"\"\"\n Move the channel from the last dimension to the first dimension\n Args:\n observation: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n Returns:\n the image with the channel dimension moved from first to last\n \"\"\"\n # permute the input so that the channels are in the first dimension of the images\n if batch_states and by_trajectory:\n return np.transpose(observation, (0, 1, 4, 2, 3))\n elif batch_states:\n return np.transpose(observation, (0, 3, 1, 2))\n else:\n # permute the input so that the channels are in the first dimension\n obs = np.transpose(observation, (2, 0, 1))\n # add a dimension along the front for concatenation into the buffer\n return np.expand_dims(obs, axis=0)\n\n def format_state(self, obs: np.ndarray, batch_states: bool = False,\n by_trajectory: bool = False, channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n batch_states: whether a batch of state is to be processed\n by_trajectory: whether the batch of states is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_states,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n # move the channel dimension from first to last\n return self._channel_last_to_first(observation=obs, batch_states=batch_states, by_trajectory=by_trajectory)\n\n else:\n return obs.reshape(1, obs.shape[1:]) if batch_states else obs.reshape(1, obs.shape[0])\n\n def format_state_action(self, obs: np.ndarray, act: np.ndarray,\n batch_sa: 
bool = False, by_trajectory: bool = False,\n channel_first: bool = False) -> np.ndarray:\n \"\"\"\n Args:\n obs: the state observations\n act: the actions associated with each state observation\n batch_sa: whether a batch of state-action pairs is to be processed\n by_trajectory: whether the batch of state-action pairs is structured by trajectory -> should only be\n True when batch_sa=True\n channel_first: whether the channel dimension is first when the observations are images.\n Returns:\n the state-action pairs as a single array\n \"\"\"\n if self.image_observations:\n if channel_first:\n # move the channel dimension from first to last to avoid a bunch of logic in our formatting methods\n # that handles variable locations for the channel dimension\n obs = self._channel_first_to_last(observation=obs,\n batch_states=batch_sa,\n by_trajectory=by_trajectory)\n if self.grayscale_images:\n obs = _to_grayscale(observation=obs)\n if self.normalize_images:\n # TODO: add normalization based on pixel mean and standard deviation instead of scaling 0 to 1\n obs = np.divide(obs, 255.)\n\n # get the dimensions of the image\n obs_dim = obs.shape[-3:]\n assert len(obs_dim) == 3\n # add the actions to the image channels and permute the input so that the channels are in the first\n # dimension of the images\n if batch_sa and by_trajectory:\n repeated_actions = np.tile(act.reshape((act.shape[0], act.shape[1], 1, 1, act.shape[-1])),\n (1, 1, obs_dim[0], obs_dim[1], 1))\n elif batch_sa:\n repeated_actions = np.tile(act.reshape((act.shape[0], 1, 1, act.shape[-1])),\n (1, obs_dim[0], obs_dim[1], 1))\n else:\n repeated_actions = np.tile(act.reshape((1, 1, -1)), (obs_dim[0], obs_dim[1], 1))\n sa_t = np.concatenate((obs, repeated_actions), axis=-1)\n return self._channel_last_to_first(sa_t, batch_states=batch_sa, by_trajectory=by_trajectory)\n else:\n sa_t = np.concatenate([obs, act], axis=-1)\n if batch_sa:\n return sa_t\n else:\n return sa_t.reshape(1, -1)" } ]
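The TrajectoryReplayBuffer snippet in the context list above resolves a flat transition index into a (trajectory, offset) pair with a cumulative-sum lookup. Below is a minimal standalone sketch of that lookup; the function name flat_to_trajectory_index is illustrative, not taken from the source.

import numpy as np

def flat_to_trajectory_index(flat_indx: int, trajectory_lengths: list) -> tuple:
    # cumulative number of transitions stored up to and including each trajectory
    transition_cumulative_sum = np.cumsum(trajectory_lengths)
    # the first trajectory whose cumulative count exceeds the flat index holds the transition
    target_trajectory_indx = int(np.argmax(flat_indx < transition_cumulative_sum))
    # offset within the target trajectory = flat index minus transitions in earlier trajectories
    if target_trajectory_indx == 0:
        transition_trajectory_indx = flat_indx
    else:
        transition_trajectory_indx = flat_indx - transition_cumulative_sum[target_trajectory_indx - 1]
    return target_trajectory_indx, transition_trajectory_indx

# with lengths [3, 5, 2], flat index 4 is the second transition of trajectory 1
assert flat_to_trajectory_index(4, [3, 5, 2]) == (1, 1)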
import time
import typing as t
from collections import deque
from pathlib import Path

import hydra
import numpy as np
import torch
from omegaconf import dictconfig, OmegaConf

from BPref import utils
from BPref.logger import Logger
from BPref.replay_buffer import TrajectoryReplayBuffer
from reed.models.reward_model import StateActionRewardModel
from reed.data.preference_dataset import PreferenceDataset
from reed.data.preference_data_loader import PreferenceTripletEnsembleDataLoader
from reed.data.preprocess_images import PreProcessInference
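p_hat_member in the StateActionRewardModel snippet above scores a pair of segments by summing per-transition rewards into returns and softmaxing them (the "eqn 1" its comments cite, i.e. a Bradley-Terry preference model). A minimal sketch of just that probability computation, assuming per-transition reward tensors of shape (batch, segment_len, 1):

import torch
import torch.nn.functional as F

def preference_probability(r_hat1: torch.Tensor, r_hat2: torch.Tensor) -> torch.Tensor:
    # collapse each segment's per-transition rewards into one scalar return per pair
    returns = torch.cat([r_hat1.sum(dim=1), r_hat2.sum(dim=1)], dim=-1)  # (batch, 2)
    # softmax over the two returns; index 0 is P(segment one preferred over segment two)
    return F.softmax(returns, dim=-1)[:, 0]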
20852
Determine the dimensionality of the inputs to the reward model Args: observation_dim: the dimensionality of agent observations. If the observation is an image, the dimensionality should have the following order: (num_channels, height, width) action_dim: the dimensionality of agent actions Returns: the dimensionality of the reward model's inputs """ # compute the dimensions of the input to the reward function if not self.experiment_config.reward_from_image_observations: return observation_dim + action_dim else: # we need to concatenate the actions to last dimension of the image # the input to the reward net also needs to have the channels first # the image dimensions are given to us a (height, width, channels) sample_shape = list(observation_dim) if self.experiment_config.grayscale_images: num_channels = action_dim + 1 else: num_channels = sample_shape[0] + action_dim # update the number of channels sample_shape[0] = num_channels # the dimensions of the input to the reward model return sample_shape def _determine_reward_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. If so the reward input shape needs to be set accordingly Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") formatted_image_observation = self._reward_input_preprocessor.format_state(img_obs).squeeze(axis=0) observation_space = formatted_image_observation.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _determine_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. 
If so the replay buffer needs to be set up to accumulate the image observations Returns: the dimensionality of reward's observation space """ if self.experiment_config.reward_from_image_observations: # get a sample image rendering of the environment and get its shape self.env.reset() if "metaworld" in self.experiment_config.env: start_time = time.time() img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) end_time = time.time() print(f"Sample render time for metaworld is {end_time - start_time} seconds") else: start_time = time.time() img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) end_time = time.time() print(f"Sample render time for DMC is {end_time - start_time} seconds") observation_space = img_obs.shape print("--------------------------") print("--------------------------") print("--------------------------") print("image observation shape", observation_space) print("--------------------------") print("--------------------------") print("--------------------------") else: observation_space = self.env.observation_space.shape[0] return observation_space def _render_image_observation(self) -> np.ndarray: """ Render the current image observation """ if "metaworld" in self.experiment_config.env: img_obs = self.env.render(camera_name=self.experiment_config.camera_name, resolution=( self.experiment_config.image_height, self.experiment_config.image_width)) else: img_obs = self.env.render(mode="rgb_array", height=self.experiment_config.image_height, width=self.experiment_config.image_width) return img_obs
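The _determine_reward_input_dimensions logic in the cropped code above reduces to a small shape rule: state inputs concatenate observation and action vectors, while image inputs tile the action over height x width and stack it onto the channel axis (a single image channel when grayscale). A sketch of just that rule; the helper name is illustrative:

def reward_input_shape(observation_dim, action_dim: int, image_input: bool, grayscale: bool):
    # vector observations: the reward consumes the concatenated state-action vector
    if not image_input:
        return observation_dim + action_dim
    # image observations arrive channels-first as (channels, height, width)
    sample_shape = list(observation_dim)
    sample_shape[0] = (1 if grayscale else sample_shape[0]) + action_dim
    return sample_shape

assert reward_input_shape(17, 6, image_input=False, grayscale=False) == 23
assert reward_input_shape((3, 84, 84), 6, image_input=True, grayscale=True) == [7, 84, 84]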
# # For licensing see accompanying LICENSE file. # Copyright (C) 2023 Apple Inc. All Rights Reserved. # class PEBBLE: """ Train a reward model in conjunction with policy training following the PEBBLE algorithm from (Lee et al. 2021) """ def __init__(self, experiment_config: dictconfig.DictConfig): """ Args: experiment_config: contains the configuration for the experiment to be run. Access like a dictionry """ # track the experimental configuration self.experiment_config = experiment_config # create the logger to track policy learning progress self.logger = Logger( self.experiment_config.out_dir, save_tb=self.experiment_config.log_save_tb, log_frequency=self.experiment_config.log_frequency, agent=self.experiment_config.agent.name) # used to track where we are in training # total amount of feedback the reward model has solicited self.total_feedback = 0 # total amount of feedback given to the reward model self.labeled_feedback = 0 # policy train step self.step = 0 # we need to set the random seed for replication purposes utils.set_seed_everywhere(self.experiment_config.seed) # the device on which models will be trained self.device = torch.device(self.experiment_config.device) # flag to make sure we are handling multi-gpu training where we need to self.multi_gpu = torch.cuda.device_count() > 1 print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print(f"There is {torch.cuda.device_count()} GPU, so models will be trained with torch.nn.DataParallel.") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") print("----------------------------------------") # make the environment if 'metaworld' in self.experiment_config.env: self.env = utils.make_metaworld_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = True else: self.env = utils.make_env(self.experiment_config) # we are not evaluating a domain where we need to log whether an agent has reached a goal state self.log_success = False print('----------------------') print('----------------------') print('----------------------') print('----------------------') print("observation space ", self.env.observation_space.shape[0]) print("action space ", self.env.action_space.shape[0]) print('----------------------') print('----------------------') print('----------------------') print('----------------------') # we need to set the policy's observation and action space self.experiment_config.agent.params.obs_dim = self.env.observation_space.shape[0] self.experiment_config.agent.params.action_dim = self.env.action_space.shape[0] self.experiment_config.agent.params.action_range = [ float(self.env.action_space.low.min()), float(self.env.action_space.high.max()) ] # create the agent specified in the configuration self.agent = hydra.utils.instantiate(self.experiment_config.agent) # the class that will format the observations and observation action pairs for consumption by the reward model self._reward_input_preprocessor = PreProcessInference( image_observations=self.experiment_config.reward_from_image_observations, grayscale_images=self.experiment_config.grayscale_images, normalize_images=self.experiment_config.normalized_images) # determine the reward's observation space # if the reward is trained on images then the reward's 
observation space differs from the policy's, which is # trained on the state space self._observation_dimensionality = self._determine_observation_dimensions() self._reward_observation_dimensionality = self._determine_reward_observation_dimensions() # create the agent's replay buffer setting if image observations will need to be tracked self.replay_buffer = TrajectoryReplayBuffer( int(self.experiment_config.replay_buffer_capacity), self.device, image_observations=(self._observation_dimensionality if (self.experiment_config.reward_from_image_observations or self.experiment_config.save_image_observations) else None) ) # determine the dimensionality of the input to the reward function self.reward_in_dim = self._determine_reward_input_dimensions( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0]) # instantiating the reward model self.reward_model = self.construct_reward_ensemble() # create the preference dataset that will solicit and hold labelled preference triplets self.preference_dataset = PreferenceDataset( observation_dim=self._reward_observation_dimensionality, action_dim=self.env.action_space.shape[0], capacity=self.experiment_config.preference_dataset_capacity, size_segment=self.experiment_config.segment_size, out_path=Path("/tmp/preference_dataset/"), image_observations=self.experiment_config.reward_from_image_observations, state_action_formatter=self._reward_input_preprocessor, grayscale_images=self.experiment_config.grayscale_images, collect_image_pref_dataset=self.experiment_config.save_image_observations, teacher_beta=self.experiment_config.teacher_beta, teacher_gamma=self.experiment_config.teacher_gamma, teacher_eps_mistake=self.experiment_config.teacher_eps_mistake, teacher_eps_skip=self.experiment_config.teacher_eps_skip, teacher_eps_equal=self.experiment_config.teacher_eps_equal ) # save the experimental configuration with open(Path(self.experiment_config.out_dir) / "experiment_config.yaml", "w+") as f: OmegaConf.save(config=self.experiment_config, f=f) def _determine_reward_input_dimensions(self, observation_dim: t.Union[int, np.ndarray], action_dim: int) -> t.Union[int, t.Sequence]: """ Determine the dimensionality of the inputs to the reward model Args: observation_dim: the dimensionality of agent observations. If the observation is an image, the dimensionality should have the following order: (num_channels, height, width) action_dim: the dimensionality of agent actions Returns: the dimensionality of the reward model's inputs """ # compute the dimensions of the input to the reward function if not self.experiment_config.reward_from_image_observations: return observation_dim + action_dim else: # we need to concatenate the actions to last dimension of the image # the input to the reward net also needs to have the channels first # the image dimensions are given to us a (height, width, channels) sample_shape = list(observation_dim) if self.experiment_config.grayscale_images: num_channels = action_dim + 1 else: num_channels = sample_shape[0] + action_dim # update the number of channels sample_shape[0] = num_channels # the dimensions of the input to the reward model return sample_shape def _determine_reward_observation_dimensions(self) -> t.Union[int, np.ndarray]: """ Check if the reward will use the image observations. 
        If so, the reward input shape needs to be set accordingly

        Returns:
            the dimensionality of the reward's observation space
        """
        if self.experiment_config.reward_from_image_observations:
            # get a sample image rendering of the environment and get its shape
            self.env.reset()
            if "metaworld" in self.experiment_config.env:
                start_time = time.time()
                img_obs = self.env.render(camera_name=self.experiment_config.camera_name,
                                          resolution=(self.experiment_config.image_height,
                                                      self.experiment_config.image_width))
                end_time = time.time()
                print(f"Sample render time for metaworld is {end_time - start_time} seconds")
            else:
                start_time = time.time()
                img_obs = self.env.render(mode="rgb_array",
                                          height=self.experiment_config.image_height,
                                          width=self.experiment_config.image_width)
                end_time = time.time()
                print(f"Sample render time for DMC is {end_time - start_time} seconds")
            formatted_image_observation = self._reward_input_preprocessor.format_state(img_obs).squeeze(axis=0)
            observation_space = formatted_image_observation.shape
            print("--------------------------")
            print("image observation shape", observation_space)
            print("--------------------------")
        else:
            observation_space = self.env.observation_space.shape[0]

        return observation_space

    def _determine_observation_dimensions(self) -> t.Union[int, np.ndarray]:
        """
        Check if the reward will use the image observations.
        If so, the replay buffer needs to be set up to accumulate the image observations

        Returns:
            the dimensionality of the reward's observation space
        """
        if self.experiment_config.reward_from_image_observations:
            # get a sample image rendering of the environment and get its shape
            self.env.reset()
            if "metaworld" in self.experiment_config.env:
                start_time = time.time()
                img_obs = self.env.render(camera_name=self.experiment_config.camera_name,
                                          resolution=(self.experiment_config.image_height,
                                                      self.experiment_config.image_width))
                end_time = time.time()
                print(f"Sample render time for metaworld is {end_time - start_time} seconds")
            else:
                start_time = time.time()
                img_obs = self.env.render(mode="rgb_array",
                                          height=self.experiment_config.image_height,
                                          width=self.experiment_config.image_width)
                end_time = time.time()
                print(f"Sample render time for DMC is {end_time - start_time} seconds")
            observation_space = img_obs.shape
            print("--------------------------")
            print("image observation shape", observation_space)
            print("--------------------------")
        else:
            observation_space = self.env.observation_space.shape[0]

        return observation_space

    def _render_image_observation(self) -> np.ndarray:
        """
        Render the current image observation
        """
        if "metaworld" in self.experiment_config.env:
            img_obs = self.env.render(camera_name=self.experiment_config.camera_name,
                                      resolution=(self.experiment_config.image_height,
                                                  self.experiment_config.image_width))
        else:
            img_obs = self.env.render(mode="rgb_array",
                                      height=self.experiment_config.image_height,
                                      width=self.experiment_config.image_width)
        return img_obs
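# Editor's note: the sketch below is not part of the original file. Assuming that
# the preprocessor tiles each action value into a constant-valued image channel
# (which is what the channel arithmetic in _determine_reward_input_dimensions
# implies: channels + action_dim, or 1 + action_dim for grayscale), this is how
# an observation-action pair could be assembled into the reward input. The helper
# name build_image_reward_input is hypothetical.
import numpy as np

def build_image_reward_input(img_obs: np.ndarray, action: np.ndarray) -> np.ndarray:
    """img_obs: (channels, height, width); action: (action_dim,)."""
    _, height, width = img_obs.shape
    # broadcast each action dimension into one constant-valued channel
    action_channels = np.tile(action[:, None, None], (1, height, width))
    # stack the action channels onto the image along the channel axis,
    # yielding shape (channels + action_dim, height, width)
    return np.concatenate([img_obs, action_channels], axis=0)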
def construct_reward_ensemble(self) -> StateActionRewardModel:
3
2023-11-06 23:14:20+00:00
24k
allenai/unified-io-2
t5x/train.py
[ { "identifier": "PackingStrategy", "path": "t5x/examples/unified_io/packing.py", "snippet": "class PackingStrategy:\n \"\"\"Defines how to pack data during training and handles batch-level constraints\n from the input/target encoders\"\"\"\n\n pack_max_len: Optional[Tuple[int, int]] = None\n \"\"\"If packing, max input/target length to to\"\"\"\n\n pack_pool_size: int = 10\n \"\"\"Pool to use when packing examples\"\"\"\n\n constraint_pool_size: int = 10\n \"\"\"Pool to use when matching batch constraints\"\"\"\n\n max_to_pack: int = 2\n \"\"\"Max examples to pack together\"\"\"\n\n @property\n def pack(self):\n return self.pack_max_len is not None\n\n def batch(self, ds, batch_size, drop_remainder=True, batch_constraints=None):\n if batch_constraints is None:\n batch_constraints = []\n for k, v in get_input_modalities().items():\n bound = v.get_constraints()\n if bound is not None:\n def _fn(ex):\n mask = tf.cast(ex[f\"inputs/{k}/mask\"], tf.bool)\n return tf.reduce_sum(tf.cast(tf.reduce_any(mask, -1), tf.int32))\n batch_bound = int(round(bound * batch_size))\n if self.pack_max_len is None:\n logging.info(f\"Adding batch constraint {k}/{bound} => \"\n f\"({batch_bound} per batch of {batch_size})\")\n else:\n bound *= self.max_to_pack\n logging.info(f\"Adding batch constraint {k}/{bound} => \"\n f\"({batch_bound} per batch of {batch_size} groups of {self.max_to_pack})\")\n batch_constraints.append((_fn, batch_bound))\n if self.pack:\n enc, dec = self.pack_max_len\n if self.max_to_pack == 2:\n ds = pair_examples(ds, enc, dec, self.pack_pool_size)\n else:\n raise NotImplementedError()\n if batch_constraints:\n ds = batch_with_constraints(ds, batch_size, self.constraint_pool_size, batch_constraints)\n else:\n ds = ds.batch(batch_size, drop_remainder=drop_remainder)\n return unfold(ds, batch_size, n=self.max_to_pack)\n else:\n if batch_constraints:\n return batch_with_constraints(ds, batch_size, self.constraint_pool_size, batch_constraints)\n else:\n return ds.batch(batch_size, drop_remainder=drop_remainder)" }, { "identifier": "checkpoints", "path": "t5x/checkpoints.py", "snippet": "VERSION = 3\n_DESIRED_CHUNK_SIZE_BYTES = 64 * 1024 * 1024\n_TRAIN_DS_PREFIX = 'train_ds'\n_OPTIMIZER_KEY = 'optimizer'\n_VERSION_KEY = 'version'\n_CHECKPOINTS_SUBDIR = 'checkpoints'\n_STATE_KEY = 'state'\n_DATASET_KEY = 'dataset'\n_FLAX_CHECKPOINT_FILE = 'checkpoint'\ndef _choose_chunk_shape(write_shape: Sequence[int],\n target_elements: int) -> List[int]:\n def get_total_elements():\ndef _run_future_tree(future_tree):\ndef all_steps(checkpoints_dir: str) -> Sequence[int]:\ndef all_dataset_checkpoint_steps(checkpoints_dir: str) -> Sequence[int]:\ndef latest_step(checkpoints_dir: str) -> Optional[int]:\ndef _get_local_data(x):\ndef _sync_global_devices(name: str) -> None:\ndef get_checkpoint_dir(checkpoints_dir: str, step: int) -> str:\ndef _cast(target: PyTreeDef, dtype: jnp.dtype):\n def maybe_cast(x):\ndef _update_ts_path_from_relative_to_absolute(\n ckpt_dir: str, ts_spec_dict: MutableMapping[str, Any]):\ndef _maybe_update_ts_from_file_to_gcs(ckpt_contents):\n def _gfile_to_gcs_driver(arr_or_ts_spec_dict):\n def _is_leaf(value):\ndef _maybe_update_ts_from_gcs_to_file(ckpt_contents):\n def _gcs_to_file_driver(arr_or_ts_spec_dict):\n def _is_leaf(value):\n def __init__(self, num_bytes):\n async def wait_for_bytes(self, n_bytes):\n async def return_bytes(self, n_bytes):\n def __call__(self, state_dict: PyTreeDef,\n parameter_infos: PyTreeDef) -> Tuple[PyTreeDef, PyTreeDef]:\n def __call__(self,\n state_dict: 
PyTreeDef,\n target_state_dict: PyTreeDef,\n *,\n is_resuming: bool = False) -> PyTreeDef:\n def __init__(self, dataset_iterator: tf.data.Iterator):\n def save(self, filename: str):\n def load(self, filename: str):\n def __init__(self,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n checkpoints_dir: str,\n dataset_iterator: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n *,\n keep: Optional[int] = None,\n save_dtype: jnp.dtype = np.float32,\n restore_dtype: Optional[jnp.dtype] = None,\n use_gda: Optional[bool] = True,\n keep_dataset_checkpoints: Optional[int] = None):\n def _get_state_dict_for_save(\n self,\n state_dict: Dict[str, Any],\n lazy_load: bool = True) -> MutableMapping[str, Any]:\n def _lazy_load_device_array(arr):\n def _get_parameter_infos(self):\n def _get_param_info(name: str, arr: Any, axes: partitioning.PartitionSpec):\n def _get_checkpoint_dir(self, step: int) -> str:\n def all_steps(self) -> Sequence[int]:\n def all_dataset_checkpoint_steps(self) -> Sequence[int]:\n def latest_step(self) -> Optional[int]:\n def _remove_old_dataset_checkpoints(self):\n def _remove_old_checkpoints(self):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def _write_state_to_tensorstore(\n self,\n ckpt_dir: str,\n train_state: train_state_lib.TrainState,\n concurrent_gb: int,\n state_transformation_fns: Sequence[SaveStateTransformationFn],\n ) -> Mapping[str, Any]:\n async def _write_array(maybe_arr: Any,\n param_info: Optional[_ParameterInfo],\n cast: bool = False):\n def _cast_arr_if_not_partitioned(maybe_arr, param_info):\n def _transform_state_and_infos(\n self,\n state_dict: PyTreeDef,\n parameter_infos: PyTreeDef,\n state_transformation_fns: Sequence[SaveStateTransformationFn],\n ) -> Tuple[PyTreeDef, PyTreeDef]:\n def restore(\n self,\n step: Optional[int] = None,\n path: Optional[str] = None,\n state_transformation_fns: Sequence[RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def _restore_train_state(\n self,\n state_dict: optimizers.OptimizerStateType) -> train_state_lib.TrainState:\n def _create_lazy_awaitable_array(\n self, param_info: _ParameterInfo, maybe_ts_spec: Any, ckpt_path: str,\n restore_dtype: Optional[jnp.dtype]) -> LazyAwaitableArray:\n async def get_fn():\n def _read_state_from_tensorstore(\n self,\n ckpt_path: str,\n written_state_dict: Mapping[str, Any],\n restore_parameter_infos: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False,\n ) -> Mapping[str, Any]:\n def restore_from_tf_checkpoint(\n self,\n path_or_dir: str,\n strict: bool = True,\n translator: Optional[checkpoint_importer.CheckpointTranslator] = None\n ) -> train_state_lib.TrainState:\n def _partition_parameter(maybe_arr: Any, param_info: _ParameterInfo):\n def convert_from_tf_checkpoint(\n self,\n path_or_dir: str,\n *,\n state_transformation_fns: Sequence[SaveStateTransformationFn] = (),\n concurrent_gb: int = 16,\n translator: Optional[checkpoint_importer.CheckpointTranslator] = None):\n def _get_optimizer_state_dict(\n self, ckpt_contents: PyTreeDef,\n state_transformation_fns: Sequence[RestoreStateTransformationFn]):\n def __call__(self,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n checkpoints_dir: str,\n dataset_iterator: 
Optional[tf.data.Iterator] = None,\n *,\n keep: Optional[int] = None,\n save_dtype: jnp.dtype = np.float32,\n restore_dtype: Optional[jnp.dtype] = None,\n use_gda: Optional[bool] = False,\n keep_dataset_checkpoints: Optional[int] = None) -> Checkpointer:\n def __init__(self,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n checkpoints_dir: str,\n dataset_iterator: Optional[tf.data.Iterator] = None,\n *,\n keep: Optional[int] = None,\n save_dtype: jnp.dtype = np.float32,\n restore_dtype: Optional[jnp.dtype] = None,\n metric_name_to_monitor: str = 'train/accuracy',\n metric_mode: str = 'max',\n keep_checkpoints_without_metrics: bool = True,\n force_keep_period: Optional[int] = None,\n use_gda: bool = False,\n keep_dataset_checkpoints: Optional[int] = None):\n def _populate_metrics_for_steps(self,\n steps: Iterable[int]) -> Mapping[int, float]:\n def _try_fill_metric_run_and_tag_names(self, run_keys: Iterable[str]) -> bool:\n def _filter_out_force_keep_period_steps(self, existing_steps):\n def _remove_old_checkpoints(self):\ndef _get_optimizer_state_dict(\n ckpt_contents: PyTreeDef, optimizer_state: Mapping[str, Any],\n state_transformation_fns: Sequence[RestoreStateTransformationFn]):\ndef _transform_state_and_infos(\n state_dict: PyTreeDef,\n parameter_infos: PyTreeDef,\n state_transformation_fns: Sequence[SaveStateTransformationFn],\n) -> Tuple[PyTreeDef, PyTreeDef]:\nasync def _read_ts(param_info: _ParameterInfo,\n maybe_tspec: Any,\n ckpt_path: str,\n restore_dtype: Optional[jnp.dtype] = None,\n mesh: Optional[gda_lib.Shape] = None,\n axes: Optional[gda_lib.MeshAxes] = None):\ndef fake_param_info(maybe_tspec: Any) -> Optional[_ParameterInfo]:\ndef find_checkpoint(path: str, step: Optional[int] = None) -> str:\ndef load_t5x_checkpoint(\n path: str,\n step: Optional[int] = None,\n state_transformation_fns: Sequence[RestoreStateTransformationFn] = (),\n remap: bool = True,\n restore_dtype: Optional[jnp.dtype] = None,\n lazy_parameters: bool = False) -> PyTreeDef:\n def _create_lazy_awaitable_array(\n param_info: _ParameterInfo, maybe_ts_spec: Any, ckpt_path: str,\n restore_dtype: Optional[jnp.dtype]) -> LazyAwaitableArray:\ndef _transforms_from_state_transformation_fns(\n state_transformation_fns: Sequence[SaveStateTransformationFn]):\n def __init__(self, checkpoint_filename: str):\n def save(self, directory: epath.Path, item: tf.data.Iterator):\n def restore(self,\n directory: epath.Path,\n item: Optional[tf.data.Iterator] = None) -> tf.data.Iterator:\n def structure(self, directory: epath.Path) -> Any:\n def structure(self, directory: epath.Path) -> PyTreeDef:\n def tensorstore_spec_to_name(leaf):\n def save(self,\n directory: Union[str, epath.Path],\n item: Any,\n *args,\n force: bool = False,\n tmp_directory: Optional[Union[str, epath.Path]] = None,\n **kwargs):\n def __init__(self,\n directory: str,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n dataset_iterator: Optional[tf.data.Iterator] = None,\n save_dtype: Optional[jnp.dtype] = None,\n restore_dtype: Optional[jnp.dtype] = None,\n keep: Optional[int] = None,\n keep_dataset_checkpoints: Optional[int] = None):\n def all_steps(self) -> Sequence[int]:\n def _parameter_infos(self,\n train_state: train_state_lib.TrainState) -> PyTreeDef:\n def _get_save_directory(self,\n step: int,\n directory: epath.Path,\n key_name: Optional[str] = None) -> epath.Path:\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: 
Sequence[SaveStateTransformationFn] = ()):\n def _save_args(param_info):\n def restore(\n self,\n step: int,\n fallback_state: Optional[Mapping[str, Any]] = None,\n state_transformation_fns: Sequence[SaveStateTransformationFn] = (),\n lazy_parameters: Optional[bool] = False) -> train_state_lib.TrainState:\n def _restore_args(param_info):\n def _add_checkpoint_info(self, step, metrics):\nclass _ParameterInfo:\nclass _BytesConditionVariable(object):\nclass SaveStateTransformationFn(typing_extensions.Protocol):\nclass RestoreStateTransformationFn(typing_extensions.Protocol):\nclass _TfDataCheckpointer:\nclass Checkpointer(object):\nclass CheckpointerConstructor(typing_extensions.Protocol):\nclass SaveBestCheckpointer(Checkpointer):\nclass _OrbaxParamInfo:\nclass DatasetCheckpointHandler(orbax.checkpoint.CheckpointHandler):\nclass TrainStateCheckpointHandler(orbax.checkpoint.PyTreeCheckpointHandler):\nclass NonAtomicCheckpointer(orbax.checkpoint.Checkpointer):\nclass CheckpointManager(orbax.checkpoint.CheckpointManager):" }, { "identifier": "eval", "path": "t5x/eval.py", "snippet": "_DEFAULT_GIN_SEARCH_PATHS = [\n os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n]\n FLAGS = flags.FLAGS\nclass SummarizeConfigFn(Protocol):\nclass InferenceEvaluator:\n def __call__(self, model_dir: str,\n summary_writer: Optional[metric_writers.SummaryWriter],\n step: int) -> None:\n def __init__(\n self,\n infer_eval_dataset_cfg: utils.DatasetConfig,\n inference_evaluator_cls: utils.EvaluatorConstructor,\n model: models.BaseModel,\n partitioner: partitioning.BasePartitioner,\n log_dir: Optional[str] = None,\n verify_matching_vocabs_fn: Optional[\n Callable[[utils.DatasetConfig, models.BaseModel], None]]=None,\n ):\n def model_feature_shapes(self) -> Mapping[str, Tuple[int, ...]]:\n def eval_tasks(self) -> Sequence[seqio.Task]:\n def close(self):\n def evaluate(\n self,\n train_state: train_state_lib.TrainState,\n train_state_axes: train_state_lib.TrainState,\n ) -> seqio.evaluation.AllMetricsFuture:\ndef evaluate(\n *,\n model: models.BaseTransformerModel,\n dataset_cfg: utils.DatasetConfig,\n restore_checkpoint_cfg: utils.RestoreCheckpointConfig,\n partitioner: partitioning.BasePartitioner,\n output_dir: str,\n inference_evaluator_cls: utils.EvaluatorConstructor = UnifiedIOEvaluator,\n use_wandb = True,\n summarize_config_fn: SummarizeConfigFn = gin_utils.summarize_gin_config,\n train_state_initializer_cls: Type[\n utils.TrainStateInitializer] = utils.TrainStateInitializer,\n fallback_init_rng: Optional[int] = None,\n log_only=False\n):\n def main(argv: Sequence[str]):\n def _main(argv: Sequence[str]):" }, { "identifier": "models", "path": "t5x/models.py", "snippet": "class TokensIdsToLogitsCallable(typing_extensions.Protocol):\nclass DecodeFnCallable(typing_extensions.Protocol):\nclass BaseModel(abc.ABC):\nclass BaseTransformerModel(BaseModel):\nclass EncoderDecoderModel(BaseTransformerModel):\nclass DecoderOnlyModel(BaseTransformerModel):\n def __call__(\n self, decoding_state: decoding.DecodingState\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def __call__(self, *, inputs: jnp.ndarray, cache: Mapping[str, jnp.ndarray],\n tokens_to_logits: TokensIdsToLogitsCallable, eos_id: int,\n num_decodes: int, decode_rng: Optional[jax.random.KeyArray],\n cache_offset: int, **kwargs) -> Tuple[jnp.ndarray, jnp.ndarray]:\n def __init__(self, optimizer_def: optimizers.OptimizerDefType):\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: 
Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def eval_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def predict_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: Optional[DecodeFnCallable] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[Union[\n float, int, str, losses.SpecialLossNormalizingFactor]] = None,\n ):\n def input_vocabulary(self):\n def output_vocabulary(self):\n def decode_fn(self):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None) -> jnp.ndarray:\n def loss_fn(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray],\n ) -> Tuple[jnp.ndarray, MetricsMap]:\n def _compute_metrics(\n self,\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n ) -> MetricsMap:\n def __init__(\n self,\n module: nn.Module,\n input_vocabulary: seqio.Vocabulary,\n output_vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.beam_search,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False,\n other_variables: Optional[PyTreeDef] = None,\n ) -> Union[jnp.ndarray, Tuple[jnp.ndarray, flax_scope.FrozenVariableDict]]:\n def _compute_logits_from_slice(\n self, decoding_state: decoding.DecodingState, params: PyTreeDef,\n encoded_inputs: jnp.ndarray, raw_inputs: jnp.ndarray,\n max_decode_length: int) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n prompt_with_targets: bool = False\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False,\n ) -> Union[jnp.ndarray, 
Tuple[jnp.ndarray, Mapping[str, Any]]]:\n def __init__(\n self,\n module: nn.Module,\n vocabulary: seqio.Vocabulary,\n optimizer_def: optimizers.OptimizerDefType,\n decode_fn: DecodeFnCallable = decoding.temperature_sample,\n inputs_bidirectional_attention: bool = False,\n feature_converter_cls: Optional[Callable[...,\n seqio.FeatureConverter]] = None,\n label_smoothing: float = 0.0,\n z_loss: float = 0.0,\n loss_normalizing_factor: Optional[float] = None,\n ):\n def get_initial_variables(\n self,\n rng: jax.random.KeyArray,\n input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str, jnp.dtype]] = None\n ) -> flax_scope.FrozenVariableDict:\n def _get_decoder_causal_attention(self, batch):\n def _compute_logits(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n dropout_rng: Optional[jax.random.KeyArray] = None,\n mutable: flax_scope.CollectionFilter = False) -> jnp.ndarray:\n def _compute_logits_from_slice(\n self,\n decoding_state: decoding.DecodingState,\n params: PyTreeDef,\n max_decode_length: int,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\n def score_batch(self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n return_intermediates: bool = False) -> jnp.ndarray:\n def _compute_kv_cache(\n self,\n params: PyTreeDef,\n inputs: jnp.ndarray,\n inputs_lengths: jnp.ndarray,\n decoder_causal_attention: jnp.ndarray,\n ) -> PyTreeDef:\n def predict_batch_with_aux(\n self,\n params: PyTreeDef,\n batch: Mapping[str, jnp.ndarray],\n rng: Optional[jax.random.KeyArray] = None,\n *,\n return_all_decodes: bool = False,\n num_decodes: int = 1,\n decoder_params: Optional[MutableMapping[str, Any]] = None,\n ) -> Tuple[jnp.ndarray, Mapping[str, jnp.ndarray]]:\ndef remove_prefix(sequence: jnp.ndarray,\n prefix_length: jnp.ndarray) -> jnp.ndarray:\ndef compute_weighted_accuracy(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n weights: Optional[jnp.ndarray] = None) -> Tuple[jnp.ndarray, jnp.ndarray]:\ndef compute_metrics(logits: jnp.ndarray, targets: jnp.ndarray,\n weights: jnp.ndarray, loss: jnp.ndarray,\n weight_sum: jnp.ndarray,\n additional_metrics: MetricsMap) -> MetricsMap:\ndef compute_base_metrics(\n logits: jnp.ndarray,\n targets: jnp.ndarray,\n mask: jnp.ndarray,\n loss: jnp.ndarray,\n z_loss: Optional[jnp.ndarray] = None,\n segment_ids: Optional[Mapping[str, jnp.ndarray]] = None,\n) -> MetricsMap:\ndef get_input_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\ndef get_output_vocabulary(model: BaseTransformerModel) -> seqio.Vocabulary:\n FEATURE_CONVERTER_CLS: Callable[..., seqio.FeatureConverter]\n FEATURE_CONVERTER_CLS = seqio.EncDecFeatureConverter\n FEATURE_CONVERTER_CLS = seqio.DecoderFeatureConverter" }, { "identifier": "evaluator", "path": "t5x/examples/unified_io/evaluator.py", "snippet": "class UnifiedIOOutput:\nclass UnifiedIOEvaluator(seqio.Evaluator):\n def text(self):\n def text_tokens(self):\n def image_tokens(self):\n def image(self):\n def audio(self):\n def scores(self):\ndef build_uio_outputs(aux_values, vocab) -> List[UnifiedIOOutput]:\n def __init__(\n self,\n mixture_or_task_name: str,\n feature_converter,\n eval_split: str = \"validation\",\n use_cached: bool = False,\n seed: Optional[int] = 42,\n sequence_length: Optional[Mapping[str, int]] = None,\n num_examples: Optional[int] = None,\n shuffle: bool = False,\n logger_cls: Sequence = (),\n log_dir: Optional[str] = None,\n use_memory_cache: bool = True,\n target_field_name: str = \"targets\",\n ):\n def _compute_metrics(self,\n predicted_tokens: 
AllOutputTokensType,\n scores: AllOutputScoresType,\n all_aux_values: AllOutputAuxValuesType,\n step: Optional[int] = None) -> AllMetricsType:" }, { "identifier": "partitioning", "path": "t5x/partitioning.py", "snippet": "class AxisNames(tuple):\nclass LocalChunkInfo:\nclass LocalChunker:\nclass DataLayout:\nclass BasePartitioner(metaclass=abc.ABCMeta):\nclass PjittedFnWithContext(PartitionedCallable):\nclass BasePjitPartitioner(BasePartitioner):\nclass PjitPartitioner(BasePjitPartitioner):\n def __new__(cls, *names):\n def __repr__(self):\ndef pjit(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef pjit_with_cpu_fallback(\n fun: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = (),\n backend: Optional[str] = None):\ndef with_sharding_constraint(x, axis_resources):\ndef bounds_from_last_device(\n last_device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef get_coords(device: jax.lib.xla_client.Device) -> HardwareMesh:\ndef global_mesh_defined():\ndef get_mesh(model_parallel_submesh: HardwareMesh,\n input_devices: Sequence[JaxDevice] = (),\n input_local_devices: Sequence[JaxDevice] = (),\n tile_by_host_if_needed: bool = True,\n backend: Optional[str] = None) -> Mesh:\n def dh_dd_mh_md(g: int, m: int, l: int) -> Tuple[int, int, int, int]:\ndef get_cpu_mesh() -> Mesh:\ndef get_gpu_mesh(num_partitions: int) -> Mesh:\ndef default_mesh(num_partitions: int,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n backend: Optional[str] = None) -> Mesh:\n def __init__(self, global_mesh: Mesh):\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\ndef standard_logical_axis_rules(\n activation_partitioning_dims: int = 1,\n parameter_partitioning_dims: int = 1,\n additional_rules: Optional[LogicalAxisRules] = None) -> LogicalAxisRules:\ndef _id_fn(x, ix):\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None):\n def mesh(self) -> Mesh:\n def data_partition_spec(self) -> PartitionSpec:\n def get_data_layout(self,\n batch_size: Optional[int] = None,\n host_index: Optional[int] = None) -> DataLayout:\n def get_local_chunk_info(\n self, global_shape: Tuple[int, ...],\n mesh_axes: Sequence[Optional[str]]) -> LocalChunkInfo:\n def params_on_devices(self):\n def move_params_to_devices(self, train_state: TrainState,\n train_state_axes: TrainState) -> TrainState:\n def _local_chunker(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PartitionedCallable:\n def compile(self, partitioned_fn: PartitionedCallable,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n pjitted_fn,\n partition_mesh: Mesh,\n logical_axis_rules: flax_partitioning.LogicalRules = ()):\n def __call__(self, *args):\n def lower(self, *args):\n def _local_chunker(self) -> LocalChunker:\n def mesh(self) -> Mesh:\n 
def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def compile(self, partitioned_fn: PjittedFnWithContext,\n *args) -> CompiledPartitionedCallable:\n def __init__(self,\n num_partitions: Optional[int] = None,\n model_parallel_submesh: Optional[HardwareMesh] = None,\n params_on_devices: bool = True,\n backend: Optional[str] = None,\n logical_axis_rules: Optional[LogicalAxisRules] = None,\n use_cpu_pjit: Optional[bool] = False):\n def partition(\n self,\n fn: Callable, # pylint: disable=g-bare-generic\n in_axis_resources,\n out_axis_resources,\n static_argnums: Union[int, Sequence[int]] = (),\n donate_argnums: Union[int, Sequence[int]] = ()\n ) -> PjittedFnWithContext:\n def logical_axis_rules(self):\n def get_logical_axes(self, train_state: TrainState) -> TrainState:\n def get_mesh_axes(self, train_state: TrainState) -> TrainState:\n def _logical_to_mesh_axes(param_name, logical_axes):" }, { "identifier": "train_state", "path": "t5x/train_state.py", "snippet": "EMPTY_DICT = flax.core.freeze({})\nclass TrainState(typing_extensions.Protocol):\nclass FlaxOptimTrainState(flax.struct.PyTreeNode):\nclass GanOptimTrainState(FlaxOptimTrainState):\nclass InferenceState(flax.struct.PyTreeNode):\n def step(self) -> jnp.ndarray:\n def params(self) -> FrozenVariableDict:\n def param_states(self) -> FrozenVariableDict:\n def flax_mutables(self) -> FrozenVariableDict:\n def state_dict(self) -> MutableVariableDict:\n def restore_state(self, state_dict: Mapping[str, Any]) -> 'TrainState':\n def replace_params(self, params: VariableDict) -> 'TrainState':\n def replace_flax_mutables(self, flax_mutables: FrozenDict) -> 'TrainState':\n def replace_step(self, step: jnp.ndarray) -> 'TrainState':\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'TrainState':\n def as_logical_axes(self) -> 'TrainState':\ndef _validate_params_axes(params_axes, params):\ndef _split_variables_and_axes(\n variables_and_axes: FrozenVariableDict\n) -> Tuple[FrozenVariableDict, FrozenVariableDict]:\n def create(cls, optimizer_def: optimizers.OptimizerDefType,\n model_variables: FrozenVariableDict) -> 'FlaxOptimTrainState':\n def step(self) -> jnp.ndarray:\n def params(self) -> FrozenVariableDict:\n def param_states(self) -> FrozenVariableDict:\n def state_dict(self) -> MutableVariableDict:\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'FlaxOptimTrainState':\n def replace_params(self, params: VariableDict) -> 'FlaxOptimTrainState':\n def replace_flax_mutables(self,\n flax_mutables: FrozenDict) -> 'FlaxOptimTrainState':\n def replace_step(self, step: jnp.ndarray) -> 'FlaxOptimTrainState':\n def restore_state(self, state_dict: VariableDict) -> 'FlaxOptimTrainState':\n def as_logical_axes(self) -> 'FlaxOptimTrainState':\n def apply_gradient(self,\n grads,\n learning_rate,\n flax_mutables=EMPTY_DICT) -> 'FlaxOptimTrainState':\n def create(cls, model_variables: FrozenVariableDict) -> 'InferenceState':\n def param_states(self) -> FrozenVariableDict:\n def apply_gradient(self, *args, **kwargs) -> 'InferenceState':\n def state_dict(self) -> MutableMapping[str, Any]:\n def replace_step(self, step: jnp.ndarray) -> 'InferenceState':\n def replace_params(self, params: FrozenVariableDict) -> 'InferenceState':\n def replace_flax_mutables(self,\n flax_mutables: FrozenDict) -> 
'InferenceState':\n def restore_state(self, state_dict: Mapping[str, Any]) -> 'InferenceState':\n def as_logical_axes(self) -> 'InferenceState':" }, { "identifier": "trainer", "path": "t5x/trainer.py", "snippet": "def _merge_metrics(a, b):\ndef merge_metrics(a, b):\n def result(self) -> Mapping[str, Array]:\n def result(self) -> Mapping[str, clu.values.Value]:\n def result(self) -> float:\n def __call__(\n self,\n step: jnp.ndarray,\n ) -> jnp.ndarray:\n def __call__(self, metrics: MetricMapType, duration: float,\n num_steps: int) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, train_state: train_state_lib.TrainState,\n batch: BatchType) -> Tuple[train_state_lib.TrainState, MetricMapType]:\n def __call__(self, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def _make_rms_metrics(name, tree):\n def _make_max_metrics(name, tree):\n def compute_metrics(\n self, gradients: ModelWeights,\n old_train_state: train_state_lib.TrainState,\n new_train_state: train_state_lib.TrainState) -> MutableMetricMapType:\n def __init__(self):\n def close(self):\n def __del__(self):\n def _get_completion_future(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def _get_completion_time():\n def start(self, block_on: PyTreeDef = ()):\n def stop(self, block_on: PyTreeDef = ()) -> TimeFuture:\n def __init__(self, name: str, summary_dir: Optional[str] = None, log_to_wandb=False):\n def __del__(self):\n def close(self):\n def summary_writer(self) -> metric_writers.MetricWriter:\n def write_scalar(self, key: str, val: metric_writers.interface.Scalar,\n step: int):\n def write_scalars(self, step: int,\n scalars: Mapping[str, metric_writers.interface.Scalar]):\n def start_duration_timer(self, block_on: PyTreeDef = ()):\n def write_metrics_summary(self, metrics: MetricMapType, step: int,\n num_steps: int) -> MetricValueMapFuture:\n def _summarize_and_write():\n def _ensure_not_on_device(x):\n def flush(self):\n def __init__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng,\n use_wandb=False, packing_strategy=None, log_weights=None):\n def __enter__(self):\n def __exit__(self, exc_type, exc_value, traceback):\n def close(self):\n def _get_step_rng(self, step: int) -> Rng:\n def train_state(self):\n def train_state(self, train_state: PyTreeDef):\n def _weight_metric_fn(self):\n def _get_weight_metrics_fn(_params):\n def train(self,\n batch_iter: Union[Iterator[BatchType],\n clu.data.dataset_iterator.DatasetIterator],\n num_steps: int,\n start_step: Optional[int] = None) -> ArrayMapFuture:\n def compile_train(self, batch: ElementSpec) -> None:\n def eval(\n self, batch_iters: Mapping[str,\n Iterator[BatchType]], pbar_nsteps=None) -> Mapping[str, Array]:\n def compile_eval(self, batches: Mapping[str, BatchType]) -> None:\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef accumulate_grads_microbatched(\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n dropout_rng: Rng,\n num_microbatches: Optional[int],\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n) -> Tuple[train_state_lib.TrainState, 
MutableMetricMapType,\n def get_microbatch(batch: BatchType, idx: int) -> Mapping[str, jnp.ndarray]:\n def metrics_and_grad(loop_cnt, dropout_rng, flax_mutables=None):\n def per_microbatch_train_step(\n loop_cnt: int, state: Tuple[jnp.ndarray, jnp.ndarray,\n Mapping[str, jnp.ndarray],\n Optional[FlaxMutables]]\n ) -> Tuple[jnp.ndarray, jnp.ndarray, Mapping[str, jnp.ndarray],\ndef apply_grads(\n train_state: train_state_lib.TrainState,\n grad_accum: ModelWeights,\n metrics: MutableMetricMapType,\n learning_rate: jnp.ndarray,\n weight_metrics_computer: Optional[WeightMetricsComputer],\n other_state_variables: Optional[Mapping[str, Any]] = None\n) -> Tuple[train_state_lib.TrainState, MetricMapType]:\ndef eval_step(model: models.BaseModel, train_state: train_state_lib.TrainState,\n batch: jnp.ndarray) -> MetricMapType:\ndef train_with_lr(\n train_state: train_state_lib.TrainState,\n batch: BatchType,\n learning_rate: jnp.ndarray,\n dropout_rng: Rng,\n model: models.BaseModel,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n data_partition_spec: PartitionSpec = PartitionSpec(\"data\"),\n loss_fn_args=None\n):\n def __call__(self, model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str], summary_dir: Optional[str],\n train_state_axes: Any, rng: Rng) -> BaseTrainer:\n def __init__(self,\n model: models.BaseModel,\n train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n eval_names: Sequence[str],\n summary_dir: Optional[str],\n train_state_axes: Any,\n rng: Rng,\n learning_rate_fn: LearningRateCallable,\n num_microbatches: Optional[int],\n weight_metrics_computer: Optional[WeightMetricsComputer] = None,\n use_wandb=True,\n packing_strategy=None,\n log_weights=False\n ):\n def _partitioned_train_step(self) -> PartitionedTrainCallable:\n def train_step(train_state: train_state_lib.TrainState, batch: BatchType, static_args=None):\n def _partitioned_eval_step(self) -> PartitionedEvalCallable:\ndef _warn_action_not_run(action, task, metric):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self,\n metric: Tuple[str, str],\n mode: str,\n patience: int = 3,\n atol: float = 0.,\n rtol: float = 0.):\n def _compare_fn(self, current, previous):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\n def __init__(self, task: str, metric: str = \"loss\"):\n def run(self, train_state: train_state_lib.TrainState,\n metrics_by_task: Mapping[str, MetricValueMapType]) -> bool:\nclass ArrayMapFuture(typing_extensions.Protocol):\nclass MetricValueMapFuture(typing_extensions.Protocol):\nclass TimeFuture(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass SummarizeMetricsCallable(typing_extensions.Protocol):\nclass PartitionedTrainCallable(typing_extensions.Protocol):\nclass PartitionedEvalCallable(typing_extensions.Protocol):\nclass GradNormComputer(object):\nclass WeightMetricsComputer(object):\nclass _AsyncTimer(object):\nclass MetricsManager(object):\nclass PreemptionError(Exception):\nclass BaseTrainer(abc.ABC):\nclass BaseTrainerConstructor(Protocol):\nclass Trainer(BaseTrainer):\nclass ActionMode(enum.Enum):\nclass BaseAction(abc.ABC):\nclass EarlyStoppingAction(BaseAction):\nclass TerminateOnNanAction(BaseAction):\n _WEIGHT_METRICS = [\n \"weight_rms\", 
\"weight_gradient_rms\", \"weight_update_rms\", \"weight_max\"\n ]\n TRAIN = 1\n TRAIN_EVAL = 2\n INFER_EVAL = 3" }, { "identifier": "utils", "path": "t5x/utils.py", "snippet": "class EvaluatorConstructor(typing_extensions.Protocol):\nclass SaveCheckpointConfig:\nclass RestoreCheckpointConfig:\nclass CheckpointConfig:\nclass LegacyCheckpointer(orbax.checkpoint.Checkpointer):\nclass LegacyCheckpointManager(orbax.checkpoint.CheckpointManager):\nclass DatasetConfig:\nclass GDADatasetIterator(clu.data.dataset_iterator.DatasetIterator):\nclass InitFnCallable(typing_extensions.Protocol):\nclass LearningRateCallable(typing_extensions.Protocol):\nclass TrainStateInitializer:\nclass InferStepWithRngCallable(typing_extensions.Protocol):\nclass InferStepWithoutRngCallable(typing_extensions.Protocol):\nclass InferFnCallable(typing_extensions.Protocol):\nclass GetDatasetCallable(typing_extensions.Protocol):\nclass GetEvalDatasetCallable(typing_extensions.Protocol):\nclass _RegexMap(collections.abc.Mapping):\n def __call__(\n self,\n mixture_or_task_name: str,\n feature_converter: seqio.FeatureConverter,\n eval_split: str,\n use_cached: bool,\n seed: Optional[int],\n sequence_length: Optional[Mapping[str, int]],\n log_dir: Optional[str],\n use_memory_cache: bool,\n ) -> seqio.Evaluator:\n def __post_init__(self):\n def __post_init__(self):\n def __init__(self,\n *,\n save_checkpointer: Optional[checkpoints.Checkpointer] = None,\n restore_checkpointer: checkpoints.Checkpointer,\n strict: Optional[bool] = False):\n async def async_save(self, path: str, item: Any):\n async def async_restore(self, path: str, item: Optional[Any] = None) -> Any:\n def save(self,\n path: str,\n item: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = (),\n *,\n concurrent_gb: int = 128):\n def restore(self,\n path: str,\n item: Optional[train_state_lib.TrainState],\n state_transformation_fns: Sequence[\n checkpoints.RestoreStateTransformationFn] = (),\n fallback_state: Optional[Mapping[str, Any]] = None,\n lazy_parameters: bool = False) -> train_state_lib.TrainState:\n def __init__(self,\n *,\n save_cfg: Optional[SaveCheckpointConfig] = None,\n restore_cfg: RestoreCheckpointConfig,\n train_state_shape: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner,\n ds_iter: Optional[\n Union[tf.data.Iterator,\n clu.data.dataset_iterator.DatasetIterator]] = None,\n model_dir: Optional[str] = None,\n use_gda: Optional[bool] = True):\n def save(self,\n train_state: train_state_lib.TrainState,\n state_transformation_fns: Sequence[\n checkpoints.SaveStateTransformationFn] = ()):\n def restore(\n self,\n paths: Sequence[str],\n restore_cfg: RestoreCheckpointConfig,\n fallback_state: Optional[Mapping[str, Any]] = None\n ) -> Union[train_state_lib.TrainState, Sequence[train_state_lib.TrainState]]:\ndef _get_index_mappings(device_to_idxs):\ndef _create_gda(partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef, host_arrays: PyTreeDef) -> PyTreeDef:\n def _put_to_devices(x, global_shape):\n def _gda(dbs, global_shape):\n def __init__(self, iterator: clu.data.dataset_iterator.DatasetIterator,\n partitioner: partitioning.BasePartitioner,\n global_shapes: PyTreeDef):\n def __next__(self):\n def reset(self):\n def element_spec(self):\n def save(self, filename):\n def restore(self, filename):\n def iterator(self):\ndef sync_global_devices(name: str) -> None:\ndef multihost_assert_equal(input_tree, fail_message: str = ''):\ndef _hardware_uniform(\n rng_key: 
Array,\n shape: Shape,\n dtype: jnp.dtype = np.float32,\n minval: Array = np.float32(0),\n maxval: Array = np.float32(1)\n) -> Array:\ndef _hardware_bernoulli(\n rng_key: Array, p: np.ndarray = np.float32(0.5),\n shape: Shape = ()) -> Array:\ndef set_hardware_rng_ops():\ndef get_zeros_batch_like_spec(\n batch_spec: Mapping[str,\n jax.ShapeDtypeStruct]) -> Mapping[str, jnp.ndarray]:\ndef get_zeros_batch_like_dataset(dataset: tf.data.Dataset,\n batch_size=None) -> Mapping[str, jnp.ndarray]:\n def __call__(\n self, rng: Array, input_shapes: Mapping[str, Array],\n input_types: Optional[Mapping[str,\n DType]]) -> flax_scope.FrozenVariableDict:\n def __call__(self, step: jnp.ndarray) -> jnp.ndarray:\ndef create_learning_rate_scheduler(\n factors: str = 'constant * linear_warmup * rsqrt_decay',\n base_learning_rate: float = 0.5,\n warmup_steps: int = 1000,\n decay_factor: float = 0.5,\n steps_per_decay: int = 20000,\n steps_per_cycle: int = 100000,\n step_offset: int = 0,\n min_learning_rate: float = 1e-8) -> LearningRateCallable:\n def step_fn(step: jnp.ndarray) -> jnp.ndarray:\ndef steps(prefix, config, data_size=None, batch_size=None, default=ValueError):\ndef create_vision_learning_rate_scheduler(\n total_steps, batch_size=None, data_size=None,\n base=1.0, decay_type=\"stair\",\n scale_with_batchsize=False, **kw):\n def step_fn(step):\ndef get_first_valid_restore_config_and_paths(\n restore_cfgs: Sequence[RestoreCheckpointConfig]\n) -> Tuple[Optional[RestoreCheckpointConfig], Sequence[str]]:\ndef get_fallback_state(restore_cfg: RestoreCheckpointConfig,\n init_fn: Callable[[jnp.ndarray], Mapping[str, Any]],\n init_rng: jnp.ndarray) -> Optional[Mapping[str, Any]]:\n def __init__(self,\n optimizer_def: Optional[optimizers.OptimizerDefType],\n init_fn: InitFnCallable,\n input_shapes: Mapping[str, Array],\n partitioner: partitioning.BasePartitioner,\n model=None,\n input_types: Optional[Mapping[str, DType]] = None):\n def initialize_train_state(rng: Array):\n def from_scratch(self, init_rng: Array) -> train_state_lib.TrainState:\n def from_checkpoints(\n self,\n restore_cfgs: Sequence[RestoreCheckpointConfig],\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None,\n ) -> Iterable[train_state_lib.TrainState]:\n def _restore_path(path, cfg):\n def from_checkpoint(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n ds_iter: Optional[tf.data.Iterator] = None,\n init_rng: Optional[jnp.ndarray] = None\n ) -> Optional[train_state_lib.TrainState]:\n def from_checkpoint_or_scratch(\n self,\n ckpt_cfgs: Sequence[RestoreCheckpointConfig],\n *,\n init_rng: Array,\n ds_iter: Optional[tf.data.Iterator] = None) -> train_state_lib.TrainState:\ndef log_model_info(log_file: Optional[str],\n full_train_state: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner):\n def _log_info_and_write_to_file(writer, format_str, *args):\n def _log_variable(name: str, arr: Optional[np.ndarray],\n logical_axes: Optional[partitioning.AxisNames],\n mesh_axes: Optional[partitioning.PartitionSpec]):\n def __call__(self,\n params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray],\n rng: jnp.ndarray = None) -> PyTreeDef:\n def __call__(self, params: Mapping[str, Any],\n batch: Mapping[str, jnp.ndarray]) -> PyTreeDef:\n def __call__(\n self,\n ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None\n ) -> Union[_InferFnResult, _InferFnWithAuxResult]:\ndef _remove_padding(all_inferences, all_indices):\ndef 
get_infer_fn(infer_step: InferStepCallable, batch_size: int,\n train_state_axes: train_state_lib.TrainState,\n partitioner: partitioning.BasePartitioner, \n pbar=False) -> InferFnCallable:\n def infer_step_with_indices(params, batch, rng, indices):\n def infer_fn(ds: tf.data.Dataset,\n train_state: train_state_lib.TrainState,\n rng: Optional[jnp.ndarray] = None):\n def _copy_to_host_async(x):\ndef import_module(module: str):\ndef get_vocabulary(\n cfg: DatasetConfig) -> Tuple[seqio.Vocabulary, seqio.Vocabulary]:\ndef verify_matching_vocabs(cfg: DatasetConfig, model: Any):\ndef get_dataset(cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = False,\n batching_fn=None) -> tf.data.Dataset:\ndef get_dataset_inner(cfg: DatasetConfig,\n shard_info: seqio.ShardInfo,\n feature_converter_cls: Callable[...,\n seqio.FeatureConverter],\n seed: Optional[int] = None,\n num_epochs: Optional[int] = None,\n batching_fn=None\n ):\n def __call__(\n self,\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n num_epochs: Optional[int] = None,\n continue_from_last_checkpoint: bool = True\n ) -> Union[clu.data.dataset_iterator.DatasetIterator, tf.data.Dataset]:\n def __call__(\n self, cfg: DatasetConfig, shard_id: int, num_shards: int, eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter]\n ) -> Mapping[str, tf.data.Dataset]:\ndef get_training_eval_datasets(\n cfg: DatasetConfig,\n shard_id: int,\n num_shards: int,\n eval_steps: int,\n feature_converter_cls: Callable[..., seqio.FeatureConverter],\n deterministic: bool = False,\n model_dir: Optional[str] = None,\n start_step: int = 0,\n) -> Mapping[str, tf.data.Dataset]:\n def _repeat_shard_batch_take_cache(ds: tf.data.Dataset):\ndef round_vocab_size_to_multiple(vocabulary: seqio.Vocabulary,\n divisor: int = 128):\ndef flatten_dict_string_keys(x):\ndef flatten_lists(lsts: Iterable[Iterable]) -> Sequence:\n def __init__(self, kvs: Sequence[Tuple[str, Any]]):\n def __getitem__(self, key: str) -> Any:\n def __len__(self) -> int:\n def __iter__(self) -> Iterable[Tuple[re.Pattern, Any]]:\ndef override_params_axes_names(\n model_variables: flax_scope.FrozenVariableDict,\n params_axes_names_override: Sequence[Tuple[str, Tuple[str, ...]]] = ()\n) -> flax_scope.FrozenVariableDict:\ndef get_local_data(x):" }, { "identifier": "init_wandb", "path": "t5x/examples/unified_io/utils.py", "snippet": "@gin.configurable()\ndef init_wandb(name=None, group=None, entity=None, project=None):\n utils.create_learning_rate_scheduler() # Makes sure this is registered in `operative_config`\n config_str = gin.operative_config_str()\n logging.info(f\"Init wandb with group={group} name={name}\")\n wandb.init(\n group=group,\n name=name,\n entity=entity,\n project=project,\n force=True,\n notes=config_str\n )" } ]
import functools
import math
import os
import time
import warnings
import clu.data
import jax
import jax.numpy as jnp
import numpy as np
import seqio
import tensorflow as tf
import jax.profiler
import gin
from typing import Callable, Sequence, Mapping, Tuple, Type, Optional
from t5x.examples.unified_io.packing import PackingStrategy
from absl import logging
from clu import metric_writers
from jax import random
from jax.experimental import multihost_utils
from jax.experimental.global_device_array import GlobalDeviceArray
from t5x import checkpoints
from t5x import eval as eval_lib
from t5x import models
from t5x.examples.unified_io import evaluator
from t5x import partitioning
from t5x import train_state as train_state_lib
from t5x import trainer as trainer_lib
from t5x import utils
from os.path import expanduser
from t5x.examples.unified_io.utils import init_wandb
from t5x.examples.unified_io.metrics.metrics import null_metric
from t5x.examples.unified_io.data.postprocessing import return_example
from absl import app
from absl import flags
from t5x import gin_utils
15059
    metrics_by_task: A map of metrics keyed by task name.

  Returns:
    A bool indicating whether training should be halted.

  Raises:
    RuntimeError: When the metrics processed on host 0 are unexpectedly empty.
  """
  stop_training = False
  if jax.process_index() == 0:
    if not metrics_by_task:
      raise RuntimeError('Metric is unexpectedly empty on process 0')
    for action in actions.get(mode, []):
      stop_training |= action.run(train_state, metrics_by_task=metrics_by_task)
  # Broadcast result from host 0 to others.
  return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop_training)))


def train(
    *,
    model: models.BaseTransformerModel,
    train_dataset_cfg: utils.DatasetConfig,
    train_eval_dataset_cfg: Optional[utils.DatasetConfig],
    infer_eval_dataset_cfg: Optional[utils.DatasetConfig],
    checkpoint_cfg: utils.CheckpointConfig,
    partitioner: partitioning.BasePartitioner,
    trainer_cls: trainer_lib.BaseTrainerConstructor,
    model_dir: str,
    total_steps: int,
    eval_steps: int,
    eval_period: int,
    stats_period: Optional[int] = None,
    random_seed: Optional[int],
    use_hardware_rng: bool = False,
    summarize_config_fn: Callable[[str, metric_writers.MetricWriter, int], None],
    inference_evaluator_cls: utils.EvaluatorConstructor = seqio.Evaluator,
    get_dataset_fn: utils.GetDatasetCallable = utils.get_dataset,
    concurrent_metrics: bool = True,
    actions: Optional[Mapping[str, Sequence[trainer_lib.BaseAction]]] = None,
    train_eval_get_dataset_fn: utils.GetEvalDatasetCallable = utils.get_training_eval_datasets,
    run_eval_before_training: bool = False,
    use_wandb = True,
    weight_metrics="norm",
    packing_strategy: PackingStrategy = None,
    train_state_initializer_cls: Type[utils.TrainStateInitializer] = utils.TrainStateInitializer,
    use_gda: bool = True,
    verify_matching_vocabs_fn: Optional[
        Callable[[utils.DatasetConfig, models.BaseTransformerModel], None]] = utils.verify_matching_vocabs,
    shuffle_buffer_size=None,
    cycle_length=None,
    block_length=None
) -> Tuple[int, train_state_lib.TrainState]:
  """Train function.

  Args:
    model: The model object to use for training.
    train_dataset_cfg: Specification for the dataset to train with.
    train_eval_dataset_cfg: Specification for the dataset to evaluate with
      using the train metrics and no inference (e.g., uses teacher forcing).
      If None, train eval is disabled.
    infer_eval_dataset_cfg: Specification for the dataset to evaluate with
      using the inference metrics (e.g., uses sampled decoding). If None,
      inference eval is disabled.
    checkpoint_cfg: Specification for saving and restoring model parameters
      and dataset state to/from checkpoints.
    partitioner: Partitioner for model parameters and data across devices.
    trainer_cls: An implementation of BaseTrainer.
    model_dir: Path of directory to store checkpoints and metric summaries.
    total_steps: The step number to stop training after. The number of actual
      steps trained in this run will be this number minus the starting step
      from the checkpoint. If this is set to the starting step from the
      checkpoint, the model will not be compiled for training and training
      will not be run. This can be used in conjunction with
      `run_eval_before_training` to only evaluate a model.
    eval_steps: The number of batches to process for each train-eval loop.
    eval_period: The number of train steps between each evaluation (both
      train-eval and infer-eval).
    stats_period: The number of train steps between writing scalar stats. If
      None, defaults to eval_period.
    random_seed: A random seed to use for dropout and initialization. If None,
      a fast, non-deterministic hardware-based RNG is used.
    use_hardware_rng: Whether to force using the RngBitGenerator based
      hardware rng, which takes seeds and acts similarly to software PRNG in
      that it should be seed-deterministic. The new RngBitGenerator custom
      PRNG system should be reproducible for a given sharding, but the numbers
      will change for different shardings of the same model.
    summarize_config_fn: A function that takes in the model directory, a
      SummaryWriter, and the step number, and writes a summary of the
      configuration.
    inference_evaluator_cls: seqio.Evaluator class to use for inference
      evaluation, potentially with bound configuration args.
    get_dataset_fn: The callable used to get the train and train-eval datasets
      based on the DatasetConfig and shard information.
    concurrent_metrics: If True, allow metrics computation and logging to
      overlap with training. Will likely result in additional TPU memory
      usage.
    actions: A mapping of actions that run after train, eval or infer_eval, to
      inspect the model and perform useful operations, e.g., early stopping.
      The key must have a 1:1 mapping to ActionMode enum. For EVAL actions to
      actually work, this requires `concurrent_metrics` to be turned off,
      since chaining futures and mutating states concurrently might be
      error-prone.
    train_eval_get_dataset_fn: Optional callable used to get the train-eval
      datasets based on the DatasetConfig and shard information. If missing,
      it defaults to `utils.get_training_eval_datasets`.
    run_eval_before_training: If True, calculate training eval and inference
      eval metrics before training begins.
    train_state_initializer_cls: t5x.utils.TrainStateInitializer class for
      initializing partitioned TrainState from checkpoints or scratch.
    use_gda: If True, uses GlobalDeviceArray. Experimental feature.
    verify_matching_vocabs_fn: Function to validate whether the task
      vocabulary matches the model vocabulary. Should raise an exception on
      error.

  Returns:
    The tuple of (last_step, last_train_state).
  """
  if jax.process_index() == 0 and use_wandb:
    if not os.environ.get("WANDB_API_KEY"):
      use_wandb = False
      logging.warning("WANDB_API_KEY not found, wandb will not be used")
    else:
# Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

r"""Script to pretrain or finetune in JAX using a SeqIO pipeline.

"""

# Set Linen to add profiling information when constructing Modules.
# Must be set before flax imports.
# pylint:disable=g-import-not-at-top
os.environ['FLAX_PROFILE'] = 'true'
# TODO(adarob): Re-enable once users are notified and tests are updated.
os.environ['FLAX_LAZY_RNG'] = 'no'
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = os.path.join(
    expanduser("~"), ".config/gcloud/application_default_credentials.json")

# Automatically search for gin files relative to the T5X package.
_DEFAULT_GIN_SEARCH_PATHS = [
    os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
]

PyTreeDef = type(jax.tree_util.tree_structure(None))
P = partitioning.PartitionSpec
# Special key that is used to distinguish train metrics.
TRAIN_METRIC_KEY = 'train'
# String keys that are acceptable from config.
_ACTION_KEYS = frozenset(trainer_lib.ActionMode.__members__.keys())


def run_actions(
    mode: trainer_lib.ActionMode,
    actions: trainer_lib.ActionMapType,
    train_state: train_state_lib.TrainState,
    metrics_by_task: Mapping[str, trainer_lib.MetricValueMapType]) -> bool:
  """Invokes all actions on the given mode on host 0, then broadcasts to all.

  Args:
    mode: The mode to run the actions. e.g., if mode is `train`, only actions
      configured to run with `train` mode will be invoked.
    actions: A mapping of actions that run after train, eval or infer_eval, to
      inspect the model and perform useful operations, e.g., early stopping.
    train_state: The current train_state of the trainer.
    metrics_by_task: A map of metrics keyed by task name.

  Returns:
    A bool indicating whether training should be halted.

  Raises:
    RuntimeError: When the metrics processed on host 0 are unexpectedly empty.
  """
  stop_training = False
  if jax.process_index() == 0:
    if not metrics_by_task:
      raise RuntimeError('Metric is unexpectedly empty on process 0')
    for action in actions.get(mode, []):
      stop_training |= action.run(train_state, metrics_by_task=metrics_by_task)
  # Broadcast result from host 0 to others.
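  # Editor's note (not in the original source): broadcast_one_to_all replicates
  # host 0's value to every process so that all hosts agree on whether to stop.
  # The Python bool is wrapped in jnp.array because the broadcast operates on
  # arrays, and bool() unwraps the replicated 0-d array afterwards.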
  return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop_training)))


def train(
    *,
    model: models.BaseTransformerModel,
    train_dataset_cfg: utils.DatasetConfig,
    train_eval_dataset_cfg: Optional[utils.DatasetConfig],
    infer_eval_dataset_cfg: Optional[utils.DatasetConfig],
    checkpoint_cfg: utils.CheckpointConfig,
    partitioner: partitioning.BasePartitioner,
    trainer_cls: trainer_lib.BaseTrainerConstructor,
    model_dir: str,
    total_steps: int,
    eval_steps: int,
    eval_period: int,
    stats_period: Optional[int] = None,
    random_seed: Optional[int],
    use_hardware_rng: bool = False,
    summarize_config_fn: Callable[[str, metric_writers.MetricWriter, int],
                                  None],
    inference_evaluator_cls: utils.EvaluatorConstructor = seqio.Evaluator,
    get_dataset_fn: utils.GetDatasetCallable = utils.get_dataset,
    concurrent_metrics: bool = True,
    actions: Optional[Mapping[str, Sequence[trainer_lib.BaseAction]]] = None,
    train_eval_get_dataset_fn: utils.GetEvalDatasetCallable = utils
    .get_training_eval_datasets,
    run_eval_before_training: bool = False,
    use_wandb=True,
    weight_metrics="norm",
    packing_strategy: Optional[PackingStrategy] = None,
    train_state_initializer_cls: Type[
        utils.TrainStateInitializer] = utils.TrainStateInitializer,
    use_gda: bool = True,
    verify_matching_vocabs_fn: Optional[
        Callable[[utils.DatasetConfig, models.BaseTransformerModel],
                 None]] = utils.verify_matching_vocabs,
    shuffle_buffer_size=None,
    cycle_length=None,
    block_length=None
) -> Tuple[int, train_state_lib.TrainState]:
  """Train function.

  Args:
    model: The model object to use for training.
    train_dataset_cfg: Specification for the dataset to train with.
    train_eval_dataset_cfg: Specification for the dataset to evaluate with
      using the train metrics and no inference (e.g., uses teacher forcing).
      If None, train eval is disabled.
    infer_eval_dataset_cfg: Specification for the dataset to evaluate with
      using the inference metrics (e.g., uses sampled decoding). If None,
      inference eval is disabled.
    checkpoint_cfg: Specification for saving and restoring model parameters
      and dataset state to/from checkpoints.
    partitioner: Partitioner for model parameters and data across devices.
    trainer_cls: An implementation of BaseTrainer.
    model_dir: Path of directory to store checkpoints and metric summaries.
    total_steps: The step number to stop training after. The number of actual
      steps trained in this run will be this number minus the starting step
      from the checkpoint. If this is set to the starting step from the
      checkpoint, the model will not be compiled for training and training
      will not be run. This can be used in conjunction with
      `run_eval_before_training` to only evaluate a model.
    eval_steps: The number of batches to process for each train-eval loop.
    eval_period: The number of train steps between each evaluation (both
      train-eval and infer-eval).
    stats_period: The number of train steps between writing scalar stats. If
      None, defaults to eval_period.
    random_seed: A random seed to use for dropout and initialization. If
      None, a fast, non-deterministic hardware-based RNG is used.
    use_hardware_rng: Whether to force using the RngBitGenerator based
      hardware rng, which takes seeds and acts similarly to software PRNG in
      that it should be seed-deterministic. The new RngBitGenerator custom
      PRNG system should be reproducible for a given sharding, but the
      numbers will change for different shardings of the same model.
    summarize_config_fn: A function that takes in the model directory, a
      SummaryWriter, and the step number, and writes a summary of the
      configuration.
    inference_evaluator_cls: seqio.Evaluator class to use for inference
      evaluation, potentially with bound configuration args.
    get_dataset_fn: The callable used to get the train and train-eval
      datasets based on the DatasetConfig and shard information.
    concurrent_metrics: If True, allow metrics computation and logging to
      overlap with training. Will likely result in additional TPU memory
      usage.
    actions: A mapping of actions that run after train, eval or infer_eval,
      to inspect the model and perform useful operations, e.g., early
      stopping. The key must have a 1:1 mapping to the ActionMode enum. For
      EVAL actions to actually work, this requires `concurrent_metrics` to be
      turned off, since chaining futures and mutating states concurrently
      might be error-prone.
    train_eval_get_dataset_fn: Optional callable used to get the train-eval
      datasets based on the DatasetConfig and shard information. If missing,
      it defaults to `utils.get_training_eval_datasets`.
    run_eval_before_training: If True, calculate training eval and inference
      eval metrics before training begins.
    train_state_initializer_cls: t5x.utils.TrainStateInitializer class for
      initializing partitioned TrainState from checkpoints or scratch.
    use_gda: If True, uses GlobalDeviceArray. Experimental feature.
    verify_matching_vocabs_fn: Function to validate whether the task
      vocabulary matches the model vocabulary. Should raise an exception on
      error.

  Returns:
    The tuple of (last_step, last_train_state).
  """
  if jax.process_index() == 0 and use_wandb:
    if not os.environ.get("WANDB_API_KEY"):
      use_wandb = False
      logging.warning("WANDB_API_KEY not found, wandb will not be used")
    else:
      init_wandb()
9
2023-12-12 20:23:33+00:00
24k
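A note on the `run_actions` snippet in the record above: only host 0 actually inspects the metrics and runs the configured actions; its stop/continue decision is then replicated to every process with `multihost_utils.broadcast_one_to_all`, so all hosts halt (or keep training) consistently. Below is a minimal, self-contained sketch of that pattern, assuming a multi-host JAX setup; the `decide_stop` helper and the "loss" threshold are illustrative assumptions, not part of t5x, whose real actions are `trainer_lib.BaseAction` objects dispatched per `ActionMode`.

import jax
import jax.numpy as jnp
from jax.experimental import multihost_utils

def decide_stop(metrics_by_task, threshold=0.01):
  # Hypothetical early-stopping predicate; assumes each task's metrics are
  # a dict with a "loss" entry.
  stop = False
  if jax.process_index() == 0:
    # Only process 0 inspects metrics; other processes hold a placeholder.
    losses = [m.get('loss', float('inf')) for m in metrics_by_task.values()]
    stop = bool(losses) and min(losses) < threshold
  # Replicate host 0's decision so every process makes the same choice.
  return bool(multihost_utils.broadcast_one_to_all(jnp.array(stop)))

On a single-process run, `broadcast_one_to_all` simply returns host 0's value, so the same code works in both settings.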
alibaba/animate-anything
train.py
[ { "identifier": "VideoJsonDataset", "path": "utils/dataset.py", "snippet": "class VideoJsonDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n video_dir: str = \"./data\",\n video_json: str = \"\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n cache_latents = False,\n motion_threshold = 50,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n self.video_dir = video_dir\n self.video_files = json.load(open(video_json))\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n self.cache_latents = cache_latents\n self.motion_threshold = motion_threshold\n self.transform = T.Compose([\n #T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False),\n T.Resize(min(height, width), antialias=False),\n T.CenterCrop([height, width])\n ])\n\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n \n @staticmethod\n def __getname__(): return 'video_json'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n mask = None\n try:\n item = self.video_files[index]\n video_path = os.path.join(self.video_dir, item['video'])\n cache_path = os.path.splitext(video_path)[0] + '.pt'\n if self.cache_latents and os.path.exists(cache_path):\n return torch.load(cache_path, map_location='cpu')\n\n prompt = item['caption']\n if self.fallback_prompt == \"<no_text>\":\n prompt = \"\"\n vr = decord.VideoReader(video_path)\n video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)\n except Exception as err:\n print(\"read video error\", err, video_path)\n return self.__getitem__(index+1)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n example = {\n \"pixel_values\": normalize_input(video), \n \"prompt_ids\": prompt_ids, \n \"text_prompt\": prompt, \n 'cache_path': cache_path,\n 'dataset': self.__getname__()\n }\n mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())\n example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy())\n if example['motion'] < self.motion_threshold:\n return self.__getitem__(random.randint(0, len(self)-1))\n return example" }, { "identifier": "SingleVideoDataset", "path": "utils/dataset.py", "snippet": "class SingleVideoDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n frame_step: int = 1,\n single_video_path: str = \"\",\n single_video_prompt: str = \"\",\n use_caption: bool = False,\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n self.frames = []\n self.index = 1\n\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.n_sample_frames = n_sample_frames\n self.frame_step = frame_step\n\n self.single_video_path = single_video_path\n self.single_video_prompt = single_video_prompt\n\n self.width = width\n self.height = height\n def create_video_chunks(self):\n # Create a list of frames separated by sample frames\n # [(1,2,3), (4,5,6), ...]\n vr = decord.VideoReader(self.single_video_path)\n vr_range = range(1, len(vr), self.frame_step)\n\n self.frames = list(self.chunk(vr_range, 
self.n_sample_frames))\n\n # Delete any list that contains an out of range index.\n for i, inner_frame_nums in enumerate(self.frames):\n for frame_num in inner_frame_nums:\n if frame_num > len(vr):\n print(f\"Removing out of range index list at position: {i}...\")\n del self.frames[i]\n\n return self.frames\n\n def chunk(self, it, size):\n it = iter(it)\n return iter(lambda: tuple(islice(it, size)), ())\n\n def get_frame_batch(self, vr, resize=None):\n index = self.index\n frames = vr.get_batch(self.frames[self.index])\n video = rearrange(frames, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n \n return video, vr \n\n def single_video_batch(self, index):\n train_data = self.single_video_path\n self.index = index\n\n if train_data.endswith(self.vid_types):\n video, _ = self.process_video_wrapper(train_data)\n\n prompt = self.single_video_prompt\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return video, prompt, prompt_ids\n else:\n raise ValueError(f\"Single video is not a video type. Types: {self.vid_types}\")\n \n @staticmethod\n def __getname__(): return 'single_video'\n\n def __len__(self):\n \n return len(self.create_video_chunks())\n\n def __getitem__(self, index):\n\n video, prompt, prompt_ids = self.single_video_batch(index)\n\n example = {\n \"pixel_values\": normalize_input(video),\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt,\n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "ImageDataset", "path": "utils/dataset.py", "snippet": "class ImageDataset(Dataset):\n \n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n base_width: int = 256,\n base_height: int = 256,\n use_caption: bool = False,\n image_dir: str = '',\n single_img_prompt: str = '',\n use_bucketing: bool = False,\n fallback_prompt: str = '',\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.img_types = (\".png\", \".jpg\", \".jpeg\", '.bmp')\n self.use_bucketing = use_bucketing\n #self.image_dir = self.get_images_list(image_dir)\n self.image_dir_path = image_dir\n self.image_dir = json.load(open(kwargs['image_json']))\n self.fallback_prompt = fallback_prompt\n\n self.use_caption = use_caption\n self.single_img_prompt = single_img_prompt\n\n self.width = width\n self.height = height\n\n def get_images_list(self, image_dir):\n if os.path.exists(image_dir):\n imgs = [x for x in os.listdir(image_dir) if x.endswith(self.img_types)]\n full_img_dir = []\n\n for img in imgs: \n full_img_dir.append(f\"{image_dir}/{img}\")\n\n return sorted(full_img_dir)\n\n return ['']\n\n def image_batch(self, index):\n train_data = self.image_dir[index]\n img, prompt = train_data['image'], train_data['caption']\n img = os.path.join(self.image_dir_path, img)\n try:\n img = torchvision.io.read_image(img, mode=torchvision.io.ImageReadMode.RGB)\n except:\n img = T.transforms.PILToTensor()(Image.open(img).convert(\"RGB\"))\n\n width = self.width\n height = self.height\n\n if self.use_bucketing:\n _, h, w = img.shape\n width, height = sensible_buckets(width, height, w, h)\n \n resize = T.transforms.Resize((height, width), 
antialias=True)\n\n img = resize(img) \n img = repeat(img, 'c h w -> f c h w', f=1)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return img, prompt, prompt_ids\n\n @staticmethod\n def __getname__(): return 'image'\n \n def __len__(self):\n # Image directory\n return len(self.image_dir)\n\n def __getitem__(self, index):\n img, prompt, prompt_ids = self.image_batch(index)\n example = {\n \"pixel_values\": normalize_input(img),\n \"frames\": img,\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt, \n 'dataset': self.__getname__()\n }\n\n return example" }, { "identifier": "VideoFolderDataset", "path": "utils/dataset.py", "snippet": "class VideoFolderDataset(Dataset):\n def __init__(\n self,\n tokenizer=None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 16,\n fps: int = 8,\n path: str = \"./data\",\n fallback_prompt: str = \"\",\n use_bucketing: bool = False,\n **kwargs\n ):\n self.tokenizer = tokenizer\n self.use_bucketing = use_bucketing\n\n self.fallback_prompt = fallback_prompt\n\n self.video_files = glob(f\"{path}/*.mp4\")\n\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.fps = fps\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def get_frame_batch(self, vr, resize=None):\n n_sample_frames = self.n_sample_frames\n native_fps = vr.get_avg_fps()\n \n every_nth_frame = max(1, round(native_fps / self.fps))\n every_nth_frame = min(len(vr), every_nth_frame)\n \n effective_length = len(vr) // every_nth_frame\n if effective_length < n_sample_frames:\n n_sample_frames = effective_length\n raise RuntimeError(\"not enough frames\")\n\n effective_idx = random.randint(0, (effective_length - n_sample_frames))\n idxs = every_nth_frame * np.arange(effective_idx, effective_idx + n_sample_frames)\n\n video = vr.get_batch(idxs)\n video = rearrange(video, \"f h w c -> f c h w\")\n\n if resize is not None: video = resize(video)\n return video, vr\n \n def process_video_wrapper(self, vid_path):\n video, vr = process_video(\n vid_path,\n self.use_bucketing,\n self.width, \n self.height, \n self.get_frame_buckets, \n self.get_frame_batch\n )\n return video, vr\n \n @staticmethod\n def __getname__(): return 'folder'\n\n def __len__(self):\n return len(self.video_files)\n\n def __getitem__(self, index):\n try:\n video, _ = self.process_video_wrapper(self.video_files[index])\n except Exception as err:\n print(\"read video error\", self.video_files[index])\n video, _ = self.process_video_wrapper(self.video_files[index+1])\n\n if os.path.exists(self.video_files[index].replace(\".mp4\", \".txt\")):\n with open(self.video_files[index].replace(\".mp4\", \".txt\"), \"r\") as f:\n lines = f.readlines()\n prompt = random.choice(lines)\n else:\n prompt = self.fallback_prompt\n\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n\n return {\"pixel_values\": normalize_input(video[0]), \"frames\": video[0],\n \"prompt_ids\": prompt_ids, \"text_prompt\": prompt, 'dataset': self.__getname__()}" }, { "identifier": "CachedDataset", "path": "utils/dataset.py", "snippet": "class CachedDataset(Dataset):\n def __init__(self,cache_dir: str = ''):\n self.cache_dir = cache_dir\n self.cached_data_list = self.get_files_list()\n\n def get_files_list(self):\n tensors_list = [f\"{self.cache_dir}/{x}\" for x in os.listdir(self.cache_dir) if x.endswith('.pt')]\n return sorted(tensors_list)\n\n 
def __len__(self):\n return len(self.cached_data_list)\n\n def __getitem__(self, index):\n cached_latent = torch.load(self.cached_data_list[index], map_location='cuda:0')\n return cached_latent" }, { "identifier": "VideoBLIPDataset", "path": "utils/dataset.py", "snippet": "class VideoBLIPDataset(Dataset):\n def __init__(\n self,\n tokenizer = None,\n width: int = 256,\n height: int = 256,\n n_sample_frames: int = 4,\n sample_start_idx: int = 1,\n fps: int = 1,\n json_path: str =\"\",\n json_data = None,\n vid_data_key: str = \"video_path\",\n preprocessed: bool = False,\n use_bucketing: bool = False,\n cache_latents: bool = False,\n motion_threshold = 50,\n **kwargs\n ):\n self.vid_types = (\".mp4\", \".avi\", \".mov\", \".webm\", \".flv\", \".mjpeg\")\n self.use_bucketing = use_bucketing\n self.tokenizer = tokenizer\n self.preprocessed = preprocessed\n \n self.vid_data_key = vid_data_key\n self.train_data = self.load_from_json(json_path, json_data)\n self.cache_latents = cache_latents\n self.motion_threshold = motion_threshold\n self.width = width\n self.height = height\n\n self.n_sample_frames = n_sample_frames\n self.sample_start_idx = sample_start_idx\n self.fps = fps\n self.transform = T.Compose([\n #T.RandomResizedCrop(size=(height, width), scale=(0.8, 1.0), ratio=(width/height, width/height), antialias=False)\n T.Resize(min(height, width), antialias=False),\n T.CenterCrop([height, width])\n ])\n\n def build_json(self, json_data):\n extended_data = []\n for data in json_data['data']:\n for nested_data in data['data']:\n self.build_json_dict(\n data, \n nested_data, \n extended_data\n )\n json_data = extended_data\n return json_data\n\n def build_json_dict(self, data, nested_data, extended_data):\n clip_path = nested_data['clip_path'] if 'clip_path' in nested_data else None\n \n extended_data.append({\n self.vid_data_key: data[self.vid_data_key],\n 'frame_index': nested_data['frame_index'],\n 'prompt': nested_data['prompt'],\n 'clip_path': clip_path\n })\n \n def load_from_json(self, path, json_data):\n try:\n with open(path) as jpath:\n print(f\"Loading JSON from {path}\")\n json_data = json.load(jpath)\n\n return self.build_json(json_data)\n\n except:\n import traceback\n traceback.print_exc()\n self.train_data = []\n print(\"Non-existant JSON path. 
Skipping.\")\n \n def validate_json(self, base_path, path):\n return os.path.exists(f\"{base_path}/{path}\")\n\n def get_frame_buckets(self, vr):\n _, h, w = vr[0].shape \n width, height = sensible_buckets(self.width, self.height, h, w)\n resize = T.transforms.Resize((height, width), antialias=True)\n\n return resize\n\n def train_data_batch(self, index):\n vid_data = self.train_data[index]\n # Get video prompt\n prompt = vid_data['prompt']\n # If we are training on individual clips.\n if 'clip_path' in self.train_data[index] and \\\n self.train_data[index]['clip_path'] is not None:\n clip_path = vid_data['clip_path']\n else:\n clip_path = vid_data[self.vid_data_key]\n # Get the frame of the current index.\n self.sample_start_idx = vid_data['frame_index']\n cache_path = os.path.splitext(clip_path)[0] + '.pt'\n if self.cache_latents and os.path.exists(cache_path):\n return torch.load(cache_path, map_location='cpu')\n\n vr = decord.VideoReader(clip_path)\n video = get_frame_batch(self.n_sample_frames, self.fps, vr, self.transform)\n prompt_ids = get_prompt_ids(prompt, self.tokenizer)\n example = {\n \"pixel_values\": normalize_input(video),\n \"prompt_ids\": prompt_ids,\n \"text_prompt\": prompt,\n 'dataset': self.__getname__(),\n 'cache_path': cache_path,\n }\n mask = get_moved_area_mask(video.permute([0,2,3,1]).numpy())\n example['mask'] = mask\n example['motion'] = calculate_motion_score(video.permute([0,2,3,1]).numpy())\n return example\n \n\n @staticmethod\n def __getname__(): return 'video_blip'\n\n def __len__(self):\n if self.train_data is not None:\n return len(self.train_data)\n else: \n return 0\n\n def __getitem__(self, index):\n example = self.train_data_batch(index)\n if example['motion'] < self.motion_threshold:\n return self.__getitem__(random.randint(0, len(self)-1))\n return example" }, { "identifier": "UNet3DConditionModel", "path": "models/unet_3d_condition_mask.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n r\"\"\"\n UNet3DConditionModel is a conditional 2D UNet model that takes in a noisy sample, conditional state, and a timestep\n and returns sample shaped output.\n\n This model inherits from [`ModelMixin`]. 
Check the superclass documentation for the generic methods the library\n implements for all the models (such as downloading or saving, etc.)\n\n Parameters:\n sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):\n Height and width of input/output sample.\n in_channels (`int`, *optional*, defaults to 4): The number of channels in the input sample.\n out_channels (`int`, *optional*, defaults to 4): The number of channels in the output.\n down_block_types (`Tuple[str]`, *optional*, defaults to `(\"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"CrossAttnDownBlock2D\", \"DownBlock2D\")`):\n The tuple of downsample blocks to use.\n up_block_types (`Tuple[str]`, *optional*, defaults to `(\"UpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\", \"CrossAttnUpBlock2D\",)`):\n The tuple of upsample blocks to use.\n block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):\n The tuple of output channels for each block.\n layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.\n downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.\n mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.\n act_fn (`str`, *optional*, defaults to `\"silu\"`): The activation function to use.\n norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.\n If `None`, it will skip the normalization and activation layers in post-processing\n norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.\n cross_attention_dim (`int`, *optional*, defaults to 1280): The dimension of the cross attention features.\n attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.\n \"\"\"\n\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n up_block_types: Tuple[str] = (\"UpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\", \"CrossAttnUpBlock3D\"),\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1024,\n attention_head_dim: Union[int, Tuple[int]] = 64,\n motion_mask = False,\n motion_strength = False,\n ):\n super().__init__()\n self.motion_mask = motion_mask\n self.motion_strength = motion_strength\n print(f\"motion mask {self.motion_mask}, motion_strength {self.motion_strength}\")\n self.sample_size = sample_size\n self.gradient_checkpointing = False\n # Check inputs\n if len(down_block_types) != len(up_block_types):\n raise ValueError(\n f\"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}.\"\n )\n\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. 
`down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_out_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n self.conv_in2 = nn.Conv2d(\n 5, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n self.time_proj = Timesteps(block_out_channels[0], True, 0)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n cond_proj_dim=block_out_channels[0],\n )\n\n self.motion_proj = Timesteps(block_out_channels[0], True, 0)\n self.motion_embedding = nn.Sequential(\n nn.Linear(timestep_input_dim, time_embed_dim), nn.SiLU(),\n nn.Linear(time_embed_dim, time_embed_dim))\n nn.init.zeros_(self.motion_embedding[-1].weight)\n nn.init.zeros_(self.motion_embedding[-1].bias)\n\n self.transformer_in = TransformerTemporalModel(\n num_attention_heads=8,\n attention_head_dim=attention_head_dim,\n in_channels=block_out_channels[0],\n num_layers=1,\n )\n\n # class embedding\n self.down_blocks = nn.ModuleList([])\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=False,\n )\n self.down_blocks.append(down_block)\n\n # mid\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=False,\n )\n # count how many layers upsample the images\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n 
num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=False,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n if norm_num_groups is not None:\n self.conv_norm_out = nn.GroupNorm(\n num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps\n )\n self.conv_act = nn.SiLU()\n else:\n self.conv_norm_out = None\n self.conv_act = None\n\n conv_out_padding = (conv_out_kernel - 1) // 2\n self.conv_out = nn.Conv2d(\n block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding\n )\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, value=False):\n self.gradient_checkpointing = value\n self.mid_block.gradient_checkpointing = value\n for module in self.down_blocks + self.up_blocks:\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value \n \n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n condition_latent: torch.Tensor,\n mask: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n motion = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, num_frames, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet3DConditionOutput`] instead of a plain tuple.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Returns:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet3DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2**self.num_upsamplers\n sample = torch.cat([condition_latent, sample], dim=2)\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n num_frames = sample.shape[2]\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n if self.motion_strength and motion is not None:\n timestep_cond = self.motion_proj(motion).to(dtype=self.dtype)\n emb = self.time_embedding(t_emb, timestep_cond)\n #emb += self.motion_embedding(m_emb)\n else:\n emb = self.time_embedding(t_emb, timestep_cond)\n emb = emb.repeat_interleave(repeats=num_frames, dim=0)\n encoder_hidden_states = encoder_hidden_states.repeat_interleave(repeats=num_frames, dim=0)\n\n # 2. pre-process\n if self.motion_mask and mask is not None:\n mask = repeat(mask , 'b 1 1 h w -> (t b) 1 f h w', t=sample.shape[0]//mask.shape[0], f=sample.shape[2])\n sample = torch.cat([mask, sample], dim=1)\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in2(sample)\n else:\n sample = sample.permute(0, 2, 1, 3, 4).reshape((sample.shape[0] * num_frames, -1) + sample.shape[3:])\n sample = self.conv_in(sample)\n\n if num_frames > 1:\n if self.gradient_checkpointing:\n sample = transformer_g_c(self.transformer_in, sample, num_frames)\n else:\n sample = self.transformer_in(sample, num_frames=num_frames).sample\n\n # 3. 
down\n down_block_res_samples = (sample,)\n for i, downsample_block in enumerate(self.down_blocks):\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb, num_frames=num_frames)\n \n down_block_res_samples += res_samples\n\n if down_block_additional_residuals is not None:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n\n if mid_block_additional_residual is not None:\n sample = sample + mid_block_additional_residual\n\n # 5. up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets) :]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n num_frames=num_frames,\n cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n upsample_size=upsample_size,\n num_frames=num_frames,\n )\n\n # 6. 
post-process\n if self.conv_norm_out:\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n\n sample = self.conv_out(sample)\n\n # reshape to (batch, channel, framerate, width, height)\n sample = sample[None, :].reshape((-1, num_frames) + sample.shape[1:]).permute(0, 2, 1, 3, 4)\n sample = sample[:,:,1:]\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)" }, { "identifier": "LatentToVideoPipeline", "path": "models/pipeline.py", "snippet": "class LatentToVideoPipeline(TextToVideoSDPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt = None,\n height= None,\n width= None,\n num_frames: int = 16,\n num_inference_steps: int = 50,\n guidance_scale= 9.0,\n negative_prompt= None,\n eta: float = 0.0,\n generator= None,\n latents= None,\n prompt_embeds= None,\n negative_prompt_embeds= None,\n output_type= \"np\",\n return_dict: bool = True,\n callback= None,\n callback_steps: int = 1,\n cross_attention_kwargs= None,\n condition_latent=None,\n mask=None,\n timesteps=None,\n motion=None,\n ):\n r\"\"\"\n Function invoked when calling the pipeline for generation.\n\n Args:\n prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts to guide the video generation. If not defined, one has to pass `prompt_embeds`.\n instead.\n height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The height in pixels of the generated video.\n width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):\n The width in pixels of the generated video.\n num_frames (`int`, *optional*, defaults to 16):\n The number of video frames that are generated. Defaults to 16 frames which at 8 frames per seconds\n amounts to 2 seconds of video.\n num_inference_steps (`int`, *optional*, defaults to 50):\n The number of denoising steps. More denoising steps usually lead to a higher quality videos at the\n expense of slower inference.\n guidance_scale (`float`, *optional*, defaults to 7.5):\n Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).\n `guidance_scale` is defined as `w` of equation 2. of [Imagen\n Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >\n 1`. Higher guidance scale encourages to generate videos that are closely linked to the text `prompt`,\n usually at the expense of lower video quality.\n negative_prompt (`str` or `List[str]`, *optional*):\n The prompt or prompts not to guide the video generation. If not defined, one has to pass\n `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is\n less than `1`).\n eta (`float`, *optional*, defaults to 0.0):\n Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to\n [`schedulers.DDIMScheduler`], will be ignored for others.\n generator (`torch.Generator` or `List[torch.Generator]`, *optional*):\n One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)\n to make generation deterministic.\n latents (`torch.FloatTensor`, *optional*):\n Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for video\n generation. Can be used to tweak the same generation with different prompts. If not provided, a latents\n tensor will ge generated by sampling using the supplied random `generator`. 
Latents should be of shape\n `(batch_size, num_channel, num_frames, height, width)`.\n prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not\n provided, text embeddings will be generated from `prompt` input argument.\n negative_prompt_embeds (`torch.FloatTensor`, *optional*):\n Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt\n weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input\n argument.\n output_type (`str`, *optional*, defaults to `\"np\"`):\n The output format of the generate video. Choose between `torch.FloatTensor` or `np.array`.\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] instead of a\n plain tuple.\n callback (`Callable`, *optional*):\n A function that will be called every `callback_steps` steps during inference. The function will be\n called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.\n callback_steps (`int`, *optional*, defaults to 1):\n The frequency at which the `callback` function will be called. If not specified, the callback will be\n called at every step.\n cross_attention_kwargs (`dict`, *optional*):\n A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under\n `self.processor` in\n [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).\n\n Examples:\n\n Returns:\n [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] or `tuple`:\n [`~pipelines.stable_diffusion.TextToVideoSDPipelineOutput`] if `return_dict` is True, otherwise a `tuple.\n When returning a tuple, the first element is a list with the generated frames.\n \"\"\"\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n num_images_per_prompt = 1\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt, height, width, callback_steps, negative_prompt, prompt_embeds, negative_prompt_embeds\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n #device = self._execution_device\n device = latents.device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds = self._encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n if timesteps is None:\n timesteps = self.scheduler.timesteps\n else:\n num_inference_steps = len(timesteps)\n # 5. Prepare latent variables. 
do nothing\n\n # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n uncondition_latent = condition_latent\n condition_latent = torch.cat([uncondition_latent, condition_latent]) if do_classifier_free_guidance else condition_latent \n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n if motion is not None:\n motion = torch.tensor(motion, device=device)\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n condition_latent=condition_latent,\n mask=mask,\n motion=motion\n ).sample\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # reshape latents\n bsz, channel, frames, width, height = latents.shape\n latents = latents.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n noise_pred = noise_pred.permute(0, 2, 1, 3, 4).reshape(bsz * frames, channel, width, height)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample\n\n # reshape latents back\n latents = latents[None, :].reshape(bsz, frames, channel, width, height).permute(0, 2, 1, 3, 4)\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n video_tensor = self.decode_latents(latents)\n\n if output_type == \"pt\":\n video = video_tensor\n else:\n video = tensor2vid(video_tensor)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (video, latents)\n\n return TextToVideoSDPipelineOutput(frames=video)" }, { "identifier": "LoraHandler", "path": "utils/lora_handler.py", "snippet": "class LoraHandler(object):\n def __init__(\n self, \n version: LORA_VERSIONS = LoraVersions.cloneofsimo, \n use_unet_lora: bool = False,\n use_text_lora: bool = False,\n save_for_webui: bool = False,\n only_for_webui: bool = False,\n lora_bias: str = 'none',\n unet_replace_modules: list = ['UNet3DConditionModel'],\n text_encoder_replace_modules: list = ['CLIPEncoderLayer']\n ):\n self.version = version\n self.lora_loader = self.get_lora_func(func_type=LoraFuncTypes.loader)\n self.lora_injector = self.get_lora_func(func_type=LoraFuncTypes.injector)\n self.lora_bias = lora_bias\n self.use_unet_lora = use_unet_lora\n self.use_text_lora = use_text_lora\n self.save_for_webui = save_for_webui\n self.only_for_webui = only_for_webui\n self.unet_replace_modules = unet_replace_modules\n self.text_encoder_replace_modules = text_encoder_replace_modules\n self.use_lora = any([use_text_lora, use_unet_lora])\n\n if self.use_lora:\n print(f\"Using LoRA Version: {self.version}\")\n\n def is_cloneofsimo_lora(self):\n return 
self.version == LoraVersions.cloneofsimo\n\n def is_stable_lora(self):\n return self.version == LoraVersions.stable_lora\n\n def get_lora_func(self, func_type: LORA_FUNC_TYPES = LoraFuncTypes.loader):\n\n if self.is_cloneofsimo_lora():\n\n if func_type == LoraFuncTypes.loader:\n return monkeypatch_or_replace_lora_extended\n\n if func_type == LoraFuncTypes.injector:\n return inject_trainable_lora_extended\n\n if self.is_stable_lora():\n\n if func_type == LoraFuncTypes.loader:\n return load_lora\n\n if func_type == LoraFuncTypes.injector:\n return add_lora_to\n \n assert \"LoRA Version does not exist.\"\n\n def check_lora_ext(self, lora_file: str):\n return lora_file.endswith(tuple(LORA_FILE_TYPES))\n\n def get_lora_file_path(\n self, \n lora_path: str, \n model: Union[UNet3DConditionModel, CLIPTextModel]\n ):\n if os.path.exists(lora_path):\n lora_filenames = [fns for fns in os.listdir(lora_path)]\n is_lora = self.check_lora_ext(lora_path)\n\n is_unet = isinstance(model, UNet3DConditionModel)\n is_text = isinstance(model, CLIPTextModel)\n idx = 0 if is_unet else 1\n\n base_name = FILE_BASENAMES[idx]\n \n for lora_filename in lora_filenames:\n is_lora = self.check_lora_ext(lora_filename)\n if not is_lora:\n continue\n \n if base_name in lora_filename:\n return os.path.join(lora_path, lora_filename)\n\n return None\n\n def handle_lora_load(self, file_name:str, lora_loader_args: dict = None):\n self.lora_loader(**lora_loader_args)\n print(f\"Successfully loaded LoRA from: {file_name}\")\n \n def load_lora(self, model, lora_path: str = '', lora_loader_args: dict = None,):\n try:\n lora_file = self.get_lora_file_path(lora_path, model)\n\n if lora_file is not None:\n lora_loader_args.update({\"lora_path\": lora_file})\n self.handle_lora_load(lora_file, lora_loader_args)\n\n else:\n print(f\"Could not load LoRAs for {model.__class__.__name__}. 
Injecting new ones instead...\")\n\n except Exception as e:\n print(f\"An error occured while loading a LoRA file: {e}\")\n \n def get_lora_func_args(self, lora_path, use_lora, model, replace_modules, r, dropout, lora_bias):\n return_dict = lora_args.copy()\n \n if self.is_cloneofsimo_lora():\n return_dict = filter_dict(return_dict, keys=CLONE_OF_SIMO_KEYS)\n return_dict.update({\n \"model\": model,\n \"loras\": self.get_lora_file_path(lora_path, model),\n \"target_replace_module\": replace_modules,\n \"r\": r\n })\n\n if self.is_stable_lora():\n KEYS = ['model', 'lora_path']\n return_dict = filter_dict(return_dict, KEYS)\n \n return_dict.update({'model': model, 'lora_path': lora_path})\n\n return return_dict\n\n def do_lora_injection(\n self, \n model, \n replace_modules, \n bias='none',\n dropout=0,\n r=4,\n lora_loader_args=None,\n ): \n REPLACE_MODULES = replace_modules\n\n params = None\n negation = None\n is_injection_hybrid = False\n \n if self.is_cloneofsimo_lora():\n is_injection_hybrid = True\n injector_args = lora_loader_args\n\n params, negation = self.lora_injector(**injector_args) \n for _up, _down in extract_lora_ups_down(\n model, \n target_replace_module=REPLACE_MODULES):\n\n if all(x is not None for x in [_up, _down]):\n print(f\"Lora successfully injected into {model.__class__.__name__}.\")\n\n break\n\n return params, negation, is_injection_hybrid\n\n if self.is_stable_lora():\n injector_args = lora_args.copy()\n injector_args = filter_dict(injector_args, keys=STABLE_LORA_KEYS)\n\n SEARCH_CLASS = [torch.nn.Linear, torch.nn.Conv2d, torch.nn.Conv3d, torch.nn.Embedding]\n\n injector_args.update({\n \"model\": model,\n \"target_module\": REPLACE_MODULES,\n \"search_class\": SEARCH_CLASS,\n \"r\": r,\n \"dropout\": dropout,\n \"lora_bias\": self.lora_bias\n })\n\n activator = self.lora_injector(**injector_args)\n activator()\n\n return params, negation, is_injection_hybrid\n\n def add_lora_to_model(self, use_lora, model, replace_modules, dropout=0.0, lora_path='', r=16):\n\n params = None\n negation = None\n\n lora_loader_args = self.get_lora_func_args(\n lora_path,\n use_lora,\n model,\n replace_modules,\n r,\n dropout,\n self.lora_bias\n )\n if use_lora:\n params, negation, is_injection_hybrid = self.do_lora_injection(\n model, \n replace_modules, \n bias=self.lora_bias,\n lora_loader_args=lora_loader_args,\n dropout=dropout,\n r=r\n )\n\n if not is_injection_hybrid:\n self.load_lora(model, lora_path=lora_path, lora_loader_args=lora_loader_args)\n \n params = model if params is None else params\n return params, negation\n \n\n def deactivate_lora_train(self, models, deactivate=True):\n \"\"\"\n Usage: Use before and after sampling previews.\n Currently only available for Stable LoRA.\n \"\"\"\n if self.is_stable_lora():\n set_mode_group(models, not deactivate)\n\n def save_cloneofsimo_lora(self, model, save_path, step):\n \n def save_lora(model, name, condition, replace_modules, step, save_path): \n if condition and replace_modules is not None:\n save_path = f\"{save_path}/{step}_{name}.pt\"\n save_lora_weight(model, save_path, replace_modules)\n\n save_lora(\n model.unet, \n FILE_BASENAMES[0], \n self.use_unet_lora, \n self.unet_replace_modules, \n step,\n save_path, \n )\n save_lora(\n model.text_encoder, \n FILE_BASENAMES[1], \n self.use_text_lora, \n self.text_encoder_replace_modules, \n step, \n save_path\n )\n\n train_patch_pipe(model, self.use_unet_lora, self.use_text_lora)\n\n def save_stable_lora(\n self, \n model, \n step, \n name, \n save_path = '', \n 
save_for_webui=False,\n only_for_webui=False\n ):\n import uuid\n\n save_filename = f\"{step}_{name}\"\n lora_metadata = metadata = {\n \"stable_lora_text_to_video\": \"v1\", \n \"lora_name\": name + \"_\" + uuid.uuid4().hex.lower()[:5]\n }\n save_lora(\n unet=model.unet,\n text_encoder=model.text_encoder,\n save_text_weights=self.use_text_lora,\n output_dir=save_path,\n lora_filename=save_filename,\n lora_bias=self.lora_bias,\n save_for_webui=self.save_for_webui,\n only_webui=self.only_for_webui,\n metadata=lora_metadata,\n unet_dict_converter=convert_unet_state_dict,\n text_dict_converter=convert_text_enc_state_dict_v20\n )\n\n def save_lora_weights(self, model: None, save_path: str ='',step: str = ''):\n save_path = f\"{save_path}/lora\"\n os.makedirs(save_path, exist_ok=True)\n\n if self.is_cloneofsimo_lora():\n if any([self.save_for_webui, self.only_for_webui]):\n warnings.warn(\n \"\"\"\n You have 'save_for_webui' enabled, but are using cloneofsimo's LoRA implemention.\n Only 'stable_lora' is supported for saving to a compatible webui file.\n \"\"\"\n )\n self.save_cloneofsimo_lora(model, save_path, step)\n\n if self.is_stable_lora():\n name = 'lora_text_to_video'\n self.save_stable_lora(model, step, name, save_path)" }, { "identifier": "LORA_VERSIONS", "path": "utils/lora_handler.py", "snippet": "LORA_VERSIONS = [LoraVersions.stable_lora, LoraVersions.cloneofsimo]" }, { "identifier": "read_mask", "path": "utils/common.py", "snippet": "def read_mask(json_path, label=[\"mask\"]):\n j = json.load(open(json_path)) \n if type(label) != list:\n labels = [label]\n height = j['imageHeight']\n width = j['imageWidth']\n mask = np.zeros([height, width], dtype=np.uint8)\n for shape in j['shapes']:\n if shape['label'] in label:\n x1, y1 = shape['points'][0]\n x2, y2 = shape['points'][1]\n mask[int(y1):int(y2), int(x1):int(x2)] = 255\n return mask" }, { "identifier": "generate_random_mask", "path": "utils/common.py", "snippet": "def generate_random_mask(image):\n # Create a blank mask with the same size as the image\n b, c , h, w = image.shape\n mask = np.zeros([b, h, w], dtype=np.uint8)\n \n # Generate random coordinates for the mask\n num_points = np.random.randint(3, 10) # Randomly choose the number of points to generate\n points = np.random.randint(0, min(h, w), size=(num_points, 2)) # Randomly generate the points\n # Draw a filled polygon on the mask using the random points\n for i in range(b):\n width = random.randint(w//4, w)\n height = random.randint(h//4, h)\n x = random.randint(0, w-width)\n y = random.randint(0, h-height)\n points=np.array([[x, y], [x+width, y], [x+width, y+height], [x, y+height]])\n mask[i] = cv2.fillPoly(mask[i], [points], 255)\n \n # Apply the mask to the image\n #masked_image = cv2.bitwise_and(image, image, mask=mask)\n return mask " }, { "identifier": "slerp", "path": "utils/common.py", "snippet": "def slerp(z1, z2, alpha):\n theta = torch.acos(torch.sum(z1 * z2) / (torch.norm(z1) * torch.norm(z2)))\n return (\n torch.sin((1 - alpha) * theta) / torch.sin(theta) * z1\n + torch.sin(alpha * theta) / torch.sin(theta) * z2\n )" }, { "identifier": "calculate_motion_score", "path": "utils/common.py", "snippet": "def calculate_motion_score(frame_imgs, calculate_edges=False, color=\"RGB\") -> float:\n # Convert image into HSV colorspace.\n _last_frame = None\n\n _weights = [1.0, 1.0, 1.0, 0.0]\n score = 0\n for frame_img in frame_imgs:\n if color == \"RGB\":\n hue, sat, lum = cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_RGB2HSV))\n else:\n hue, sat, lum = 
cv2.split(cv2.cvtColor(frame_img, cv2.COLOR_BGR2HSV))\n # Performance: Only calculate edges if we have to.\n edges = _detect_edges(lum) if calculate_edges else None\n if _last_frame == None:\n _last_frame = (hue, sat, lum, edges)\n continue\n\n score_components = [\n _mean_pixel_distance(hue, _last_frame[0]),\n _mean_pixel_distance(sat, _last_frame[1]),\n _mean_pixel_distance(lum, _last_frame[2]),\n 0.0 if edges is None else _mean_pixel_distance(edges, _last_frame[3]),\n ]\n\n frame_score: float = (\n sum(component * weight for (component, weight) in zip(score_components, _weights))\n / sum(abs(weight) for weight in _weights))\n score += frame_score\n _last_frame = (hue, sat, lum, edges)\n\n return round(score/(len(frame_imgs)-1) * 10)" }, { "identifier": "read_video", "path": "utils/common.py", "snippet": "def read_video(video_path, frame_number=-1):\n # Open the video file\n cap = cv2.VideoCapture(video_path)\n count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) \n if frame_number == -1:\n frame_number = count\n else:\n frame_number = min(frame_number, count)\n frames = []\n for i in range(frame_number):\n ret, ref_frame = cap.read()\n ref_frame = cv2.cvtColor(ref_frame, cv2.COLOR_BGR2RGB)\n if not ret:\n raise ValueError(\"Failed to read video file\")\n frames.append(ref_frame)\n return frames" }, { "identifier": "calculate_motion_precision", "path": "utils/common.py", "snippet": "def calculate_motion_precision(frames, mask):\n moved_mask = get_moved_area_mask(frames, move_th=20, th=0)\n moved = moved_mask == 255\n gt = mask == 255\n precision = np.sum(moved & gt) / np.sum(moved)\n return precision" }, { "identifier": "calculate_latent_motion_score", "path": "utils/common.py", "snippet": "def calculate_latent_motion_score(latents):\n #latents b, c f, h, w\n diff=torch.abs(latents[:,:,1:]-latents[:,:,:-1])\n motion_score = torch.sum(torch.mean(diff, dim=[2,3,4]), dim=1) * 10\n return motion_score" }, { "identifier": "DDPM_forward", "path": "utils/common.py", "snippet": "def DDPM_forward(x0, step, num_frames, scheduler):\n device = x0.device\n t = scheduler.timesteps[-1]\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n\n eps = torch.randn_like(xt)\n alpha_vec = torch.prod(scheduler.alphas[t:])\n xt = torch.sqrt(alpha_vec) * xt + torch.sqrt(1-alpha_vec) * eps\n return xt, None" }, { "identifier": "DDPM_forward_timesteps", "path": "utils/common.py", "snippet": "def DDPM_forward_timesteps(x0, step, num_frames, scheduler):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n\n device = x0.device\n # timesteps are reversed\n timesteps = scheduler.timesteps[len(scheduler.timesteps)-step:]\n t = timesteps[0]\n\n if x0.shape[2] == 1:\n xt = repeat(x0, 'b c 1 h w -> b c f h w', f = num_frames)\n else:\n xt = x0\n noise = torch.randn(xt.shape, dtype=xt.dtype, device=device)\n # t to tensor of batch size \n t = torch.tensor([t]*xt.shape[0], device=device)\n xt = scheduler.add_noise(xt, noise, t)\n return xt, timesteps" }, { "identifier": "DDPM_forward_mask", "path": "utils/common.py", "snippet": "def DDPM_forward_mask(x0, step, num_frames, scheduler, mask):\n '''larger step -> smaller t -> smaller alphas[t:] -> smaller xt -> smaller x0'''\n device = x0.device\n dtype = x0.dtype\n b, c, f, h, w = x0.shape\n\n move_xt, timesteps = DDPM_forward_timesteps(x0, step, num_frames, scheduler)\n mask = T.ToTensor()(mask).to(dtype).to(device)\n mask = T.Resize([h, w], antialias=False)(mask)\n mask = rearrange(mask, 'b h w -> b 1 1 h w')\n freeze_xt = repeat(x0, 'b c 1 h 
w -> b c f h w', f = num_frames)\n initial = freeze_xt * (1-mask) + move_xt * mask\n return initial, timesteps" }, { "identifier": "motion_mask_loss", "path": "utils/common.py", "snippet": "def motion_mask_loss(latents, mask):\n diff = torch.abs(latents[:,:,1:] - latents[:,:,:-1])\n loss = torch.sum(torch.mean(diff * (1-mask), dim=[2,3,4]), dim=1)\n return loss" }, { "identifier": "generate_center_mask", "path": "utils/common.py", "snippet": "def generate_center_mask(image):\n # Create a blank mask with the same size as the image\n b, c , h, w = image.shape\n mask = np.zeros([b, h, w], dtype=np.uint8)\n \n # Generate random coordinates for the mask\n for i in range(b):\n width = int(w/10)\n height = int(h/10)\n mask[i][height:-height,width:-width] = 255\n # Apply the mask to the image\n #masked_image = cv2.bitwise_and(image, image, mask=mask)\n return mask " }, { "identifier": "tensor_to_vae_latent", "path": "utils/common.py", "snippet": "def tensor_to_vae_latent(t, vae):\n video_length = t.shape[1]\n\n t = rearrange(t, \"b f c h w -> (b f) c h w\")\n latents = vae.encode(t).latent_dist.sample()\n latents = rearrange(latents, \"(b f) c h w -> b c f h w\", f=video_length)\n latents = latents * 0.18215\n\n return latents" } ]
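Among the utils/common helpers captured in the context above, slerp performs spherical linear interpolation between two latent tensors. A minimal, self-contained sketch of its behavior — the helper body is copied verbatim from the snippet, while the demo tensors are invented for illustration:

import torch

def slerp(z1, z2, alpha):
    # angle between the two tensors, treated as flattened vectors
    theta = torch.acos(torch.sum(z1 * z2) / (torch.norm(z1) * torch.norm(z2)))
    return (
        torch.sin((1 - alpha) * theta) / torch.sin(theta) * z1
        + torch.sin(alpha * theta) / torch.sin(theta) * z2
    )

z1 = torch.randn(4, 64)
z2 = torch.randn(4, 64)
mid = slerp(z1, z2, 0.5)  # halfway along the great-circle path between z1 and z2
print(mid.shape)          # torch.Size([4, 64])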
import argparse import datetime import logging import inspect import math import os import json import gc import copy import random import cv2 import torch import torch.nn.functional as F import torch.utils.checkpoint import torchvision.transforms as T import diffusers import transformers import numpy as np import imageio import itertools import bitsandbytes as bnb from typing import Dict, Optional, Tuple from omegaconf import OmegaConf from tqdm.auto import tqdm from PIL import Image from accelerate import Accelerator from accelerate.logging import get_logger from accelerate.utils import set_seed from diffusers.models import AutoencoderKL from diffusers import DPMSolverMultistepScheduler, DDPMScheduler from diffusers.image_processor import VaeImageProcessor from diffusers.optimization import get_scheduler from diffusers.utils import check_min_version, export_to_video from diffusers.utils.import_utils import is_xformers_available from diffusers.models.attention_processor import AttnProcessor2_0, Attention from diffusers.models.attention import BasicTransformerBlock from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth import tensor2vid from transformers import CLIPTextModel, CLIPTokenizer from transformers.models.clip.modeling_clip import CLIPEncoder from utils.dataset import VideoJsonDataset, SingleVideoDataset, \ ImageDataset, VideoFolderDataset, CachedDataset, VideoBLIPDataset from einops import rearrange, repeat from models.unet_3d_condition_mask import UNet3DConditionModel from models.pipeline import LatentToVideoPipeline from utils.lora_handler import LoraHandler, LORA_VERSIONS from utils.common import read_mask, generate_random_mask, slerp, calculate_motion_score, \ read_video, calculate_motion_precision, calculate_latent_motion_score, \ DDPM_forward, DDPM_forward_timesteps, DDPM_forward_mask, motion_mask_loss, \ generate_center_mask, tensor_to_vae_latent from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
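The import block pairs diffusers' is_xformers_available check with xformers' MemoryEfficientAttentionFlashAttentionOp, the usual combination for switching a UNet to memory-efficient attention. A hedged sketch of that wiring — enable_xformers_memory_efficient_attention is diffusers' standard API, but this script's exact usage is not shown, and `unet` is a placeholder:

from diffusers.utils.import_utils import is_xformers_available

def maybe_enable_xformers(unet):
    # only attempt the switch when xformers is importable
    if is_xformers_available():
        from xformers.ops import MemoryEfficientAttentionFlashAttentionOp
        # the optional attention_op argument pins a specific xformers kernel
        unet.enable_xformers_memory_efficient_attention(
            attention_op=MemoryEfficientAttentionFlashAttentionOp
        )
    return unet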
17,639
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = []
already_printed_trainables = False logger = get_logger(__name__, log_level="INFO") def create_logging(logging, logger, accelerator): logging.basicConfig( format="%(asctime)s - %(levelname)s - %(name)s - %(message)s", datefmt="%m/%d/%Y %H:%M:%S", level=logging.INFO, ) logger.info(accelerator.state, main_process_only=False) def accelerate_set_verbose(accelerator): if accelerator.is_local_main_process: transformers.utils.logging.set_verbosity_warning() diffusers.utils.logging.set_verbosity_info() else: transformers.utils.logging.set_verbosity_error() diffusers.utils.logging.set_verbosity_error() def get_train_dataset(dataset_types, train_data, tokenizer): train_datasets = []
dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset, VideoFolderDataset, VideoBLIPDataset]
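This gold next line opens a lookup table inside get_train_dataset, pairing the imported dataset classes against the requested dataset_types. A hedged sketch of how the function plausibly continues — the stub classes and the getname-based matching rule are assumptions for illustration, not the repository's verbatim code:

class _StubDataset:
    # stand-in for the real dataset classes imported by the script
    def __init__(self, **kwargs):
        self.kwargs = kwargs
    @classmethod
    def getname(cls):
        return cls.__name__.replace("Dataset", "").lower()

class VideoJsonDataset(_StubDataset): pass
class SingleVideoDataset(_StubDataset): pass
class ImageDataset(_StubDataset): pass
class VideoFolderDataset(_StubDataset): pass
class VideoBLIPDataset(_StubDataset): pass

def get_train_dataset(dataset_types, train_data, tokenizer):
    train_datasets = []
    dataset_cls = [VideoJsonDataset, SingleVideoDataset, ImageDataset,
                   VideoFolderDataset, VideoBLIPDataset]
    for cls in dataset_cls:
        if cls.getname() in dataset_types:  # hypothetical matching rule
            train_datasets.append(cls(**train_data, tokenizer=tokenizer))
    return train_datasets

print(get_train_dataset(["image"], {"path": "data/"}, tokenizer=None))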
5
2023-12-07 08:26:29+00:00
24k
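Read as a unit, each record pairs cropped_code with a gold next_line, while gold_snippet_index marks which context snippet the completion depends on — the shape of a retrieval-augmented next-line-prediction benchmark. A minimal evaluation sketch under that reading; records are assumed to be dicts keyed by the dataset's column names, and generate() is a stand-in for whatever completion model is under test:

def exact_match_rate(records, generate):
    # generate(prompt) -> str is the model being evaluated
    hits = 0
    for rec in records:
        prompt = rec["import_statement"] + "\n" + rec["cropped_code"]
        out = generate(prompt).strip()
        first_line = out.splitlines()[0] if out else ""
        hits += first_line == rec["next_line"].strip()
    return hits / max(len(records), 1)

# toy usage with a single made-up record and a trivial "model"
rec = {"import_statement": "import math", "cropped_code": "def one():", "next_line": "    return 1"}
print(exact_match_rate([rec], lambda p: "    return 1"))  # 1.0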
allenai/Holodeck
modules/object_selector.py
[ { "identifier": "DFS_Solver_Floor", "path": "modules/floor_objects.py", "snippet": "class DFS_Solver_Floor():\n def __init__(self, grid_size, random_seed=0, max_duration=5, constraint_bouns=0.2):\n self.grid_size = grid_size\n self.random_seed = random_seed\n self.max_duration = max_duration # maximum allowed time in seconds\n self.constraint_bouns = constraint_bouns\n self.start_time = None\n self.solutions = []\n self.vistualize = False\n\n # Define the functions in a dictionary to avoid if-else conditions\n self.func_dict = {\n \"global\": {\n \"edge\": self.place_edge\n },\n \"relative\": self.place_relative,\n \"direction\": self.place_face,\n \"alignment\": self.place_alignment_center,\n \"distance\": self.place_distance\n }\n\n self.constraint_type2weight = {\n \"global\": 1.0,\n \"relative\": 0.5,\n \"direction\": 0.5,\n \"alignment\": 0.5,\n \"distance\": 1.8,\n }\n\n self.edge_bouns = 0.0 # worth more than one constraint\n\n\n def get_solution(self, bounds, objects_list, constraints, initial_state, use_milp=False):\n self.start_time = time.time()\n if use_milp:\n # iterate through the constraints list\n # for each constraint type \"distance\", add the same constraint to the target object\n new_constraints = constraints.copy()\n for object_name, object_constraints in constraints.items():\n for constraint in object_constraints:\n if constraint[\"type\"] == \"distance\":\n target_object_name = constraint[\"target\"]\n if target_object_name in constraints.keys():\n # if there is already a distance constraint of target object_name, continue\n if any(constraint[\"type\"] == \"distance\" and constraint[\"target\"] == object_name for constraint in constraints[target_object_name]): continue\n new_constraint = constraint.copy()\n new_constraint[\"target\"] = object_name\n new_constraints[target_object_name].append(new_constraint)\n # iterate through the constraints list\n # for each constraint type \"left of\" or \"right of\", add the same constraint to the target object\n #for object_name, object_constraints in constraints.items():\n # for constraint in object_constraints: if constraint[\"type\"] == \"relative\":\n # if constraint[\"constraint\"] == \"left of\":\n constraints = new_constraints\n\n try:\n self.milp_dfs(bounds, objects_list, constraints, initial_state, 10)\n except SolutionFound as e:\n print(f\"Time taken: {time.time() - self.start_time}\")\n \n else:\n grid_points = self.create_grids(bounds)\n grid_points = self.remove_points(grid_points, initial_state)\n try:\n self.dfs(bounds, objects_list, constraints, grid_points, initial_state, 30)\n except SolutionFound as e:\n print(f\"Time taken: {time.time() - self.start_time}\")\n \n print(f\"Number of solutions found: {len(self.solutions)}\")\n max_solution = self.get_max_solution(self.solutions)\n\n if not use_milp and self.vistualize:\n self.visualize_grid(bounds, grid_points, max_solution)\n\n return max_solution\n \n\n def get_max_solution(self, solutions):\n path_weights = []\n for solution in solutions:\n path_weights.append(sum([obj[-1] for obj in solution.values()]))\n max_index = np.argmax(path_weights)\n return solutions[max_index]\n\n\n def dfs(self, room_poly, objects_list, constraints, grid_points, placed_objects, branch_factor):\n if len(objects_list) == 0:\n self.solutions.append(placed_objects)\n return placed_objects\n \n if time.time() - self.start_time > self.max_duration:\n print(f\"Time limit reached.\")\n raise SolutionFound(self.solutions)\n \n object_name, object_dim = objects_list[0]\n placements = 
self.get_possible_placements(room_poly, object_dim, constraints[object_name], grid_points, placed_objects)\n \n if len(placements) == 0 and len(placed_objects) != 0:\n self.solutions.append(placed_objects)\n\n paths = []\n if branch_factor > 1: random.shuffle(placements) # shuffle the placements of the first object\n\n for placement in placements[:branch_factor]:\n placed_objects_updated = copy.deepcopy(placed_objects)\n placed_objects_updated[object_name] = placement\n grid_points_updated = self.remove_points(grid_points, placed_objects_updated)\n\n sub_paths = self.dfs(room_poly, objects_list[1:], constraints, grid_points_updated, placed_objects_updated, 1)\n paths.extend(sub_paths)\n\n return paths\n\n \n def get_possible_placements(self, room_poly, object_dim, constraints, grid_points, placed_objects):\n solutions = self.filter_collision(placed_objects, self.get_all_solutions(room_poly, grid_points, object_dim))\n solutions = self.filter_facing_wall(room_poly, solutions, object_dim)\n edge_solutions = self.place_edge(room_poly, copy.deepcopy(solutions), object_dim)\n\n if len(edge_solutions) == 0: return edge_solutions\n\n global_constraint = next((constraint for constraint in constraints if constraint[\"type\"] == \"global\"), None)\n\n if global_constraint is None: global_constraint = {\"type\": \"global\", \"constraint\": \"edge\"}\n\n if global_constraint[\"constraint\"] == \"edge\":\n candidate_solutions = copy.deepcopy(edge_solutions) # edge is hard constraint\n else:\n if len(constraints) > 1: candidate_solutions = solutions + edge_solutions # edge is soft constraint\n else: candidate_solutions = copy.deepcopy(solutions) # the first object\n\n candidate_solutions = self.filter_collision(placed_objects, candidate_solutions) # filter again after global constraint\n\n if candidate_solutions == []: return candidate_solutions\n random.shuffle(candidate_solutions)\n placement2score = {tuple(solution[:3]): solution[-1] for solution in candidate_solutions}\n\n # add a bias to edge solutions\n for solution in candidate_solutions:\n if solution in edge_solutions and len(constraints) >= 1:\n placement2score[tuple(solution[:3])] += self.edge_bouns\n \n for constraint in constraints:\n if \"target\" not in constraint: continue\n\n func = self.func_dict.get(constraint[\"type\"])\n valid_solutions = func(constraint[\"constraint\"], placed_objects[constraint[\"target\"]], candidate_solutions)\n \n weight = self.constraint_type2weight[constraint[\"type\"]]\n if constraint[\"type\"] == \"distance\":\n for solution in valid_solutions:\n bouns = solution[-1]\n placement2score[tuple(solution[:3])] += bouns * weight\n else:\n for solution in valid_solutions:\n placement2score[tuple(solution[:3])] += self.constraint_bouns * weight\n\n # normalize the scores\n for placement in placement2score: placement2score[placement] /= max(len(constraints), 1)\n\n sorted_placements = sorted(placement2score, key=placement2score.get, reverse=True)\n sorted_solutions = [list(placement) + [placement2score[placement]] for placement in sorted_placements]\n\n return sorted_solutions\n\n\n def create_grids(self, room_poly):\n # get the min and max bounds of the room\n min_x, min_z, max_x, max_z = room_poly.bounds\n\n # create grid points\n grid_points = []\n for x in range(int(min_x), int(max_x), self.grid_size):\n for y in range(int(min_z), int(max_z), self.grid_size):\n point = Point(x, y)\n if room_poly.contains(point):\n grid_points.append((x, y))\n\n return grid_points\n \n\n def remove_points(self, grid_points, 
objects_dict):\n # Create an r-tree index\n idx = index.Index()\n\n # Populate the index with bounding boxes of the objects\n for i, (_, _, obj, _) in enumerate(objects_dict.values()):\n idx.insert(i, Polygon(obj).bounds)\n \n # Create Shapely Polygon objects only once\n polygons = [Polygon(obj) for _, _, obj, _ in objects_dict.values()]\n\n valid_points = []\n \n for point in grid_points:\n p = Point(point)\n # Get a list of potential candidates\n candidates = [polygons[i] for i in idx.intersection(p.bounds)]\n # Check if point is in any of the candidate polygons\n if not any(candidate.contains(p) for candidate in candidates):\n valid_points.append(point)\n \n return valid_points\n \n\n def get_all_solutions(self, room_poly, grid_points, object_dim):\n obj_length, obj_width = object_dim\n obj_half_length, obj_half_width = obj_length / 2, obj_width / 2\n\n rotation_adjustments = {\n 0: ((-obj_half_length, -obj_half_width), (obj_half_length, obj_half_width)),\n 90: ((-obj_half_width, -obj_half_length), (obj_half_width, obj_half_length)),\n 180: ((-obj_half_length, obj_half_width), (obj_half_length, -obj_half_width)),\n 270: ((obj_half_width, -obj_half_length), (-obj_half_width, obj_half_length)),\n }\n\n solutions = []\n for rotation in [0, 90, 180, 270]:\n for point in grid_points:\n center_x, center_y = point\n lower_left_adjustment, upper_right_adjustment = rotation_adjustments[rotation]\n lower_left = (center_x + lower_left_adjustment[0], center_y + lower_left_adjustment[1])\n upper_right = (center_x + upper_right_adjustment[0], center_y + upper_right_adjustment[1])\n obj_box = box(*lower_left, *upper_right)\n\n if room_poly.contains(obj_box):\n solutions.append([point, rotation, tuple(obj_box.exterior.coords[:]), 1])\n \n return solutions\n \n\n def filter_collision(self, objects_dict, solutions):\n valid_solutions = []\n object_polygons = [Polygon(obj_coords) for _, _, obj_coords, _ in list(objects_dict.values())]\n for solution in solutions:\n sol_obj_coords = solution[2]\n sol_obj = Polygon(sol_obj_coords)\n if not any(sol_obj.intersects(obj) for obj in object_polygons):\n valid_solutions.append(solution)\n return valid_solutions\n\n \n def filter_facing_wall(self, room_poly, solutions, obj_dim):\n valid_solutions = []\n obj_width = obj_dim[1]\n obj_half_width = obj_width / 2\n\n front_center_adjustments = {\n 0: (0, obj_half_width),\n 90: (obj_half_width, 0),\n 180: (0, -obj_half_width),\n 270: (-obj_half_width, 0),\n }\n\n valid_solutions = []\n for solution in solutions:\n center_x, center_y = solution[0]\n rotation = solution[1]\n\n front_center_adjustment = front_center_adjustments[rotation]\n front_center_x, front_center_y = center_x + front_center_adjustment[0], center_y + front_center_adjustment[1]\n\n front_center_distance = room_poly.boundary.distance(Point(front_center_x, front_center_y))\n\n if front_center_distance >= 30: # TODO: make this a parameter\n valid_solutions.append(solution)\n\n return valid_solutions\n \n\n def place_edge(self, room_poly, solutions, obj_dim):\n valid_solutions = []\n obj_width = obj_dim[1]\n obj_half_width = obj_width / 2\n\n back_center_adjustments = {\n 0: (0, -obj_half_width),\n 90: (-obj_half_width, 0),\n 180: (0, obj_half_width),\n 270: (obj_half_width, 0),\n }\n\n for solution in solutions:\n center_x, center_y = solution[0]\n rotation = solution[1]\n\n back_center_adjustment = back_center_adjustments[rotation]\n back_center_x, back_center_y = center_x + back_center_adjustment[0], center_y + back_center_adjustment[1]\n\n 
back_center_distance = room_poly.boundary.distance(Point(back_center_x, back_center_y))\n center_distance = room_poly.boundary.distance(Point(center_x, center_y))\n\n if back_center_distance <= self.grid_size and back_center_distance < center_distance:\n solution[-1] += self.constraint_bouns\n # valid_solutions.append(solution) # those are still valid solutions, but we need to move the object to the edge\n\n # move the object to the edge\n center2back_vector = np.array([back_center_x - center_x, back_center_y - center_y])\n center2back_vector /= np.linalg.norm(center2back_vector)\n offset = center2back_vector * (back_center_distance + 4.5) # add a small distance to avoid the object cross the wall\n solution[0] = (center_x + offset[0], center_y + offset[1])\n solution[2] = ((solution[2][0][0] + offset[0], solution[2][0][1] + offset[1]), \\\n (solution[2][1][0] + offset[0], solution[2][1][1] + offset[1]), \\\n (solution[2][2][0] + offset[0], solution[2][2][1] + offset[1]), \\\n (solution[2][3][0] + offset[0], solution[2][3][1] + offset[1]))\n valid_solutions.append(solution)\n\n return valid_solutions\n \n\n def place_corner(self, room_poly, solutions, obj_dim):\n obj_length, obj_width = obj_dim\n obj_half_length, _ = obj_length / 2, obj_width / 2\n\n rotation_center_adjustments = {\n 0: ((-obj_half_length, 0), (obj_half_length, 0)),\n 90: ((0, obj_half_length), (0, -obj_half_length)),\n 180: ((obj_half_length, 0), (-obj_half_length, 0)),\n 270: ((0, -obj_half_length), (0, obj_half_length))\n }\n\n edge_solutions = self.place_edge(room_poly, solutions, obj_dim)\n\n valid_solutions = []\n\n for solution in edge_solutions:\n (center_x, center_y), rotation = solution[:2]\n (dx_left, dy_left), (dx_right, dy_right) = rotation_center_adjustments[rotation]\n\n left_center_x, left_center_y = center_x + dx_left, center_y + dy_left\n right_center_x, right_center_y = center_x + dx_right, center_y + dy_right\n \n left_center_distance = room_poly.boundary.distance(Point(left_center_x, left_center_y))\n right_center_distance = room_poly.boundary.distance(Point(right_center_x, right_center_y))\n\n if min(left_center_distance, right_center_distance) < self.grid_size:\n solution[-1] += self.constraint_bouns\n valid_solutions.append(solution)\n\n return valid_solutions\n \n\n def place_relative(self, place_type, target_object, solutions):\n valid_solutions = []\n _, target_rotation, target_coords, _ = target_object\n target_polygon = Polygon(target_coords)\n\n min_x, min_y, max_x, max_y = target_polygon.bounds\n mean_x = (min_x + max_x) / 2\n mean_y = (min_y + max_y) / 2\n\n comparison_dict = {\n 'left of': {\n 0: lambda sol_center: sol_center[0] < min_x and min_y <= sol_center[1] <= max_y,\n 90: lambda sol_center: sol_center[1] > max_y and min_x <= sol_center[0] <= max_x,\n 180: lambda sol_center: sol_center[0] > max_x and min_y <= sol_center[1] <= max_y,\n 270: lambda sol_center: sol_center[1] < min_y and min_x <= sol_center[0] <= max_x,\n },\n 'right of': {\n 0: lambda sol_center: sol_center[0] > max_x and min_y <= sol_center[1] <= max_y,\n 90: lambda sol_center: sol_center[1] < min_y and min_x <= sol_center[0] <= max_x,\n 180: lambda sol_center: sol_center[0] < min_x and min_y <= sol_center[1] <= max_y,\n 270: lambda sol_center: sol_center[1] > max_y and min_x <= sol_center[0] <= max_x,\n },\n 'in front of': {\n 0: lambda sol_center: sol_center[1] > max_y and mean_x - self.grid_size < sol_center[0] < mean_x + self.grid_size, # in front of and centered\n 90: lambda sol_center: sol_center[0] > max_x and 
mean_y - self.grid_size < sol_center[1] < mean_y + self.grid_size,\n 180: lambda sol_center: sol_center[1] < min_y and mean_x - self.grid_size < sol_center[0] < mean_x + self.grid_size,\n 270: lambda sol_center: sol_center[0] < min_x and mean_y - self.grid_size < sol_center[1] < mean_y + self.grid_size,\n },\n 'behind': {\n 0: lambda sol_center: sol_center[1] < min_y and min_x <= sol_center[0] <= max_x,\n 90: lambda sol_center: sol_center[0] < min_x and min_y <= sol_center[1] <= max_y,\n 180: lambda sol_center: sol_center[1] > max_y and min_x <= sol_center[0] <= max_x,\n 270: lambda sol_center: sol_center[0] > max_x and min_y <= sol_center[1] <= max_y,\n },\n \"side of\": {\n 0: lambda sol_center: min_y <= sol_center[1] <= max_y,\n 90: lambda sol_center: min_x <= sol_center[0] <= max_x,\n 180: lambda sol_center: min_y <= sol_center[1] <= max_y,\n 270: lambda sol_center: min_x <= sol_center[0] <= max_x\n }\n }\n \n compare_func = comparison_dict.get(place_type).get(target_rotation)\n\n for solution in solutions:\n sol_center = solution[0]\n\n if compare_func(sol_center):\n solution[-1] += self.constraint_bouns\n valid_solutions.append(solution)\n \n return valid_solutions\n \n\n def place_distance(self, distance_type, target_object, solutions):\n target_coords = target_object[2]\n target_poly = Polygon(target_coords)\n distances = []\n valid_solutions = []\n for solution in solutions:\n sol_coords = solution[2]\n sol_poly = Polygon(sol_coords)\n distance = target_poly.distance(sol_poly)\n distances.append(distance)\n\n solution[-1] = distance\n valid_solutions.append(solution)\n \n min_distance = min(distances)\n max_distance = max(distances)\n\n if distance_type == \"near\":\n if min_distance < 80:\n points = [(min_distance, 1), (80, 0), (max_distance, 0)]\n else:\n points = [(min_distance, 0), (max_distance, 0)]\n\n elif distance_type == \"far\":\n points = [(min_distance, 0), (max_distance, 1)]\n \n x = [point[0] for point in points]\n y = [point[1] for point in points]\n\n f = interp1d(x, y, kind='linear', fill_value='extrapolate')\n \n for solution in valid_solutions:\n distance = solution[-1]\n solution[-1] = float(f(distance))\n\n return valid_solutions\n \n\n def place_face(self, face_type, target_object, solutions):\n if face_type == \"face to\":\n return self.place_face_to(target_object, solutions)\n \n elif face_type == \"face same as\":\n return self.place_face_same(target_object, solutions)\n \n elif face_type == \"face opposite to\":\n return self.place_face_opposite(target_object, solutions)\n \n\n def place_face_to(self, target_object, solutions):\n # Define unit vectors for each rotation\n unit_vectors = {\n 0: np.array([0., 1.]), # Facing up\n 90: np.array([1., 0.]), # Facing right\n 180: np.array([0., -1.]), # Facing down\n 270: np.array([-1., 0.]) # Facing left\n }\n \n target_coords = target_object[2]\n target_poly = Polygon(target_coords)\n \n valid_solutions = []\n \n for solution in solutions:\n sol_center = solution[0]\n sol_rotation = solution[1]\n\n # Define an arbitrarily large point in the direction of the solution's rotation\n far_point = sol_center + 1e6 * unit_vectors[sol_rotation]\n\n # Create a half-line from the solution's center to the far point\n half_line = LineString([sol_center, far_point])\n\n # Check if the half-line intersects with the target polygon\n if half_line.intersects(target_poly):\n solution[-1] += self.constraint_bouns\n valid_solutions.append(solution)\n \n return valid_solutions\n \n\n def place_face_same(self, target_object, 
solutions):\n target_rotation = target_object[1]\n valid_solutions = []\n \n for solution in solutions:\n sol_rotation = solution[1]\n if sol_rotation == target_rotation:\n solution[-1] += self.constraint_bouns\n valid_solutions.append(solution)\n\n return valid_solutions\n \n\n def place_face_opposite(self, target_object, solutions):\n target_rotation = (target_object[1] + 180) % 360\n valid_solutions = []\n \n for solution in solutions:\n sol_rotation = solution[1]\n if sol_rotation == target_rotation:\n solution[-1] += self.constraint_bouns\n valid_solutions.append(solution)\n \n return valid_solutions\n\n\n def place_alignment_center(self, alignment_type, target_object, solutions):\n target_center = target_object[0]\n valid_solutions = []\n eps = 5\n for solution in solutions:\n sol_center = solution[0]\n if abs(sol_center[0] - target_center[0]) < eps or abs(sol_center[1] - target_center[1]) < eps:\n solution[-1] += self.constraint_bouns\n valid_solutions.append(solution)\n return valid_solutions\n\n\n def visualize_grid(self, room_poly, grid_points, solutions):\n plt.rcParams[\"font.family\"] = \"Times New Roman\"\n plt.rcParams[\"font.size\"] = 22\n\n # create a new figure\n fig, ax = plt.subplots()\n\n # draw the room\n x, y = room_poly.exterior.xy\n ax.plot(x, y, '-', label='Room', color='black', linewidth=2)\n\n # draw the grid points\n grid_x = [point[0] for point in grid_points]\n grid_y = [point[1] for point in grid_points]\n ax.plot(grid_x, grid_y, 'o', markersize=2, color=\"grey\")\n\n # draw the solutions\n for object_name, solution in solutions.items():\n center, rotation, box_coords = solution[:3]\n center_x, center_y = center\n\n # create a polygon for the solution\n obj_poly = Polygon(box_coords)\n x, y = obj_poly.exterior.xy\n ax.plot(x, y, '-', linewidth=2, color='black')\n\n # ax.text(center_x, center_y, object_name, fontsize=18, ha='center')\n\n # set arrow direction based on rotation\n if rotation == 0:\n ax.arrow(center_x, center_y, 0, 25, head_width=10, fc='black')\n elif rotation == 90:\n ax.arrow(center_x, center_y, 25, 0, head_width=10, fc='black')\n elif rotation == 180:\n ax.arrow(center_x, center_y, 0, -25, head_width=10, fc='black')\n elif rotation == 270:\n ax.arrow(center_x, center_y, -25, 0, head_width=10, fc='black')\n # axis off\n ax.axis('off')\n ax.set_aspect('equal', 'box') # to keep the ratios equal along x and y axis\n create_time = str(datetime.datetime.now()).replace(\" \", \"-\").replace(\":\", \"-\").replace(\".\", \"-\")\n plt.savefig(f\"{create_time}.pdf\", bbox_inches='tight', dpi=300)\n plt.show()\n \n\n def milp_dfs(self, room_poly, all_objects_list, constraints, placed_objects, branch_factor=1):\n if len(all_objects_list) == 0:\n self.solutions.append(placed_objects)\n return placed_objects\n \n if time.time() - self.start_time > self.max_duration:\n print(f\"Time limit reached.\")\n raise SolutionFound(self.solutions)\n \n def milp_solve(soft_constraints_list, hard_constraints_list, verbose=False):\n problem = cp.Problem(cp.Maximize(sum(soft_constraints_list)), hard_constraints_list)\n if verbose:\n print('solving milp using GUROBI ...')\n problem.solve(solver=cp.GUROBI, reoptimize=True, verbose=False)\n return problem.value\n \n def parse_object_properties(object_properties):\n x, y = object_properties[0]\n rotation = int(object_properties[1] or 0)\n # set rotation to the closest 90 degree\n rotation = int(round(rotation / 90) * 90)\n assert rotation in [0, 90, 180, 270]\n object_bbox = object_properties[2]\n min_x = min([point[0] for 
point in object_bbox])\n max_x = max([point[0] for point in object_bbox])\n min_y = min([point[1] for point in object_bbox])\n max_y = max([point[1] for point in object_bbox])\n object_dim = (max_x - min_x, max_y - min_y) if rotation == 0 or rotation == 180 else (max_y - min_y, max_x - min_x)\n return x, y, rotation, object_dim\n\n def find_object_dim(target_object_name, objects_list, placed_objects):\n target_object_dim = None\n for object_name_1, object_dim_1 in objects_list:\n if object_name_1 == target_object_name: \n target_object_dim = object_dim_1\n return target_object_dim\n\n if not None: \n for object_name_1, object_properties in placed_objects.items():\n if object_name_1 == target_object_name:\n x, y, rotation, target_object_dim = parse_object_properties(object_properties)\n return target_object_dim\n return None\n\n\n found_a_solution = False\n # randomly select a set of objects from all_objects_list\n # start with the largest object + more objects --> gradually reduce the number of objects \n for branch_idx in range(branch_factor):\n # sample a set of objects from a list that contains the first object\n \n k = random.randint(0, min(5, len(all_objects_list)-1))\n objects_list = [all_objects_list[0]] + random.sample(all_objects_list[1:], k)\n\n hard_constraints_list = []\n soft_constraints_list = [0]\n\n # formulate the milp problem\n # object_name, object_dim = objects_list[0]\n # x, y, rotate_180, rotate_90\n variables_dict = {object[0]: [cp.Variable(), cp.Variable(), cp.Variable(boolean=True), cp.Variable(boolean=True)] for object in objects_list}\n # add placed objects into variables dict even though they are not variables\n for object, object_properties in placed_objects.items():\n x, y = object_properties[0]\n rotation = int(object_properties[1])\n variables_dict[object] = [x, y, rotation == 180, rotation == 90 or rotation == 270]\n\n # Initialize a list of variables, each variable represents the coordinate for each object\n room_min_x, room_min_y, room_max_x, room_max_y = room_poly.bounds\n # Add boundary constraints to all objects\n for object_name, object_dim in objects_list:\n hard_constraints_list.extend(create_boundary_constraints(variables_dict[object_name],\n object_dim,\n (room_min_x, room_min_y, room_max_x, room_max_y)))\n # Add pariwise collision constraints\n for object_name_1, object_dim_1 in objects_list:\n for object_name_2, object_dim_2 in objects_list:\n if object_name_1 == object_name_2: continue\n # collision constraints should be hard constraints\n hard_constraints_list.extend(create_nooverlap_constraints(variables_dict[object_name_1],\n variables_dict[object_name_2],\n object_dim_1,\n object_dim_2))\n\n # Add pariwise collision constraints with placed objects\n for object_name_1, object_dim_1 in objects_list:\n for object_name_2, object_properties_2 in placed_objects.items():\n # bbox is a list of four points\n x, y, rotation, object_dim_2 = parse_object_properties(object_properties_2)\n\n hard_constraints_list.extend(create_nooverlap_constraints(variables_dict[object_name_1],\n [x, y, rotation == 180, rotation == 90 or rotation == 270],\n object_dim_1, object_dim_2))\n\n # default constraints / heuristics?\n for object_name, object_dim in objects_list:\n # encourage dispersement of assets\n all_other_objects_list = [x[0] for x in objects_list if x[0] != object_name] + list(placed_objects.keys())\n for target_object_name in all_other_objects_list:\n hard_constraints, soft_constraints = create_distance_constraints(variables_dict[object_name],\n 
variables_dict[target_object_name],\n upper_bound=[room_max_x-room_min_x, room_max_y-room_min_y],\n type='far')\n assert len(soft_constraints) == 1\n # soft_constraints[0] *= 0.001\n hard_constraints_list.extend(hard_constraints)\n soft_constraints_list.extend(soft_constraints)\n\n\n # use cvxpy to solve for the hard constraints\n for object_name, object_dim in objects_list:\n\n # by default - add soft edge constraints although this might make the solver take a longer time\n if not any(constraint['type'] == 'global' for constraint in constraints[object_name]):\n hard_constraints, soft_constraints = create_edge_constraints(variables_dict[object_name],\n object_dim,\n room_dim=(room_min_x, room_min_y, room_max_x, room_max_y),\n hard=False)\n soft_constraints[0] *= 100\n hard_constraints_list.extend(hard_constraints)\n soft_constraints_list.extend(soft_constraints)\n\n\n\n for constraint in constraints[object_name]:\n if constraint['type'] == 'global': \n if constraint['constraint'] == 'edge': # hard constraints\n hard_constraints, soft_constraints = create_edge_constraints(variables_dict[object_name],\n object_dim,\n room_dim=(room_min_x, room_min_y, room_max_x, room_max_y),\n hard=True)\n hard_constraints_list.extend(hard_constraints)\n soft_constraints_list.extend(soft_constraints)\n\n if constraint['type'] == 'direction':\n assert constraint['constraint'] == 'face to'\n target_object_name = constraint['target']\n target_object_dim = find_object_dim(target_object_name, objects_list, placed_objects)\n if target_object_dim:\n hard_constraints_list.extend(create_directional_constraints(variables_dict[object_name],\n variables_dict[target_object_name],\n object_dim,\n target_object_dim))\n\n if constraint['type'] == 'alignment':\n assert constraint['constraint'] == 'center aligned'\n target_object_name = constraint['target']\n target_object_dim = find_object_dim(target_object_name, objects_list, placed_objects)\n if target_object_dim:\n hard_constraints_list.extend(create_alignment_constraints(variables_dict[object_name],\n variables_dict[target_object_name],\n object_dim,\n target_object_dim))\n\n if constraint['type'] == 'distance':\n target_object_name = constraint['target']\n target_object_dim = find_object_dim(target_object_name, objects_list, placed_objects)\n if target_object_dim:\n hard_constraints, soft_constraints = create_distance_constraints(variables_dict[object_name],\n variables_dict[target_object_name],\n upper_bound=[room_max_x-room_min_x, room_max_y-room_min_y],\n type=constraint['constraint'])\n hard_constraints_list.extend(hard_constraints)\n soft_constraints_list.extend(soft_constraints)\n assert len(soft_constraints) == 1\n # higher weighting\n soft_constraints[0] *= 0.01\n\n if constraint['type'] == 'relative':\n target_object_name = constraint['target']\n target_object_dim = find_object_dim(target_object_name, objects_list, placed_objects)\n if target_object_dim:\n hard_constraints_list.extend(create_relative_constraints(variables_dict[object_name],\n variables_dict[target_object_name],\n object_dim,\n target_object_dim,\n constraint['constraint']))\n\n result = milp_solve(soft_constraints_list, hard_constraints_list, verbose=False)\n if result is None or math.isnan(result) or math.isinf(result):\n continue\n\n found_a_solution = True\n print(result, [x[0] for x in objects_list])\n\n # we fonud a valid solution\n # convert the placements to the same format as the dfs solver\n placed_objects_updated = copy.deepcopy(placed_objects)\n for object_name, object_dim in 
objects_list:\n # (x, y), rotation, bbox, score\n x = variables_dict[object_name][0].value.item()\n y = variables_dict[object_name][1].value.item()\n rotate_180 = variables_dict[object_name][2].value\n rotate_90 = variables_dict[object_name][3].value \n if not rotate_180: rotate_180 = 0\n if not rotate_90: rotate_90 = 0\n\n # bbox has taken into account of the rotation\n if rotate_90:\n bbox = [(x - object_dim[1]/2, y - object_dim[0]/2),\n (x + object_dim[1]/2, y - object_dim[0]/2),\n (x + object_dim[1]/2, y + object_dim[0]/2),\n (x - object_dim[1]/2, y + object_dim[0]/2)]\n else:\n bbox = [(x - object_dim[0]/2, y - object_dim[1]/2),\n (x + object_dim[0]/2, y - object_dim[1]/2),\n (x + object_dim[0]/2, y + object_dim[1]/2),\n (x - object_dim[0]/2, y + object_dim[1]/2)]\n\n placed_objects_updated[object_name] = [(x,y), rotate_180 * 180 + rotate_90 * 90, bbox,\n len(constraints[object_name])]\n\n # remove all elemnts in objects_list from all_objects_list\n self.milp_dfs(room_poly, [x for x in all_objects_list if x not in objects_list], constraints, placed_objects_updated, branch_factor=1)\n \n if not found_a_solution and len(placed_objects) != 0:\n self.solutions.append(placed_objects)\n \n\n def test_dfs_placement(self):\n room_vertices = ((0, 0), (0, 500), (500, 500), (500, 0))\n room_poly = Polygon(room_vertices)\n grid_points = self.create_grids(room_poly)\n objects = {\"door\": ((50, 50), 0, ((0, 0), (100, 0), (100, 100), (0, 100)), 1)}\n grid_points = self.remove_points(grid_points, objects)\n # self.visualize_grid(room_poly, grid_points, objects)\n\n object_dim = (200, 100)\n solutions = self.get_all_solutions(room_poly, grid_points, object_dim)\n solutions = self.filter_collision(objects, solutions)\n solutions = self.place_edge(room_poly, solutions, object_dim)\n\n # for i, solution in enumerate(solutions):\n # objects[f\"sofa-{i}\"] = solution\n # self.visualize_grid(room_poly, grid_points, objects)\n\n random.seed(0)\n objects[\"sofa\"] = random.choice(solutions)\n # self.visualize_grid(room_poly, grid_points, objects)\n object_1_dim = (100, 50)\n\n solutions_1 = self.get_all_solutions(room_poly, grid_points, object_1_dim)\n solutions_1 = self.filter_collision(objects, solutions_1)\n\n # random.seed(42)\n # for i, solution in enumerate(random.sample(solutions_1, 25)):\n # objects[f\"coffee table-{i}\"] = solution\n \n # objects[f\"coffee table\"] = [(300, 350), 0, ((350.0, 325.0), (350.0, 375.0), (250.0, 375.0), (250.0, 325.0), (350.0, 325.0)), 1.0]\n # self.visualize_grid(room_poly, grid_points, objects)\n \n solutions_1 = self.place_face_to(objects[\"sofa\"], solutions_1)\n solutions_1 = self.place_relative(\"in front of\", objects[\"sofa\"], solutions_1)\n solutions_1 = self.place_alignment_center(\"center alignment\", objects[\"sofa\"], solutions_1)\n solutions_1 = self.place_distance(\"near\", objects[\"sofa\"], solutions_1)\n objects[f\"coffee table\"] = solutions_1[-1]\n self.visualize_grid(room_poly, grid_points, objects)\n\n\n def test_milp_placement(self, simple=False, use_milp=True):\n room_vertices = ((0, 0), (0, 600), (800, 600), (800, 0))\n room_poly = Polygon(room_vertices)\n grid_points = self.create_grids(room_poly)\n\n if not simple:\n constraints = {'sofa-0': [{'type': 'global', 'constraint': 'edge'}],\n 'sofa-1': [{'type': 'global', 'constraint': 'edge'},\n {'type': 'distance', 'constraint': 'near', 'target': 'sofa-0'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'sofa-0'}],\n 'tv stand-0': [{'type': 'global', 'constraint': 'edge'},\n 
{'type': 'distance', 'constraint': 'far', 'target': 'sofa-1'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'sofa-1'}],\n 'coffee table-0': [{'type': 'global', 'constraint': 'middle'},\n {'type': 'distance', 'constraint': 'near', 'target': 'sofa-0'},\n {'type': 'relative', 'constraint': 'in front of', 'target': 'sofa-0'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'sofa-0'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'sofa-0'},\n {'type': 'direction', 'constraint': 'face to', 'target': 'tv stand-0'}],\n 'coffee table-1': [{'type': 'global', 'constraint': 'middle'},\n {'type': 'distance', 'constraint': 'near', 'target': 'sofa-1'},\n {'type': 'relative', 'constraint': 'in front of', 'target': 'sofa-1'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'sofa-1'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'sofa-1'},\n {'type': 'direction', 'constraint': 'face to', 'target': 'tv stand-0'}],\n 'side table-0': [{'type': 'global', 'constraint': 'edge'},\n {'type': 'distance', 'constraint': 'near', 'target': 'sofa-0'},\n {'type': 'relative', 'constraint': 'side of', 'target': 'sofa-0'}],\n 'side table-1': [{'type': 'global', 'constraint': 'edge'},\n {'type': 'distance', 'constraint': 'near', 'target': 'sofa-1'},\n {'type': 'relative', 'constraint': 'side of', 'target': 'sofa-1'}],\n 'armchair-0': [{'type': 'global', 'constraint': 'middle'},\n {'type': 'distance', 'constraint': 'near', 'target': 'coffee table-0'},\n {'type': 'direction', 'constraint': 'face to', 'target': 'coffee table-0'},\n {'type': 'direction', 'constraint': 'face to', 'target': 'coffee table-0'}],\n 'armchair-1': [{'type': 'global', 'constraint': 'middle'},\n {'type': 'distance', 'constraint': 'near', 'target': 'coffee table-1'},\n {'type': 'direction', 'constraint': 'face to', 'target': 'coffee table-1'},\n {'type': 'direction', 'constraint': 'face to', 'target': 'coffee table-1'}],\n 'bookshelf-0': [{'type': 'global', 'constraint': 'edge'},\n {'type': 'distance', 'constraint': 'far', 'target': 'tv stand-0'}],\n 'bookshelf-1': [{'type': 'global', 'constraint': 'edge'},\n {'type': 'distance', 'constraint': 'far', 'target': 'bookshelf-0'},\n {'type': 'alignment', 'constraint': 'center aligned', 'target': 'bookshelf-0'}]}\n\n initial_state = {'door-0': ((586.7550200520433, 550.0), 0, [(640.8300346432603, 500.0), (532.6800054608262, 500.0), (532.6800054608262, 600.0), (640.8300346432603, 600.0)], 1)}\n\n objects = [('sofa-0', (301.6667297651499, 106.48952360032415)),\n ('sofa-1', (301.6667297651499, 106.48952360032415)),\n ('tv stand-0', (201.0964714933229, 59.39910836195032)),\n ('coffee table-0', (69.15754261308616, 126.69169450358964)),\n ('coffee table-1', (69.15754261308616, 126.69169450358964)),\n ('side table-0', (61.74632023132328, 61.74453745262855)),\n ('side table-1', (61.74632023132328, 61.74453745262855)),\n ('armchair-0', (79.0368498902692, 89.4893987892571)),\n ('armchair-1', (79.0368498902692, 89.4893987892571)),\n ('bookshelf-0', (67.94689517917222, 43.8934937031396)),\n ('bookshelf-1', (67.94689517917222, 43.8934937031396))]\n solution = self.get_solution(room_poly, objects, constraints, initial_state, use_milp=use_milp)\n else:\n constraints = {'dining table': [{'type': 'global', 'constraint': 'edge'},\n {'type': 'distance', 'constraint': 'far', 'target': 'door'},\n {'type': 'distance', 'constraint': 'near', 'target': 'chair'}],\n 'chair': [{'type': 'relative', 'constraint': 'side of', 'target': 'dining 
table'}]\n }\n initial_state = {\"door\": ((50, 50), 0, ((0, 0), (100, 0), (100, 100), (0, 100)), 1)}\n objects = [(\"dining table\", (100, 50)), (\"chair\", (50, 50))]\n solution = self.get_solution(room_poly, objects, constraints, initial_state, use_milp=use_milp)\n\n print('milp solution:', len(solution))\n for object_name, object_properties in solution.items():\n print(object_name, object_properties)\n # if object_properties[2] == 90 or object_properties[2] == 270:\n self.visualize_grid(room_poly, grid_points, solution)" }, { "identifier": "DFS_Solver_Wall", "path": "modules/wall_objects.py", "snippet": "class DFS_Solver_Wall():\n def __init__(self, grid_size, random_seed=0, max_duration=5, constraint_bouns=100):\n self.grid_size = grid_size\n self.random_seed = random_seed\n self.max_duration = max_duration # maximum allowed time in seconds\n self.constraint_bouns = constraint_bouns\n self.start_time = None\n self.solutions = []\n self.visualize = False\n\n\n def get_solution(self, room_poly, wall_objects_list, constraints, initial_state):\n grid_points = self.create_grids(room_poly)\n\n self.start_time = time.time()\n try:\n self.dfs(room_poly, wall_objects_list, constraints, grid_points, initial_state)\n except SolutionFound as e:\n print(f\"Time taken: {time.time() - self.start_time}\")\n \n max_solution = self.get_max_solution(self.solutions)\n \n if self.visualize: self.visualize_grid(room_poly, grid_points, max_solution)\n return max_solution\n\n\n def get_max_solution(self, solutions):\n path_weights = []\n for solution in solutions:\n path_weights.append(sum([obj[-1] for obj in solution.values()]))\n max_index = np.argmax(path_weights)\n return solutions[max_index]\n\n\n def dfs(self, room_poly, wall_objects_list, constraints, grid_points, placed_objects):\n if len(wall_objects_list) == 0:\n self.solutions.append(placed_objects)\n return placed_objects\n \n if time.time() - self.start_time > self.max_duration:\n print(f\"Time limit reached.\")\n raise SolutionFound(self.solutions)\n \n object_name, object_dim = wall_objects_list[0]\n placements = self.get_possible_placements(room_poly, object_dim, constraints[object_name], grid_points, placed_objects)\n \n if len(placements) == 0:\n self.solutions.append(placed_objects)\n\n paths = []\n for placement in placements:\n placed_objects_updated = copy.deepcopy(placed_objects)\n placed_objects_updated[object_name] = placement\n\n sub_paths = self.dfs(room_poly, wall_objects_list[1:], constraints, grid_points, placed_objects_updated)\n paths.extend(sub_paths)\n\n return paths\n \n\n def get_possible_placements(self, room_poly, object_dim, constraint, grid_points, placed_objects):\n all_solutions = self.filter_collision(placed_objects, self.get_all_solutions(room_poly, grid_points, object_dim, constraint[\"height\"]))\n random.shuffle(all_solutions)\n target_floor_object_name = constraint[\"target_floor_object_name\"]\n if target_floor_object_name is not None and target_floor_object_name in placed_objects:\n all_solutions = self.score_solution_by_distance(all_solutions, placed_objects[target_floor_object_name])\n # order solutions by distance to target floor object\n all_solutions = sorted(all_solutions, key=lambda x: x[-1], reverse=True)\n return all_solutions\n\n\n def create_grids(self, room_poly):\n # Get the coordinates of the polygon\n poly_coords = list(room_poly.exterior.coords)\n\n grid_points = []\n # Iterate over each pair of points (edges of the polygon)\n for i in range(len(poly_coords) - 1):\n line = 
LineString([poly_coords[i], poly_coords[i + 1]])\n line_length = line.length\n\n # Create points along the edge at intervals of grid size\n for j in range(0, int(line_length), self.grid_size):\n point_on_line = substring(line, j, j) # Get a point at distance j from the start of the line\n if point_on_line:\n grid_points.append((point_on_line.x, point_on_line.y))\n \n return grid_points\n \n\n def get_all_solutions(self, room_poly, grid_points, object_dim, height):\n obj_length, obj_height, obj_width = object_dim\n obj_half_length = obj_length / 2\n\n rotation_adjustments = {\n 0: ((-obj_half_length, 0), (obj_half_length, obj_width)),\n 90: ((0, -obj_half_length), (obj_width, obj_half_length)),\n 180: ((-obj_half_length, -obj_width), (obj_half_length, 0)),\n 270: ((-obj_width, -obj_half_length), (0, obj_half_length))\n }\n\n solutions = []\n for rotation in [0, 90, 180, 270]:\n for point in grid_points:\n center_x, center_y = point\n lower_left_adjustment, upper_right_adjustment = rotation_adjustments[rotation]\n lower_left = (center_x + lower_left_adjustment[0], center_y + lower_left_adjustment[1])\n upper_right = (center_x + upper_right_adjustment[0], center_y + upper_right_adjustment[1])\n obj_box = box(*lower_left, *upper_right)\n\n if room_poly.contains(obj_box):\n object_coords = obj_box.exterior.coords[:]\n coordinates_on_edge = [coord for coord in object_coords if room_poly.boundary.contains(Point(coord))]\n coordinates_on_edge = list(set(coordinates_on_edge))\n if len(coordinates_on_edge) >= 2:\n vertex_min = (lower_left[0], height, lower_left[1])\n vertex_max = (upper_right[0], height + obj_height, upper_right[1])\n\n solutions.append([vertex_min, vertex_max, rotation, tuple(obj_box.exterior.coords[:]), 1])\n \n return solutions\n \n\n def filter_collision(self, placed_objects, solutions):\n def intersect_3d(box1, box2):\n # box1 and box2 are dictionaries with 'min' and 'max' keys,\n # which are tuples representing the minimum and maximum corners of the 3D box.\n for i in range(3):\n if box1['max'][i] < box2['min'][i] or box1['min'][i] > box2['max'][i]:\n return False\n return True\n\n valid_solutions = []\n boxes = [{\"min\": vertex_min, \"max\": vertex_max} for vertex_min, vertex_max, rotation, box_coords, path_weight in placed_objects.values()]\n\n for solution in solutions:\n for box in boxes:\n if intersect_3d(box, {\"min\": solution[0], \"max\": solution[1]}):\n break\n else:\n valid_solutions.append(solution)\n \n return valid_solutions\n \n\n def score_solution_by_distance(self, solutions, target_object):\n distances = []\n scored_solutions = []\n for solution in solutions:\n center_x, center_y, center_z = (solution[0][0]+solution[1][0])/2, (solution[0][1]+solution[1][1])/2, (solution[0][2]+solution[1][2])/2\n target_x, target_y, target_z = (target_object[0][0]+target_object[1][0])/2, (target_object[0][1]+target_object[1][1])/2, (target_object[0][2]+target_object[1][2])/2\n distance = np.sqrt((center_x - target_x)**2 + (center_y - target_y)**2 + (center_z - target_z)**2)\n distances.append(distance)\n scored_solution = solution.copy()\n scored_solution[-1] = solution[-1] + self.constraint_bouns * (1/distance)\n scored_solutions.append(scored_solution)\n return scored_solutions\n \n \n def visualize_grid(self, room_poly, grid_points, solutions):\n # create a new figure\n fig, ax = plt.subplots()\n\n # draw the room\n x, y = room_poly.exterior.xy\n ax.plot(x, y, 'b-', label='Room')\n\n # draw the grid points\n grid_x = [point[0] for point in grid_points]\n grid_y = [point[1] 
for point in grid_points]\n ax.plot(grid_x, grid_y, 'ro', markersize=2)\n\n # draw the solutions\n for object_name, solution in solutions.items():\n vertex_min, vertex_max, rotation, box_coords = solution[:-1]\n center_x, center_y = (vertex_min[0]+vertex_max[0])/2, (vertex_min[2]+vertex_max[2])/2\n\n # create a polygon for the solution\n obj_poly = Polygon(box_coords)\n x, y = obj_poly.exterior.xy\n ax.plot(x, y, 'g-', linewidth=2)\n\n ax.text(center_x, center_y, object_name, fontsize=12, ha='center')\n\n # set arrow direction based on rotation\n if rotation == 0:\n ax.arrow(center_x, center_y, 0, 25, head_width=10, fc='g')\n elif rotation == 90:\n ax.arrow(center_x, center_y, 25, 0, head_width=10, fc='g')\n elif rotation == 180:\n ax.arrow(center_x, center_y, 0, -25, head_width=10, fc='g')\n elif rotation == 270:\n ax.arrow(center_x, center_y, -25, 0, head_width=10, fc='g')\n\n ax.set_aspect('equal', 'box') # to keep the ratios equal along x and y axis\n plt.show()" } ]
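The DFS_Solver_Floor snippet above seeds its search with a lattice of candidate placement points clipped to the room polygon (create_grids). A standalone sketch of that step, assuming only shapely; the 500x500 room and 25cm spacing are invented for illustration:

from shapely import Point, Polygon

def create_grids(room_poly, grid_size):
    # bounds come back as (min_x, min_z, max_x, max_z) for the floor plane
    min_x, min_z, max_x, max_z = room_poly.bounds
    points = []
    for x in range(int(min_x), int(max_x), grid_size):
        for y in range(int(min_z), int(max_z), grid_size):
            if room_poly.contains(Point(x, y)):
                points.append((x, y))
    return points

room = Polygon([(0, 0), (0, 500), (500, 500), (500, 0)])
print(len(create_grids(room, 25)))  # candidate points strictly inside the room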
import re import copy import json import torch import random import multiprocessing import torch.nn.functional as F import modules.prompts as prompts from typing import Dict from colorama import Fore from shapely import Polygon from langchain import PromptTemplate from modules.floor_objects import DFS_Solver_Floor from modules.wall_objects import DFS_Solver_Wall
16,351
valid_candidates = [] for candidate in candidates: dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"] size = [dimension["x"], dimension["y"], dimension["z"]] if size[2] > size[0]: size = [size[2], size[1], size[0]] # make sure that x > z if size[0] > room_size[0] * self.object_size_tolerance: continue if size[1] > room_size[1] * self.object_size_tolerance: continue if size[2] > room_size[2] * self.object_size_tolerance: continue if size[0] * size[2] > room_size[0] * room_size[2] * 0.5: continue # TODO: consider using the floor area instead of the room area valid_candidates.append(candidate) return valid_candidates def check_thin_object(self, candidates): valid_candidates = [] for candidate in candidates: dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"] size = [dimension["x"], dimension["y"], dimension["z"]] if size[2] > min(size[0], size[1]) * self.thin_threshold: continue valid_candidates.append(candidate) return valid_candidates def random_select(self, candidates): if self.random_selection: selected_candidate = random.choice(candidates) else: scores = [candidate[1] for candidate in candidates] scores_tensor = torch.Tensor(scores) probas = F.softmax(scores_tensor, dim=0) # TODO: consider using normalized scores selected_index = torch.multinomial(probas, 1).item() selected_candidate = candidates[selected_index] return selected_candidate def update_floor_capacity(self, room2floor_capacity, scene): for room in scene["rooms"]: room_vertices = room["vertices"] room_poly = Polygon(room_vertices) for door in scene["doors"]: for door_vertices in door["doorBoxes"]: door_poly = Polygon(door_vertices) door_center = door_poly.centroid door_area = door_poly.area if room_poly.contains(door_center): room2floor_capacity[room["id"]][1] += door_area * 0.6 if scene["open_walls"] != []: for open_wall_vertices in scene["open_walls"]["openWallBoxes"]: open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid if room_poly.contains(open_wall_center): room2floor_capacity[room["id"]][1] += open_wall_poly.area * 0.6 return room2floor_capacity def update_wall_capacity(self, room2wall_capacity, scene): for room in scene["rooms"]: room_vertices = room["vertices"] room_poly = Polygon(room_vertices) for window in scene["windows"]: for window_vertices in window["windowBoxes"]: window_poly = Polygon(window_vertices) window_center = window_poly.centroid window_x = window_poly.bounds[2] - window_poly.bounds[0] window_y = window_poly.bounds[3] - window_poly.bounds[1] window_width = max(window_x, window_y) if room_poly.contains(window_center): room2wall_capacity[room["id"]][1] += window_width * 0.6 if scene["open_walls"] != []: for open_wall_vertices in scene["open_walls"]["openWallBoxes"]: open_wall_poly = Polygon(open_wall_vertices) open_wall_center = open_wall_poly.centroid open_wall_x = open_wall_poly.bounds[2] - open_wall_poly.bounds[0] open_wall_y = open_wall_poly.bounds[3] - open_wall_poly.bounds[1] open_wall_width = max(open_wall_x, open_wall_y) if room_poly.contains(open_wall_center): room2wall_capacity[room["id"]][1] += open_wall_width * 0.6 return room2wall_capacity def check_floor_placement(self, candidates, room_vertices, scene): room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices]) room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices]) grid_size = int(max(room_x // 20, room_z // 20)) solver = DFS_Solver_Floor(grid_size=grid_size) room_poly = 
Polygon(room_vertices) initial_state = self.get_initial_state_floor(room_vertices, scene, add_window=False) grid_points = solver.create_grids(room_poly) grid_points = solver.remove_points(grid_points, initial_state) valid_candidates = [] for candidate in candidates: object_size = self.database[candidate[0]]["assetMetadata"]["boundingBox"] object_dim = (object_size["x"]*100 + self.size_buffer, object_size["z"]*100 + self.size_buffer) solutions = solver.get_all_solutions(room_poly, grid_points, object_dim) solutions = solver.filter_collision(initial_state, solutions) solutions = solver.place_edge(room_poly, solutions, object_dim) if solutions != []: valid_candidates.append(candidate) else: print(f"Floor Object {candidate[0]} (size: {object_dim}) cannot be placed in room"); continue return valid_candidates def check_wall_placement(self, candidates, room_vertices, scene): room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices]) room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices]) grid_size = int(max(room_x // 20, room_z // 20))
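random_select in the cropped code above turns retrieval scores into a softmax distribution and samples one candidate instead of always taking the arg-max, which keeps scene generation varied. A minimal sketch of that sampling step, assuming only PyTorch; the candidate list is invented for illustration:

import torch
import torch.nn.functional as F

candidates = [("asset-a", 0.91), ("asset-b", 0.85), ("asset-c", 0.40)]
scores = torch.tensor([score for _, score in candidates])
probas = F.softmax(scores, dim=0)                 # higher score -> higher probability
selected = candidates[torch.multinomial(probas, 1).item()]
print(selected)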
# NOTE: import block reconstructed for readability; the third-party names
# (colorama, shapely, langchain) follow their usage below, and the
# project-local modules (prompts, DFS_Solver_Floor, DFS_Solver_Wall) are
# assumptions about the surrounding project layout.
import copy
import json
import multiprocessing
import random
import re
from typing import Dict

import torch
import torch.nn.functional as F
from colorama import Fore
from langchain import PromptTemplate
from shapely.geometry import Polygon

import prompts  # project-local prompt templates (assumed module name)
from floor_objects import DFS_Solver_Floor  # project-local solvers (assumed paths)
from wall_objects import DFS_Solver_Wall


class ObjectSelector:
    def __init__(self, object_retriever, llm):
        # object retriever
        self.object_retriever = object_retriever
        self.database = object_retriever.database

        # language model and prompt templates
        self.llm = llm
        self.object_selection_template_1 = prompts.object_selection_prompt_new_1
        self.object_selection_template_2 = PromptTemplate(
            input_variables=["object_selection_prompt_new_1", "object_selection_1", "room"],
            template=prompts.object_selection_prompt_new_2,
        )

        # hyperparameters
        self.floor_capacity_ratio = 0.4
        self.wall_capacity_ratio = 0.5
        self.object_size_tolerance = 0.8
        self.similarity_threshold_floor = 31  # need to be tuned
        self.similarity_threshold_wall = 31  # need to be tuned
        self.thin_threshold = 3
        self.used_assets = []
        self.consider_size = True
        self.size_buffer = 10

        self.random_selection = False
        self.reuse_selection = False
        self.multiprocessing = True

    def select_objects(self, scene, additional_requirements="N/A"):
        rooms_types = [room["roomType"] for room in scene["rooms"]]
        room2area = {room["roomType"]: self.get_room_area(room) for room in scene["rooms"]}
        room2size = {room["roomType"]: self.get_room_size(room, scene["wall_height"]) for room in scene["rooms"]}
        room2perimeter = {room["roomType"]: self.get_room_perimeter(room) for room in scene["rooms"]}
        room2vertices = {room["roomType"]: [(x * 100, y * 100) for (x, y) in room["vertices"]] for room in scene["rooms"]}

        room2floor_capacity = {room_type: [room_area * self.floor_capacity_ratio, 0] for room_type, room_area in room2area.items()}
        room2floor_capacity = self.update_floor_capacity(room2floor_capacity, scene)
        room2wall_capacity = {room_type: [room_perimeter * self.wall_capacity_ratio, 0] for room_type, room_perimeter in room2perimeter.items()}
        selected_objects = {room["roomType"]: {"floor": [], "wall": []} for room in scene["rooms"]}

        if "object_selection_plan" in scene:
            object_selection_plan = scene["object_selection_plan"]
            if self.reuse_selection:
                selected_objects = scene["selected_objects"]
            else:
                for room_type in rooms_types:
                    floor_objects, _, wall_objects, _ = self.get_objects_by_room(
                        object_selection_plan[room_type],
                        scene,
                        room2size[room_type],
                        room2floor_capacity[room_type],
                        room2wall_capacity[room_type],
                        room2vertices[room_type],
                    )
                    selected_objects[room_type]["floor"] = floor_objects
                    selected_objects[room_type]["wall"] = wall_objects
        else:
            object_selection_plan = {room["roomType"]: [] for room in scene["rooms"]}
            packed_args = [
                (room_type, scene, additional_requirements, room2size, room2floor_capacity, room2wall_capacity, room2vertices)
                for room_type in rooms_types
            ]
            if self.multiprocessing:
                pool = multiprocessing.Pool(processes=4)
                results = pool.map(self.plan_room, packed_args)
                pool.close()
                pool.join()
            else:
                results = [self.plan_room(args) for args in packed_args]

            for room_type, result in results:
                selected_objects[room_type]["floor"] = result["floor"]
                selected_objects[room_type]["wall"] = result["wall"]
                object_selection_plan[room_type] = result["plan"]

        print(f"\n{Fore.GREEN}AI: Here is the object selection plan:\n{object_selection_plan}{Fore.RESET}")
        return object_selection_plan, selected_objects

    def plan_room(self, args):
        room_type, scene, additional_requirements, room2size, room2floor_capacity, room2wall_capacity, room2vertices = args
        print(f"\n{Fore.GREEN}AI: Selecting objects for {room_type}...{Fore.RESET}\n")

        result = {}
        room_size_str = f"{int(room2size[room_type][0])*100}cm in length, {int(room2size[room_type][1])*100}cm in width, {int(room2size[room_type][2])*100}cm in height"

        prompt_1 = (
            self.object_selection_template_1
            .replace("INPUT", scene["query"])
            .replace("ROOM_TYPE", room_type)
            .replace("ROOM_SIZE", room_size_str)
            .replace("REQUIREMENTS", additional_requirements)
        )
        # print(f"\nUser: {prompt_1}\n")

        output_1 = self.llm(prompt_1).lower()
        plan_1 = self.extract_json(output_1)
        if plan_1 is None:
            print(f"Error while extracting the JSON for {room_type}.")
            return room_type, result  # keep the (room_type, result) shape expected by select_objects

        floor_objects, floor_capacity, wall_objects, wall_capacity = self.get_objects_by_room(
            plan_1, scene, room2size[room_type], room2floor_capacity[room_type], room2wall_capacity[room_type], room2vertices[room_type]
        )

        if floor_capacity[1] / floor_capacity[0] >= 0.8:
            result["floor"] = floor_objects
            result["wall"] = wall_objects
            result["plan"] = plan_1
        else:
            print(f"{Fore.RED}AI: The floor capacity of {room_type} is {floor_capacity[1]}m^2, which is less than 80% of the total floor capacity {floor_capacity[0]}m^2.{Fore.RESET}")
            prompt_2 = self.object_selection_template_2.format(
                object_selection_prompt_new_1=prompt_1, object_selection_1=output_1, room=room_type
            )
            output_2 = self.llm(prompt_2).lower()
            plan_2 = self.extract_json(output_2)

            new_plan = copy.deepcopy(plan_1)
            for object in plan_2:
                new_plan[object] = plan_2[object]

            floor_objects, _, wall_objects, _ = self.get_objects_by_room(
                new_plan, scene, room2size[room_type], room2floor_capacity[room_type], room2wall_capacity[room_type], room2vertices[room_type]
            )
            result["floor"] = floor_objects
            result["wall"] = wall_objects
            result["plan"] = new_plan
        return room_type, result

    def extract_json(self, input_string):
        # Using regex to identify the JSON structure in the string
        json_match = re.search(r"{.*}", input_string, re.DOTALL)
        if json_match:
            extracted_json = json_match.group(0)
            try:
                # Convert the extracted JSON string into a Python dictionary
                json_dict = json.loads(extracted_json)
                json_dict = self.check_dict(json_dict)
                return json_dict
            except json.JSONDecodeError:
                print(input_string)
                print("Error while decoding the JSON.")
                return None
        else:
            print("No valid JSON found.")
            return None

    def check_dict(self, dict):
        valid = True
        attributes = ["description", "location", "size", "quantity", "variance_type", "objects_on_top"]
        for key, value in dict.items():
            if not isinstance(key, str):
                valid = False
                break
            if not isinstance(value, Dict):
                valid = False
                break
            for attribute in attributes:
                if attribute not in value:
                    valid = False
                    break
            if not isinstance(value["description"], str):
                valid = False
                break
            if value["location"] not in ["floor", "wall"]:
                dict[key]["location"] = "floor"
            if not isinstance(value["size"], list) or len(value["size"]) != 3 or not all(isinstance(i, int) for i in value["size"]):
                dict[key]["size"] = None
            if not isinstance(value["quantity"], int):
                dict[key]["quantity"] = 1
            if not isinstance(value["variance_type"], str) or value["variance_type"] not in ["same", "varied"]:
                dict[key]["variance_type"] = "same"
            if not isinstance(value["objects_on_top"], list):
                dict[key]["objects_on_top"] = []
            for i, child in enumerate(value["objects_on_top"]):
                if not isinstance(child, Dict):
                    valid = False
                    break
                for attribute in ["object_name", "quantity", "variance_type"]:
                    if attribute not in child:
                        valid = False
                        break
                if not isinstance(child["object_name"], str):
                    valid = False
                    break
                if not isinstance(child["quantity"], int):
                    dict[key]["objects_on_top"][i]["quantity"] = 1
                if not isinstance(child["variance_type"], str) or child["variance_type"] not in ["same", "varied"]:
                    dict[key]["objects_on_top"][i]["variance_type"] = "same"
        if not valid:
            return None
        else:
            return dict

    def get_objects_by_room(self, parsed_plan, scene, room_size, floor_capacity, wall_capacity, vertices):
        # get the floor and wall objects
        floor_object_list = []
        wall_object_list = []
        for object_name, object_info in parsed_plan.items():
            object_info["object_name"] = object_name
            if object_info["location"] == "floor":
                floor_object_list.append(object_info)
            else:
                wall_object_list.append(object_info)

        floor_objects, floor_capacity = self.get_floor_objects(floor_object_list, floor_capacity, room_size, vertices, scene)
        wall_objects, wall_capacity = self.get_wall_objects(wall_object_list, wall_capacity, room_size, vertices, scene)
        return floor_objects, floor_capacity, wall_objects, wall_capacity

    def get_room_size(self, room, wall_height):
        floor_polygon = room["floorPolygon"]
        x_values = [point["x"] for point in floor_polygon]
        z_values = [point["z"] for point in floor_polygon]
        x_dim = max(x_values) - min(x_values)
        z_dim = max(z_values) - min(z_values)
        if x_dim > z_dim:
            return (x_dim, wall_height, z_dim)
        else:
            return (z_dim, wall_height, x_dim)

    def get_room_area(self, room):
        room_vertices = room["vertices"]
        room_polygon = Polygon(room_vertices)
        return room_polygon.area

    def get_room_perimeter(self, room):
        room_vertices = room["vertices"]
        room_polygon = Polygon(room_vertices)
        return room_polygon.length

    def get_floor_objects(self, floor_object_list, floor_capacity, room_size, room_vertices, scene):
        selected_floor_objects_all = []
        for floor_object in floor_object_list:
            object_type = floor_object["object_name"]
            object_description = floor_object["description"]
            object_size = floor_object["size"]
            quantity = min(floor_object["quantity"], 10)
            variance_type = floor_object["variance_type"]

            candidates = self.object_retriever.retrieve(
                [f"a 3D model of {object_type}, {object_description}"], self.similarity_threshold_floor
            )

            # check on floor objects
            candidates = [candidate for candidate in candidates if self.database[candidate[0]]["annotations"]["onFloor"] == True]  # only select objects on the floor
            candidates = [candidate for candidate in candidates if self.database[candidate[0]]["annotations"]["onCeiling"] == False]  # only select objects not on the ceiling

            # ignore doors and windows and frames
            candidates = [candidate for candidate in candidates if "door" not in self.database[candidate[0]]["annotations"]["category"].lower()]
            candidates = [candidate for candidate in candidates if "window" not in self.database[candidate[0]]["annotations"]["category"].lower()]
            candidates = [candidate for candidate in candidates if "frame" not in self.database[candidate[0]]["annotations"]["category"].lower()]

            # check if the object is too big
            candidates = self.check_object_size(candidates, room_size)

            # check if object can be placed on the floor
            candidates = self.check_floor_placement(candidates[:20], room_vertices, scene)

            # No candidates found
            if len(candidates) == 0:
                print("No candidates found for {} {}".format(object_type, object_description))
                continue

            # remove used assets
            top_one_candidate = candidates[0]
            if len(candidates) > 1:
                candidates = [candidate for candidate in candidates if candidate[0] not in self.used_assets]
            if len(candidates) == 0:
                candidates = [top_one_candidate]

            # consider object size difference
            if object_size is not None and self.consider_size:
                candidates = self.object_retriever.compute_size_difference(object_size, candidates)

            candidates = candidates[:10]  # only select top 10 candidates

            selected_asset_ids = []
            if variance_type == "same":
                selected_candidate = self.random_select(candidates)
                selected_asset_id = selected_candidate[0]
                selected_asset_ids = [selected_asset_id] * quantity
            elif variance_type == "varied":
                for i in range(quantity):
                    selected_candidate = self.random_select(candidates)
                    selected_asset_id = selected_candidate[0]
                    selected_asset_ids.append(selected_asset_id)
                    if len(candidates) > 1:
                        candidates.remove(selected_candidate)

            for i in range(quantity):
                selected_asset_id = selected_asset_ids[i]
                object_name = f"{object_type}-{i}"
                selected_floor_objects_all.append((object_name, selected_asset_id))

        # reselect objects if they exceed floor capacity, consider the diversity of objects
        selected_floor_objects = []
        while True:
            if len(selected_floor_objects_all) == 0:
                break
            current_selected_asset_ids = []
            current_number_of_objects = len(selected_floor_objects)
            for object_name, selected_asset_id in selected_floor_objects_all:
                if selected_asset_id not in current_selected_asset_ids:
                    selected_asset_size = self.database[selected_asset_id]["assetMetadata"]["boundingBox"]
                    selected_asset_capacity = selected_asset_size["x"] * selected_asset_size["z"]
                    if floor_capacity[1] + selected_asset_capacity > floor_capacity[0] and len(selected_floor_objects) > 0:
                        print(f"{object_type} {object_description} exceeds floor capacity")
                    else:
                        current_selected_asset_ids.append(selected_asset_id)
                        selected_floor_objects.append((object_name, selected_asset_id))
                        selected_floor_objects_all.remove((object_name, selected_asset_id))
                        floor_capacity = (floor_capacity[0], floor_capacity[1] + selected_asset_capacity)
            if len(selected_floor_objects) == current_number_of_objects:
                print("No more objects can be added")
                break

        # sort objects by object type
        object_type2objects = {}
        for object_name, selected_asset_id in selected_floor_objects:
            object_type = object_name.split("-")[0]
            if object_type not in object_type2objects:
                object_type2objects[object_type] = []
            object_type2objects[object_type].append((object_name, selected_asset_id))

        selected_floor_objects_ordered = []
        for object_type in object_type2objects:
            selected_floor_objects_ordered += sorted(object_type2objects[object_type])

        return selected_floor_objects_ordered, floor_capacity

    def get_wall_objects(self, wall_object_list, wall_capacity, room_size, room_vertices, scene):
        selected_wall_objects_all = []
        for wall_object in wall_object_list:
            object_type = wall_object["object_name"]
            object_description = wall_object["description"]
            object_size = wall_object["size"]
            quantity = min(wall_object["quantity"], 10)
            variance_type = wall_object["variance_type"]

            candidates = self.object_retriever.retrieve(
                [f"a 3D model of {object_type}, {object_description}"], self.similarity_threshold_wall
            )

            # check on wall objects
            candidates = [candidate for candidate in candidates if self.database[candidate[0]]["annotations"]["onWall"] == True]  # only select objects on the wall

            # ignore doors and windows
            candidates = [candidate for candidate in candidates if "door" not in self.database[candidate[0]]["annotations"]["category"].lower()]
            candidates = [candidate for candidate in candidates if "window" not in self.database[candidate[0]]["annotations"]["category"].lower()]

            # check if the object is too big
            candidates = self.check_object_size(candidates, room_size)

            # check thin objects
            candidates = self.check_thin_object(candidates)

            # check if object can be placed on the wall
            candidates = self.check_wall_placement(candidates[:20], room_vertices, scene)

            if len(candidates) == 0:
                print("No candidates found for {} {}".format(object_type, object_description))
                continue

            # remove used assets
            top_one_candidate = candidates[0]
            if len(candidates) > 1:
                candidates = [candidate for candidate in candidates if candidate[0] not in self.used_assets]
            if len(candidates) == 0:
                candidates = [top_one_candidate]

            # consider object size difference
            if object_size is not None and self.consider_size:
                candidates = self.object_retriever.compute_size_difference(object_size, candidates)

            candidates = candidates[:10]  # only select top 10 candidates

            selected_asset_ids = []
            if variance_type == "same":
                selected_candidate = self.random_select(candidates)
                selected_asset_id = selected_candidate[0]
                selected_asset_ids = [selected_asset_id] * quantity
            elif variance_type == "varied":
                for i in range(quantity):
                    selected_candidate = self.random_select(candidates)
                    selected_asset_id = selected_candidate[0]
                    selected_asset_ids.append(selected_asset_id)
                    if len(candidates) > 1:
                        candidates.remove(selected_candidate)

            for i in range(quantity):
                selected_asset_id = selected_asset_ids[i]
                object_name = f"{object_type}-{i}"
                selected_wall_objects_all.append((object_name, selected_asset_id))

        # reselect objects if they exceed wall capacity, consider the diversity of objects
        selected_wall_objects = []
        while True:
            if len(selected_wall_objects_all) == 0:
                break
            current_selected_asset_ids = []
            current_number_of_objects = len(selected_wall_objects)
            for object_name, selected_asset_id in selected_wall_objects_all:
                if selected_asset_id not in current_selected_asset_ids:
                    selected_asset_size = self.database[selected_asset_id]["assetMetadata"]["boundingBox"]
                    selected_asset_capacity = selected_asset_size["x"]
                    if wall_capacity[1] + selected_asset_capacity > wall_capacity[0] and len(selected_wall_objects) > 0:
                        print(f"{object_type} {object_description} exceeds wall capacity")
                    else:
                        current_selected_asset_ids.append(selected_asset_id)
                        selected_wall_objects.append((object_name, selected_asset_id))
                        selected_wall_objects_all.remove((object_name, selected_asset_id))
                        wall_capacity = (wall_capacity[0], wall_capacity[1] + selected_asset_capacity)
            if len(selected_wall_objects) == current_number_of_objects:
                print("No more objects can be added")
                break

        # sort objects by object type
        object_type2objects = {}
        for object_name, selected_asset_id in selected_wall_objects:
            object_type = object_name.split("-")[0]
            if object_type not in object_type2objects:
                object_type2objects[object_type] = []
            object_type2objects[object_type].append((object_name, selected_asset_id))

        selected_wall_objects_ordered = []
        for object_type in object_type2objects:
            selected_wall_objects_ordered += sorted(object_type2objects[object_type])

        return selected_wall_objects_ordered, wall_capacity

    def check_object_size(self, candidates, room_size):
        valid_candidates = []
        for candidate in candidates:
            dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"]
            size = [dimension["x"], dimension["y"], dimension["z"]]
            if size[2] > size[0]:
                size = [size[2], size[1], size[0]]  # make sure that x > z
            if size[0] > room_size[0] * self.object_size_tolerance:
                continue
            if size[1] > room_size[1] * self.object_size_tolerance:
                continue
            if size[2] > room_size[2] * self.object_size_tolerance:
                continue
            # TODO: consider using the floor area instead of the room area
            if size[0] * size[2] > room_size[0] * room_size[2] * 0.5:
                continue
            valid_candidates.append(candidate)
        return valid_candidates

    def check_thin_object(self, candidates):
        valid_candidates = []
        for candidate in candidates:
            dimension = self.database[candidate[0]]["assetMetadata"]["boundingBox"]
            size = [dimension["x"], dimension["y"], dimension["z"]]
            if size[2] > min(size[0], size[1]) * self.thin_threshold:
                continue
            valid_candidates.append(candidate)
        return valid_candidates

    def random_select(self, candidates):
        if self.random_selection:
            selected_candidate = random.choice(candidates)
        else:
            scores = [candidate[1] for candidate in candidates]
            scores_tensor = torch.Tensor(scores)
            probas = F.softmax(scores_tensor, dim=0)  # TODO: consider using normalized scores
            selected_index = torch.multinomial(probas, 1).item()
            selected_candidate = candidates[selected_index]
        return selected_candidate

    def update_floor_capacity(self, room2floor_capacity, scene):
        for room in scene["rooms"]:
            room_vertices = room["vertices"]
            room_poly = Polygon(room_vertices)
            for door in scene["doors"]:
                for door_vertices in door["doorBoxes"]:
                    door_poly = Polygon(door_vertices)
                    door_center = door_poly.centroid
                    door_area = door_poly.area
                    if room_poly.contains(door_center):
                        room2floor_capacity[room["id"]][1] += door_area * 0.6
            if scene["open_walls"] != []:
                for open_wall_vertices in scene["open_walls"]["openWallBoxes"]:
                    open_wall_poly = Polygon(open_wall_vertices)
                    open_wall_center = open_wall_poly.centroid
                    if room_poly.contains(open_wall_center):
                        room2floor_capacity[room["id"]][1] += open_wall_poly.area * 0.6
        return room2floor_capacity

    def update_wall_capacity(self, room2wall_capacity, scene):
        for room in scene["rooms"]:
            room_vertices = room["vertices"]
            room_poly = Polygon(room_vertices)
            for window in scene["windows"]:
                for window_vertices in window["windowBoxes"]:
                    window_poly = Polygon(window_vertices)
                    window_center = window_poly.centroid
                    window_x = window_poly.bounds[2] - window_poly.bounds[0]
                    window_y = window_poly.bounds[3] - window_poly.bounds[1]
                    window_width = max(window_x, window_y)
                    if room_poly.contains(window_center):
                        room2wall_capacity[room["id"]][1] += window_width * 0.6
            if scene["open_walls"] != []:
                for open_wall_vertices in scene["open_walls"]["openWallBoxes"]:
                    open_wall_poly = Polygon(open_wall_vertices)
                    open_wall_center = open_wall_poly.centroid
                    open_wall_x = open_wall_poly.bounds[2] - open_wall_poly.bounds[0]
                    open_wall_y = open_wall_poly.bounds[3] - open_wall_poly.bounds[1]
                    open_wall_width = max(open_wall_x, open_wall_y)
                    if room_poly.contains(open_wall_center):
                        room2wall_capacity[room["id"]][1] += open_wall_width * 0.6
        return room2wall_capacity

    def check_floor_placement(self, candidates, room_vertices, scene):
        room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices])
        room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices])
        grid_size = int(max(room_x // 20, room_z // 20))
        solver = DFS_Solver_Floor(grid_size=grid_size)
        room_poly = Polygon(room_vertices)
        initial_state = self.get_initial_state_floor(room_vertices, scene, add_window=False)
        grid_points = solver.create_grids(room_poly)
        grid_points = solver.remove_points(grid_points, initial_state)
        valid_candidates = []
        for candidate in candidates:
            object_size = self.database[candidate[0]]["assetMetadata"]["boundingBox"]
            object_dim = (object_size["x"] * 100 + self.size_buffer, object_size["z"] * 100 + self.size_buffer)
            solutions = solver.get_all_solutions(room_poly, grid_points, object_dim)
            solutions = solver.filter_collision(initial_state, solutions)
            solutions = solver.place_edge(room_poly, solutions, object_dim)
            if solutions != []:
                valid_candidates.append(candidate)
            else:
                print(f"Floor Object {candidate[0]} (size: {object_dim}) cannot be placed in room")
                continue
        return valid_candidates

    def check_wall_placement(self, candidates, room_vertices, scene):
        room_x = max([vertex[0] for vertex in room_vertices]) - min([vertex[0] for vertex in room_vertices])
        room_z = max([vertex[1] for vertex in room_vertices]) - min([vertex[1] for vertex in room_vertices])
        grid_size = int(max(room_x // 20, room_z // 20))
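To make the capacity bookkeeping above concrete: each room starts with a floor budget of floor_capacity_ratio times its area, and a door inside the room pre-consumes 60% of its box area, as in update_floor_capacity. A small sketch with shapely, using the same 0.4 and 0.6 ratios as the class above; the room and door coordinates are invented:

from shapely.geometry import Polygon

floor_capacity_ratio = 0.4  # same ratio as the ObjectSelector hyperparameter

room = Polygon([(0, 0), (5, 0), (5, 4), (0, 4)])  # 5m x 4m room (invented)
door = Polygon([(2, 0), (3, 0), (3, 1), (2, 1)])  # 1m x 1m door box (invented)

capacity = [room.area * floor_capacity_ratio, 0.0]  # [budget, used]
if room.contains(door.centroid):
    capacity[1] += door.area * 0.6  # the door footprint counts against the budget

print(capacity)  # [8.0, 0.6]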
solver = DFS_Solver_Wall(grid_size=grid_size)
1
2023-12-08 19:19:57+00:00
24k
modelscope/richdreamer
threestudio/models/geometry/tetrahedra_sdf_grid.py
[ { "identifier": "BaseExplicitGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseExplicitGeometry(BaseGeometry):\n @dataclass\n class Config(BaseGeometry.Config):\n radius: float = 1.0\n\n cfg: Config\n\n def configure(self) -> None:\n self.bbox: Float[Tensor, \"2 3\"]\n self.register_buffer(\n \"bbox\",\n torch.as_tensor(\n [\n [-self.cfg.radius, -self.cfg.radius, -self.cfg.radius],\n [self.cfg.radius, self.cfg.radius, self.cfg.radius],\n ],\n dtype=torch.float32,\n ),\n )" }, { "identifier": "BaseGeometry", "path": "threestudio/models/geometry/base.py", "snippet": "class BaseGeometry(BaseModule):\n @dataclass\n class Config(BaseModule.Config):\n pass\n\n cfg: Config\n\n @staticmethod\n def create_from(\n other: \"BaseGeometry\", cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> \"BaseGeometry\":\n raise TypeError(\n f\"Cannot create {BaseGeometry.__name__} from {other.__class__.__name__}\"\n )\n\n def export(self, *args, **kwargs) -> Dict[str, Any]:\n return {}" }, { "identifier": "contract_to_unisphere", "path": "threestudio/models/geometry/base.py", "snippet": "def contract_to_unisphere(\n x: Float[Tensor, \"... 3\"], bbox: Float[Tensor, \"2 3\"], unbounded: bool = False\n) -> Float[Tensor, \"... 3\"]:\n if unbounded:\n x = scale_tensor(x, bbox, (0, 1))\n x = x * 2 - 1 # aabb is at [-1, 1]\n mag = x.norm(dim=-1, keepdim=True)\n mask = mag.squeeze(-1) > 1\n x[mask] = (2 - 1 / mag[mask]) * (x[mask] / mag[mask])\n x = x / 4 + 0.5 # [-inf, inf] is at [0, 1]\n else:\n x = scale_tensor(x, bbox, (0, 1))\n return x" }, { "identifier": "ImplicitSDF", "path": "threestudio/models/geometry/implicit_sdf.py", "snippet": "class ImplicitSDF(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: Union[\n float, str\n ] = 0.01 # in [float, \"progressive\"]\n shape_init: Optional[str] = None\n shape_init_params: Optional[Any] = None\n shape_init_mesh_up: str = \"+z\"\n shape_init_mesh_front: str = \"+x\"\n force_shape_init: bool = False\n sdf_bias: Union[float, str] = 0.0\n sdf_bias_params: Optional[Any] = None\n\n # no need to removal outlier for SDF\n isosurface_remove_outliers: bool = False\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.sdf_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n if self.cfg.isosurface_deformable_grid:\n assert (\n self.cfg.isosurface_method == \"mt\"\n ), \"isosurface_deformable_grid only works with 
mt\"\n self.deformation_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n self.finite_difference_normal_eps: Optional[float] = None\n\n def initialize_shape(self) -> None:\n if self.cfg.shape_init is None and not self.cfg.force_shape_init:\n return\n\n # do not initialize shape if weights are provided\n if self.cfg.weights is not None and not self.cfg.force_shape_init:\n return\n\n if self.cfg.sdf_bias != 0.0:\n threestudio.warn(\n \"shape_init and sdf_bias are both specified, which may lead to unexpected results.\"\n )\n\n get_gt_sdf: Callable[[Float[Tensor, \"N 3\"]], Float[Tensor, \"N 1\"]]\n assert isinstance(self.cfg.shape_init, str)\n if self.cfg.shape_init == \"ellipsoid\":\n assert (\n isinstance(self.cfg.shape_init_params, Sized)\n and len(self.cfg.shape_init_params) == 3\n )\n size = torch.as_tensor(self.cfg.shape_init_params).to(self.device)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return ((points_rand / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n\n get_gt_sdf = func\n elif self.cfg.shape_init == \"sphere\":\n assert isinstance(self.cfg.shape_init_params, float)\n radius = self.cfg.shape_init_params\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius\n\n get_gt_sdf = func\n elif self.cfg.shape_init.startswith(\"mesh:\"):\n assert isinstance(self.cfg.shape_init_params, float)\n mesh_path = self.cfg.shape_init[5:]\n if not os.path.exists(mesh_path):\n raise ValueError(f\"Mesh file {mesh_path} does not exist.\")\n\n import trimesh\n\n scene = trimesh.load(mesh_path)\n if isinstance(scene, trimesh.Trimesh):\n mesh = scene\n elif isinstance(scene, trimesh.scene.Scene):\n mesh = trimesh.Trimesh()\n for obj in scene.geometry.values():\n mesh = trimesh.util.concatenate([mesh, obj])\n else:\n raise ValueError(f\"Unknown mesh type at {mesh_path}.\")\n\n # move to center\n centroid = mesh.vertices.mean(0)\n mesh.vertices = mesh.vertices - centroid\n\n # align to up-z and front-x\n dirs = [\"+x\", \"+y\", \"+z\", \"-x\", \"-y\", \"-z\"]\n dir2vec = {\n \"+x\": np.array([1, 0, 0]),\n \"+y\": np.array([0, 1, 0]),\n \"+z\": np.array([0, 0, 1]),\n \"-x\": np.array([-1, 0, 0]),\n \"-y\": np.array([0, -1, 0]),\n \"-z\": np.array([0, 0, -1]),\n }\n if (\n self.cfg.shape_init_mesh_up not in dirs\n or self.cfg.shape_init_mesh_front not in dirs\n ):\n raise ValueError(\n f\"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}.\"\n )\n if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]:\n raise ValueError(\n \"shape_init_mesh_up and shape_init_mesh_front must be orthogonal.\"\n )\n z_, x_ = (\n dir2vec[self.cfg.shape_init_mesh_up],\n dir2vec[self.cfg.shape_init_mesh_front],\n )\n y_ = np.cross(z_, x_)\n std2mesh = np.stack([x_, y_, z_], axis=0).T\n mesh2std = np.linalg.inv(std2mesh)\n\n # scaling\n scale = np.abs(mesh.vertices).max()\n mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params\n mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T\n\n from pysdf import SDF\n\n sdf = SDF(mesh.vertices, mesh.faces)\n\n def func(points_rand: Float[Tensor, \"N 3\"]) -> Float[Tensor, \"N 1\"]:\n # add a negative signed here\n # as in pysdf the inside of the shape has positive signed distance\n return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to(\n points_rand\n )[..., None]\n\n get_gt_sdf = func\n\n else:\n raise ValueError(\n 
f\"Unknown shape initialization type: {self.cfg.shape_init}\"\n )\n\n # Initialize SDF to a given shape when no weights are provided or force_shape_init is True\n optim = torch.optim.Adam(self.parameters(), lr=1e-3)\n from tqdm import tqdm\n\n for _ in tqdm(\n range(1000),\n desc=f\"Initializing SDF to a(n) {self.cfg.shape_init}:\",\n disable=get_rank() != 0,\n ):\n points_rand = (\n torch.rand((10000, 3), dtype=torch.float32).to(self.device) * 2.0 - 1.0\n )\n sdf_gt = get_gt_sdf(points_rand)\n sdf_pred = self.forward_sdf(points_rand)\n loss = F.mse_loss(sdf_pred, sdf_gt)\n optim.zero_grad()\n loss.backward()\n optim.step()\n\n # explicit broadcast to ensure param consistency across ranks\n for param in self.parameters():\n broadcast(param, src=0)\n\n def get_shifted_sdf(\n self, points: Float[Tensor, \"*N Di\"], sdf: Float[Tensor, \"*N 1\"]\n ) -> Float[Tensor, \"*N 1\"]:\n sdf_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.sdf_bias == \"ellipsoid\":\n assert (\n isinstance(self.cfg.sdf_bias_params, Sized)\n and len(self.cfg.sdf_bias_params) == 3\n )\n size = torch.as_tensor(self.cfg.sdf_bias_params).to(points)\n sdf_bias = ((points / size) ** 2).sum(\n dim=-1, keepdim=True\n ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid\n elif self.cfg.sdf_bias == \"sphere\":\n assert isinstance(self.cfg.sdf_bias_params, float)\n radius = self.cfg.sdf_bias_params\n sdf_bias = (points**2).sum(dim=-1, keepdim=True).sqrt() - radius\n elif isinstance(self.cfg.sdf_bias, float):\n sdf_bias = self.cfg.sdf_bias\n else:\n raise ValueError(f\"Unknown sdf bias {self.cfg.sdf_bias}\")\n return sdf + sdf_bias\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).view(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n output = {\"sdf\": sdf}\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n assert self.finite_difference_normal_eps is not None\n eps: float = self.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 6 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (\n 0.5\n * (sdf_offset[..., 0::2, 0] - sdf_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 
3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n sdf_offset: Float[Tensor, \"... 3 1\"] = self.forward_sdf(\n points_offset\n )\n sdf_grad = (sdf_offset[..., 0::1, 0] - sdf) / eps\n normal = F.normalize(sdf_grad, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n sdf_grad = normal\n elif self.cfg.normal_type == \"analytic\":\n sdf_grad = -torch.autograd.grad(\n sdf,\n points_unscaled,\n grad_outputs=torch.ones_like(sdf),\n create_graph=True,\n )[0]\n normal = F.normalize(sdf_grad, dim=-1)\n if not grad_enabled:\n sdf_grad = sdf_grad.detach()\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update(\n {\"normal\": normal, \"shading_normal\": normal, \"sdf_grad\": sdf_grad}\n )\n return output\n\n def forward_sdf(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n sdf = self.sdf_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n return sdf\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n sdf = self.sdf_network(enc).reshape(*points.shape[:-1], 1)\n sdf = self.get_shifted_sdf(points_unscaled, sdf)\n deformation: Optional[Float[Tensor, \"*N 3\"]] = None\n if self.cfg.isosurface_deformable_grid:\n deformation = self.deformation_network(enc).reshape(*points.shape[:-1], 3)\n return sdf, deformation\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return field - threshold\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n def update_step(self, epoch: int, global_step: int, on_load_weights: bool = False):\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n if isinstance(self.cfg.finite_difference_normal_eps, float):\n self.finite_difference_normal_eps = (\n self.cfg.finite_difference_normal_eps\n )\n elif self.cfg.finite_difference_normal_eps == \"progressive\":\n # progressive finite difference eps from Neuralangelo\n # https://arxiv.org/abs/2306.03092\n hg_conf: Any = self.cfg.pos_encoding_config\n assert (\n hg_conf.otype == \"ProgressiveBandHashGrid\"\n ), \"finite_difference_normal_eps=progressive only works with ProgressiveBandHashGrid\"\n current_level = min(\n hg_conf.start_level\n + max(global_step - hg_conf.start_step, 0) // hg_conf.update_steps,\n hg_conf.n_levels,\n )\n grid_res = hg_conf.base_resolution * hg_conf.per_level_scale ** (\n current_level - 1\n )\n grid_size = 2 * self.cfg.radius / grid_res\n if grid_size != 
self.finite_difference_normal_eps:\n threestudio.info(\n f\"Update finite_difference_normal_eps to {grid_size}\"\n )\n self.finite_difference_normal_eps = grid_size\n else:\n raise ValueError(\n f\"Unknown finite_difference_normal_eps={self.cfg.finite_difference_normal_eps}\"\n )\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, **kwargs\n ) -> BaseGeometry:\n return other" }, { "identifier": "ImplicitVolume", "path": "threestudio/models/geometry/implicit_volume.py", "snippet": "class ImplicitVolume(BaseImplicitGeometry):\n @dataclass\n class Config(BaseImplicitGeometry.Config):\n n_input_dims: int = 3\n n_feature_dims: int = 3\n density_activation: Optional[str] = \"softplus\"\n density_bias: Union[float, str] = \"blob_magic3d\"\n density_blob_scale: float = 10.0\n density_blob_std: float = 0.5\n pos_encoding_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"HashGrid\",\n \"n_levels\": 16,\n \"n_features_per_level\": 2,\n \"log2_hashmap_size\": 19,\n \"base_resolution\": 16,\n \"per_level_scale\": 1.447269237440378,\n }\n )\n mlp_network_config: dict = field(\n default_factory=lambda: {\n \"otype\": \"VanillaMLP\",\n \"activation\": \"ReLU\",\n \"output_activation\": \"none\",\n \"n_neurons\": 64,\n \"n_hidden_layers\": 1,\n }\n )\n normal_type: Optional[\n str\n ] = \"finite_difference\" # in ['pred', 'finite_difference', 'finite_difference_laplacian']\n finite_difference_normal_eps: float = 0.01\n\n # automatically determine the threshold\n isosurface_threshold: Union[float, str] = 25.0\n\n cfg: Config\n\n def configure(self) -> None:\n super().configure()\n self.encoding = get_encoding(\n self.cfg.n_input_dims, self.cfg.pos_encoding_config\n )\n self.density_network = get_mlp(\n self.encoding.n_output_dims, 1, self.cfg.mlp_network_config\n )\n if self.cfg.n_feature_dims > 0:\n self.feature_network = get_mlp(\n self.encoding.n_output_dims,\n self.cfg.n_feature_dims,\n self.cfg.mlp_network_config,\n )\n if self.cfg.normal_type == \"pred\":\n self.normal_network = get_mlp(\n self.encoding.n_output_dims, 3, self.cfg.mlp_network_config\n )\n\n def get_activated_density(\n self, points: Float[Tensor, \"*N Di\"], density: Float[Tensor, \"*N 1\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Float[Tensor, \"*N 1\"]]:\n density_bias: Union[float, Float[Tensor, \"*N 1\"]]\n if self.cfg.density_bias == \"blob_dreamfusion\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * torch.exp(\n -0.5 * (points**2).sum(dim=-1) / self.cfg.density_blob_std**2\n )[..., None]\n )\n elif self.cfg.density_bias == \"blob_magic3d\":\n # pre-activation density bias\n density_bias = (\n self.cfg.density_blob_scale\n * (\n 1\n - torch.sqrt((points**2).sum(dim=-1)) / self.cfg.density_blob_std\n )[..., None]\n )\n elif isinstance(self.cfg.density_bias, float):\n density_bias = self.cfg.density_bias\n else:\n raise ValueError(f\"Unknown density bias {self.cfg.density_bias}\")\n raw_density: Float[Tensor, \"*N 1\"] = density + density_bias\n density = get_activation(self.cfg.density_activation)(raw_density)\n return raw_density, density\n\n def initialize_shape(self) -> None:\n pass\n\n def forward(\n self, points: Float[Tensor, \"*N Di\"], output_normal: bool = False\n ) -> Dict[str, Float[Tensor, \"...\"]]:\n grad_enabled = torch.is_grad_enabled()\n\n if output_normal and self.cfg.normal_type == \"analytic\":\n torch.set_grad_enabled(True)\n points.requires_grad_(True)\n\n points_unscaled = points # 
points in the original scale\n points = contract_to_unisphere(\n points, self.bbox, self.unbounded\n ) # points normalized to (0, 1)\n\n enc = self.encoding(points.view(-1, self.cfg.n_input_dims))\n density = self.density_network(enc).view(*points.shape[:-1], 1)\n raw_density, density = self.get_activated_density(points_unscaled, density)\n\n output = {\n \"density\": density,\n \"points_scaled\": points.view(-1, self.cfg.n_input_dims),\n }\n\n if self.cfg.n_feature_dims > 0:\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n output.update({\"features\": features})\n\n if output_normal:\n if (\n self.cfg.normal_type == \"finite_difference\"\n or self.cfg.normal_type == \"finite_difference_laplacian\"\n ):\n # TODO: use raw density\n eps = self.cfg.finite_difference_normal_eps\n if self.cfg.normal_type == \"finite_difference_laplacian\":\n offsets: Float[Tensor, \"6 3\"] = torch.as_tensor(\n [\n [eps, 0.0, 0.0],\n [-eps, 0.0, 0.0],\n [0.0, eps, 0.0],\n [0.0, -eps, 0.0],\n [0.0, 0.0, eps],\n [0.0, 0.0, -eps],\n ]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 6 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 6 1\"] = self.forward_density(\n points_offset\n )\n normal = (\n -0.5\n * (density_offset[..., 0::2, 0] - density_offset[..., 1::2, 0])\n / eps\n )\n else:\n offsets: Float[Tensor, \"3 3\"] = torch.as_tensor(\n [[eps, 0.0, 0.0], [0.0, eps, 0.0], [0.0, 0.0, eps]]\n ).to(points_unscaled)\n points_offset: Float[Tensor, \"... 3 3\"] = (\n points_unscaled[..., None, :] + offsets\n ).clamp(-self.cfg.radius, self.cfg.radius)\n density_offset: Float[Tensor, \"... 3 1\"] = self.forward_density(\n points_offset\n )\n normal = -(density_offset[..., 0::1, 0] - density) / eps\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"pred\":\n normal = self.normal_network(enc).view(*points.shape[:-1], 3)\n normal = F.normalize(normal, dim=-1)\n elif self.cfg.normal_type == \"analytic\":\n normal = -torch.autograd.grad(\n density,\n points_unscaled,\n grad_outputs=torch.ones_like(density),\n create_graph=True,\n )[0]\n normal = F.normalize(normal, dim=-1)\n if not grad_enabled:\n normal = normal.detach()\n else:\n raise AttributeError(f\"Unknown normal type {self.cfg.normal_type}\")\n output.update({\"normal\": normal, \"shading_normal\": normal})\n\n torch.set_grad_enabled(grad_enabled)\n return output\n\n def forward_density(self, points: Float[Tensor, \"*N Di\"]) -> Float[Tensor, \"*N 1\"]:\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n\n density = self.density_network(\n self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n ).reshape(*points.shape[:-1], 1)\n\n _, density = self.get_activated_density(points_unscaled, density)\n return density\n\n def forward_field(\n self, points: Float[Tensor, \"*N Di\"]\n ) -> Tuple[Float[Tensor, \"*N 1\"], Optional[Float[Tensor, \"*N 3\"]]]:\n if self.cfg.isosurface_deformable_grid:\n threestudio.warn(\n f\"{self.__class__.__name__} does not support isosurface_deformable_grid. 
Ignoring.\"\n )\n density = self.forward_density(points)\n return density, None\n\n def forward_level(\n self, field: Float[Tensor, \"*N 1\"], threshold: float\n ) -> Float[Tensor, \"*N 1\"]:\n return -(field - threshold)\n\n def export(self, points: Float[Tensor, \"*N Di\"], **kwargs) -> Dict[str, Any]:\n out: Dict[str, Any] = {}\n if self.cfg.n_feature_dims == 0:\n return out\n points_unscaled = points\n points = contract_to_unisphere(points_unscaled, self.bbox, self.unbounded)\n enc = self.encoding(points.reshape(-1, self.cfg.n_input_dims))\n features = self.feature_network(enc).view(\n *points.shape[:-1], self.cfg.n_feature_dims\n )\n out.update(\n {\n \"features\": features,\n }\n )\n return out\n\n @staticmethod\n @torch.no_grad()\n def create_from(\n other: BaseGeometry,\n cfg: Optional[Union[dict, DictConfig]] = None,\n copy_net: bool = True,\n **kwargs,\n ) -> \"ImplicitVolume\":\n if isinstance(other, ImplicitVolume):\n instance = ImplicitVolume(cfg, **kwargs)\n instance.encoding.load_state_dict(other.encoding.state_dict())\n instance.density_network.load_state_dict(other.density_network.state_dict())\n if copy_net:\n if (\n instance.cfg.n_feature_dims > 0\n and other.cfg.n_feature_dims == instance.cfg.n_feature_dims\n ):\n instance.feature_network.load_state_dict(\n other.feature_network.state_dict()\n )\n if (\n instance.cfg.normal_type == \"pred\"\n and other.cfg.normal_type == \"pred\"\n ):\n instance.normal_network.load_state_dict(\n other.normal_network.state_dict()\n )\n return instance\n else:\n raise TypeError(\n f\"Cannot create {ImplicitVolume.__name__} from {other.__class__.__name__}\"\n )" }, { "identifier": "MarchingTetrahedraHelper", "path": "threestudio/models/isosurface.py", "snippet": "class MarchingTetrahedraHelper(IsosurfaceHelper):\n def __init__(self, resolution: int, tets_path: str):\n super().__init__()\n self.resolution = resolution\n self.tets_path = tets_path\n\n self.triangle_table: Float[Tensor, \"...\"]\n self.register_buffer(\n \"triangle_table\",\n torch.as_tensor(\n [\n [-1, -1, -1, -1, -1, -1],\n [1, 0, 2, -1, -1, -1],\n [4, 0, 3, -1, -1, -1],\n [1, 4, 2, 1, 3, 4],\n [3, 1, 5, -1, -1, -1],\n [2, 3, 0, 2, 5, 3],\n [1, 4, 0, 1, 5, 4],\n [4, 2, 5, -1, -1, -1],\n [4, 5, 2, -1, -1, -1],\n [4, 1, 0, 4, 5, 1],\n [3, 2, 0, 3, 5, 2],\n [1, 3, 5, -1, -1, -1],\n [4, 1, 2, 4, 3, 1],\n [3, 0, 4, -1, -1, -1],\n [2, 0, 1, -1, -1, -1],\n [-1, -1, -1, -1, -1, -1],\n ],\n dtype=torch.long,\n ),\n persistent=False,\n )\n self.num_triangles_table: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"num_triangles_table\",\n torch.as_tensor(\n [0, 1, 1, 2, 1, 2, 2, 1, 1, 2, 2, 1, 2, 1, 1, 0], dtype=torch.long\n ),\n persistent=False,\n )\n self.base_tet_edges: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"base_tet_edges\",\n torch.as_tensor([0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3], dtype=torch.long),\n persistent=False,\n )\n\n tets = np.load(self.tets_path)\n self._grid_vertices: Float[Tensor, \"...\"]\n self.register_buffer(\n \"_grid_vertices\",\n torch.from_numpy(tets[\"vertices\"]).float(),\n persistent=False,\n )\n self.indices: Integer[Tensor, \"...\"]\n self.register_buffer(\n \"indices\", torch.from_numpy(tets[\"indices\"]).long(), persistent=False\n )\n\n self._all_edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n\n def normalize_grid_deformation(\n self, grid_vertex_offsets: Float[Tensor, \"Nv 3\"]\n ) -> Float[Tensor, \"Nv 3\"]:\n return (\n (self.points_range[1] - self.points_range[0])\n / (self.resolution) # half tet size is approximately 1 / 
self.resolution\n * torch.tanh(grid_vertex_offsets)\n ) # FIXME: hard-coded activation\n\n @property\n def grid_vertices(self) -> Float[Tensor, \"Nv 3\"]:\n return self._grid_vertices\n\n @property\n def all_edges(self) -> Integer[Tensor, \"Ne 2\"]:\n if self._all_edges is None:\n # compute edges on GPU, or it would be VERY SLOW (basically due to the unique operation)\n edges = torch.tensor(\n [0, 1, 0, 2, 0, 3, 1, 2, 1, 3, 2, 3],\n dtype=torch.long,\n device=self.indices.device,\n )\n _all_edges = self.indices[:, edges].reshape(-1, 2)\n _all_edges_sorted = torch.sort(_all_edges, dim=1)[0]\n _all_edges = torch.unique(_all_edges_sorted, dim=0)\n self._all_edges = _all_edges\n return self._all_edges\n\n def sort_edges(self, edges_ex2):\n with torch.no_grad():\n order = (edges_ex2[:, 0] > edges_ex2[:, 1]).long()\n order = order.unsqueeze(dim=1)\n\n a = torch.gather(input=edges_ex2, index=order, dim=1)\n b = torch.gather(input=edges_ex2, index=1 - order, dim=1)\n\n return torch.stack([a, b], -1)\n\n def _forward(self, pos_nx3, sdf_n, tet_fx4):\n with torch.no_grad():\n occ_n = sdf_n > 0\n occ_fx4 = occ_n[tet_fx4.reshape(-1)].reshape(-1, 4)\n occ_sum = torch.sum(occ_fx4, -1)\n valid_tets = (occ_sum > 0) & (occ_sum < 4)\n occ_sum = occ_sum[valid_tets]\n\n # find all vertices\n all_edges = tet_fx4[valid_tets][:, self.base_tet_edges].reshape(-1, 2)\n all_edges = self.sort_edges(all_edges)\n unique_edges, idx_map = torch.unique(all_edges, dim=0, return_inverse=True)\n\n unique_edges = unique_edges.long()\n mask_edges = occ_n[unique_edges.reshape(-1)].reshape(-1, 2).sum(-1) == 1\n mapping = (\n torch.ones(\n (unique_edges.shape[0]), dtype=torch.long, device=pos_nx3.device\n )\n * -1\n )\n mapping[mask_edges] = torch.arange(\n mask_edges.sum(), dtype=torch.long, device=pos_nx3.device\n )\n idx_map = mapping[idx_map] # map edges to verts\n\n interp_v = unique_edges[mask_edges]\n edges_to_interp = pos_nx3[interp_v.reshape(-1)].reshape(-1, 2, 3)\n edges_to_interp_sdf = sdf_n[interp_v.reshape(-1)].reshape(-1, 2, 1)\n edges_to_interp_sdf[:, -1] *= -1\n\n denominator = edges_to_interp_sdf.sum(1, keepdim=True)\n\n edges_to_interp_sdf = torch.flip(edges_to_interp_sdf, [1]) / denominator\n verts = (edges_to_interp * edges_to_interp_sdf).sum(1)\n\n idx_map = idx_map.reshape(-1, 6)\n\n v_id = torch.pow(2, torch.arange(4, dtype=torch.long, device=pos_nx3.device))\n tetindex = (occ_fx4[valid_tets] * v_id.unsqueeze(0)).sum(-1)\n num_triangles = self.num_triangles_table[tetindex]\n\n # Generate triangle indices\n faces = torch.cat(\n (\n torch.gather(\n input=idx_map[num_triangles == 1],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 1]][:, :3],\n ).reshape(-1, 3),\n torch.gather(\n input=idx_map[num_triangles == 2],\n dim=1,\n index=self.triangle_table[tetindex[num_triangles == 2]][:, :6],\n ).reshape(-1, 3),\n ),\n dim=0,\n )\n\n return verts, faces\n\n def forward(\n self,\n level: Float[Tensor, \"N3 1\"],\n deformation: Optional[Float[Tensor, \"N3 3\"]] = None,\n ) -> Mesh:\n if deformation is not None:\n grid_vertices = self.grid_vertices + self.normalize_grid_deformation(\n deformation\n )\n else:\n grid_vertices = self.grid_vertices\n\n v_pos, t_pos_idx = self._forward(grid_vertices, level, self.indices)\n\n mesh = Mesh(\n v_pos=v_pos,\n t_pos_idx=t_pos_idx,\n # extras\n grid_vertices=grid_vertices,\n tet_edges=self.all_edges,\n grid_level=level,\n grid_deformation=deformation,\n )\n\n return mesh" }, { "identifier": "Mesh", "path": "threestudio/models/mesh.py", "snippet": "class Mesh:\n def 
__init__(\n self, v_pos: Float[Tensor, \"Nv 3\"], t_pos_idx: Integer[Tensor, \"Nf 3\"], **kwargs\n ) -> None:\n self.v_pos: Float[Tensor, \"Nv 3\"] = v_pos\n self.t_pos_idx: Integer[Tensor, \"Nf 3\"] = t_pos_idx\n self._v_nrm: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tng: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._v_tex: Optional[Float[Tensor, \"Nt 3\"]] = None\n self._t_tex_idx: Optional[Float[Tensor, \"Nf 3\"]] = None\n self._v_rgb: Optional[Float[Tensor, \"Nv 3\"]] = None\n self._edges: Optional[Integer[Tensor, \"Ne 2\"]] = None\n self.extras: Dict[str, Any] = {}\n for k, v in kwargs.items():\n self.add_extra(k, v)\n\n def add_extra(self, k, v) -> None:\n self.extras[k] = v\n\n def remove_outlier(self, outlier_n_faces_threshold: Union[int, float]) -> Mesh:\n if self.requires_grad:\n threestudio.debug(\"Mesh is differentiable, not removing outliers\")\n return self\n\n # use trimesh to first split the mesh into connected components\n # then remove the components with less than n_face_threshold faces\n import trimesh\n\n # construct a trimesh object\n mesh = trimesh.Trimesh(\n vertices=self.v_pos.detach().cpu().numpy(),\n faces=self.t_pos_idx.detach().cpu().numpy(),\n )\n\n # split the mesh into connected components\n components = mesh.split(only_watertight=False)\n # log the number of faces in each component\n threestudio.debug(\n \"Mesh has {} components, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n\n n_faces_threshold: int\n if isinstance(outlier_n_faces_threshold, float):\n # set the threshold to the number of faces in the largest component multiplied by outlier_n_faces_threshold\n n_faces_threshold = int(\n max([c.faces.shape[0] for c in components]) * outlier_n_faces_threshold\n )\n else:\n # set the threshold directly to outlier_n_faces_threshold\n n_faces_threshold = outlier_n_faces_threshold\n\n # log the threshold\n threestudio.debug(\n \"Removing components with less than {} faces\".format(n_faces_threshold)\n )\n\n # remove the components with less than n_face_threshold faces\n components = [c for c in components if c.faces.shape[0] >= n_faces_threshold]\n\n # log the number of faces in each component after removing outliers\n threestudio.debug(\n \"Mesh has {} components after removing outliers, with faces: {}\".format(\n len(components), [c.faces.shape[0] for c in components]\n )\n )\n # merge the components\n mesh = trimesh.util.concatenate(components)\n\n # convert back to our mesh format\n v_pos = torch.from_numpy(mesh.vertices).to(self.v_pos)\n t_pos_idx = torch.from_numpy(mesh.faces).to(self.t_pos_idx)\n\n clean_mesh = Mesh(v_pos, t_pos_idx)\n # keep the extras unchanged\n\n if len(self.extras) > 0:\n clean_mesh.extras = self.extras\n threestudio.debug(\n f\"The following extra attributes are inherited from the original mesh unchanged: {list(self.extras.keys())}\"\n )\n return clean_mesh\n\n @property\n def requires_grad(self):\n return self.v_pos.requires_grad\n\n @property\n def v_nrm(self):\n if self._v_nrm is None:\n self._v_nrm = self._compute_vertex_normal()\n return self._v_nrm\n\n @property\n def v_tng(self):\n if self._v_tng is None:\n self._v_tng = self._compute_vertex_tangent()\n return self._v_tng\n\n @property\n def v_tex(self):\n if self._v_tex is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._v_tex\n\n @property\n def t_tex_idx(self):\n if self._t_tex_idx is None:\n self._v_tex, self._t_tex_idx = self._unwrap_uv()\n return self._t_tex_idx\n\n @property\n def 
v_rgb(self):\n return self._v_rgb\n\n @property\n def edges(self):\n if self._edges is None:\n self._edges = self._compute_edges()\n return self._edges\n\n def _compute_vertex_normal(self):\n i0 = self.t_pos_idx[:, 0]\n i1 = self.t_pos_idx[:, 1]\n i2 = self.t_pos_idx[:, 2]\n\n v0 = self.v_pos[i0, :]\n v1 = self.v_pos[i1, :]\n v2 = self.v_pos[i2, :]\n\n face_normals = torch.cross(v1 - v0, v2 - v0)\n\n # Splat face normals to vertices\n v_nrm = torch.zeros_like(self.v_pos)\n v_nrm.scatter_add_(0, i0[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i1[:, None].repeat(1, 3), face_normals)\n v_nrm.scatter_add_(0, i2[:, None].repeat(1, 3), face_normals)\n\n # Normalize, replace zero (degenerated) normals with some default value\n v_nrm = torch.where(\n dot(v_nrm, v_nrm) > 1e-20, v_nrm, torch.as_tensor([0.0, 0.0, 1.0]).to(v_nrm)\n )\n v_nrm = F.normalize(v_nrm, dim=1)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(v_nrm))\n\n return v_nrm\n\n def _compute_vertex_tangent(self):\n vn_idx = [None] * 3\n pos = [None] * 3\n tex = [None] * 3\n for i in range(0, 3):\n pos[i] = self.v_pos[self.t_pos_idx[:, i]]\n tex[i] = self.v_tex[self.t_tex_idx[:, i]]\n # t_nrm_idx is always the same as t_pos_idx\n vn_idx[i] = self.t_pos_idx[:, i]\n\n tangents = torch.zeros_like(self.v_nrm)\n tansum = torch.zeros_like(self.v_nrm)\n\n # Compute tangent space for each triangle\n uve1 = tex[1] - tex[0]\n uve2 = tex[2] - tex[0]\n pe1 = pos[1] - pos[0]\n pe2 = pos[2] - pos[0]\n\n nom = pe1 * uve2[..., 1:2] - pe2 * uve1[..., 1:2]\n denom = uve1[..., 0:1] * uve2[..., 1:2] - uve1[..., 1:2] * uve2[..., 0:1]\n\n # Avoid division by zero for degenerated texture coordinates\n tang = nom / torch.where(\n denom > 0.0, torch.clamp(denom, min=1e-6), torch.clamp(denom, max=-1e-6)\n )\n\n # Update all 3 vertices\n for i in range(0, 3):\n idx = vn_idx[i][:, None].repeat(1, 3)\n tangents.scatter_add_(0, idx, tang) # tangents[n_i] = tangents[n_i] + tang\n tansum.scatter_add_(\n 0, idx, torch.ones_like(tang)\n ) # tansum[n_i] = tansum[n_i] + 1\n tangents = tangents / tansum\n\n # Normalize and make sure tangent is perpendicular to normal\n tangents = F.normalize(tangents, dim=1)\n tangents = F.normalize(tangents - dot(tangents, self.v_nrm) * self.v_nrm)\n\n if torch.is_anomaly_enabled():\n assert torch.all(torch.isfinite(tangents))\n\n return tangents\n\n def _unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n threestudio.info(\"Using xatlas to perform UV unwrapping, may take a while ...\")\n\n import xatlas\n\n atlas = xatlas.Atlas()\n atlas.add_mesh(\n self.v_pos.detach().cpu().numpy(),\n self.t_pos_idx.cpu().numpy(),\n )\n co = xatlas.ChartOptions()\n po = xatlas.PackOptions()\n for k, v in xatlas_chart_options.items():\n setattr(co, k, v)\n for k, v in xatlas_pack_options.items():\n setattr(po, k, v)\n atlas.generate(co, po)\n vmapping, indices, uvs = atlas.get_mesh(0)\n vmapping = (\n torch.from_numpy(\n vmapping.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n uvs = torch.from_numpy(uvs).to(self.v_pos.device).float()\n indices = (\n torch.from_numpy(\n indices.astype(np.uint64, casting=\"same_kind\").view(np.int64)\n )\n .to(self.v_pos.device)\n .long()\n )\n return uvs, indices\n\n def unwrap_uv(\n self, xatlas_chart_options: dict = {}, xatlas_pack_options: dict = {}\n ):\n self._v_tex, self._t_tex_idx = self._unwrap_uv(\n xatlas_chart_options, xatlas_pack_options\n )\n\n def set_vertex_color(self, v_rgb):\n 
assert v_rgb.shape[0] == self.v_pos.shape[0]\n self._v_rgb = v_rgb\n\n def _compute_edges(self):\n # Compute edges\n edges = torch.cat(\n [\n self.t_pos_idx[:, [0, 1]],\n self.t_pos_idx[:, [1, 2]],\n self.t_pos_idx[:, [2, 0]],\n ],\n dim=0,\n )\n edges = edges.sort()[0]\n edges = torch.unique(edges, dim=0)\n return edges\n\n def normal_consistency(self) -> Float[Tensor, \"\"]:\n edge_nrm: Float[Tensor, \"Ne 2 3\"] = self.v_nrm[self.edges]\n nc = (\n 1.0 - torch.cosine_similarity(edge_nrm[:, 0], edge_nrm[:, 1], dim=-1)\n ).mean()\n return nc\n\n def _laplacian_uniform(self):\n # from stable-dreamfusion\n # https://github.com/ashawkey/stable-dreamfusion/blob/8fb3613e9e4cd1ded1066b46e80ca801dfb9fd06/nerf/renderer.py#L224\n verts, faces = self.v_pos, self.t_pos_idx\n\n V = verts.shape[0]\n F = faces.shape[0]\n\n # Neighbor indices\n ii = faces[:, [1, 2, 0]].flatten()\n jj = faces[:, [2, 0, 1]].flatten()\n adj = torch.stack([torch.cat([ii, jj]), torch.cat([jj, ii])], dim=0).unique(\n dim=1\n )\n adj_values = torch.ones(adj.shape[1]).to(verts)\n\n # Diagonal indices\n diag_idx = adj[0]\n\n # Build the sparse matrix\n idx = torch.cat((adj, torch.stack((diag_idx, diag_idx), dim=0)), dim=1)\n values = torch.cat((-adj_values, adj_values))\n\n # The coalesce operation sums the duplicate indices, resulting in the\n # correct diagonal\n return torch.sparse_coo_tensor(idx, values, (V, V)).coalesce()\n\n def laplacian(self) -> Float[Tensor, \"\"]:\n with torch.no_grad():\n L = self._laplacian_uniform()\n loss = L.mm(self.v_pos)\n loss = loss.norm(dim=1)\n loss = loss.mean()\n return loss" }, { "identifier": "get_encoding", "path": "threestudio/models/networks.py", "snippet": "def get_encoding(n_input_dims: int, config) -> nn.Module:\n # input suppose to be range [0, 1]\n encoding: nn.Module\n if config.otype == \"ProgressiveBandFrequency\":\n encoding = ProgressiveBandFrequency(n_input_dims, config_to_primitive(config))\n elif config.otype == \"ProgressiveBandHashGrid\":\n encoding = ProgressiveBandHashGrid(n_input_dims, config_to_primitive(config))\n else:\n encoding = TCNNEncoding(n_input_dims, config_to_primitive(config))\n encoding = CompositeEncoding(\n encoding,\n include_xyz=config.get(\"include_xyz\", False),\n xyz_scale=2.0,\n xyz_offset=-1.0,\n ) # FIXME: hard coded\n return encoding" }, { "identifier": "get_mlp", "path": "threestudio/models/networks.py", "snippet": "def get_mlp(n_input_dims, n_output_dims, config) -> nn.Module:\n network: nn.Module\n if config.otype == \"VanillaMLP\":\n network = VanillaMLP(n_input_dims, n_output_dims, config_to_primitive(config))\n elif config.otype == \"SphereInitVanillaMLP\":\n network = SphereInitVanillaMLP(\n n_input_dims, n_output_dims, config_to_primitive(config)\n )\n else:\n assert (\n config.get(\"sphere_init\", False) is False\n ), \"sphere_init=True only supported by VanillaMLP\"\n network = TCNNNetwork(n_input_dims, n_output_dims, config_to_primitive(config))\n return network" }, { "identifier": "broadcast", "path": "threestudio/utils/misc.py", "snippet": "def broadcast(tensor, src=0):\n if not _distributed_available():\n return tensor\n else:\n torch.distributed.broadcast(tensor, src=src)\n return tensor" }, { "identifier": "get_rank", "path": "threestudio/utils/misc.py", "snippet": "def get_rank():\n # SLURM_PROCID can be set even if SLURM is not managing the multiprocessing,\n # therefore LOCAL_RANK needs to be checked first\n rank_keys = (\"RANK\", \"LOCAL_RANK\", \"SLURM_PROCID\", \"JSM_NAMESPACE_RANK\")\n for key in rank_keys:\n rank = 
os.environ.get(key)\n if rank is not None:\n return int(rank)\n return 0" }, { "identifier": "scale_tensor", "path": "threestudio/utils/ops.py", "snippet": "def scale_tensor(\n dat: Num[Tensor, \"... D\"], inp_scale: ValidScale, tgt_scale: ValidScale\n):\n if inp_scale is None:\n inp_scale = (0, 1)\n if tgt_scale is None:\n tgt_scale = (0, 1)\n if isinstance(tgt_scale, Tensor):\n assert dat.shape[-1] == tgt_scale.shape[-1]\n dat = (dat - inp_scale[0]) / (inp_scale[1] - inp_scale[0])\n dat = dat * (tgt_scale[1] - tgt_scale[0]) + tgt_scale[0]\n return dat" } ]
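The Mesh context snippet above splats unnormalized face normals onto vertices with scatter_add_ and falls back to (0, 0, 1) for degenerate vertices. A minimal standalone sketch of that splatting step on a hypothetical two-triangle mesh (the toy data is illustrative, not part of the snippet's API):

import torch
import torch.nn.functional as F

# Toy mesh: two triangles tiling the unit square in the z = 0 plane.
v_pos = torch.tensor([[0., 0., 0.], [1., 0., 0.], [1., 1., 0.], [0., 1., 0.]])
t_pos_idx = torch.tensor([[0, 1, 2], [0, 2, 3]])

i0, i1, i2 = t_pos_idx[:, 0], t_pos_idx[:, 1], t_pos_idx[:, 2]
# Unnormalized face normals: their length is proportional to face area, so the
# scatter_add_ below yields an area-weighted average normal per vertex.
face_normals = torch.cross(v_pos[i1] - v_pos[i0], v_pos[i2] - v_pos[i0], dim=-1)
v_nrm = torch.zeros_like(v_pos)
for idx in (i0, i1, i2):
    v_nrm.scatter_add_(0, idx[:, None].repeat(1, 3), face_normals)
v_nrm = F.normalize(v_nrm, dim=1)
print(v_nrm)  # (0, 0, 1) at every vertex for this planar mesh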
import numpy as np import os import torch import torch.nn as nn import torch.nn.functional as F import threestudio import trimesh from dataclasses import dataclass, field from threestudio.models.geometry.base import (BaseExplicitGeometry, BaseGeometry, contract_to_unisphere,) from threestudio.models.geometry.implicit_sdf import ImplicitSDF from threestudio.models.geometry.implicit_volume import ImplicitVolume from threestudio.models.isosurface import MarchingTetrahedraHelper from threestudio.models.mesh import Mesh from threestudio.models.networks import get_encoding, get_mlp from threestudio.utils.misc import broadcast, get_rank from threestudio.utils.ops import scale_tensor from threestudio.utils.typing import * from pysdf import SDF
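These imports pull in the get_encoding/get_mlp factories quoted in the context above. As a rough sketch, the feature branch they build looks like the following; the OmegaConf wrapping is an assumption about the call site (the factories read config.otype attribute-style), and the HashGrid path additionally requires tinycudann:

from omegaconf import OmegaConf
from threestudio.models.networks import get_encoding, get_mlp

pos_encoding_config = OmegaConf.create({
    "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2,
    "log2_hashmap_size": 19, "base_resolution": 16,
    "per_level_scale": 1.447269237440378,
})
mlp_network_config = OmegaConf.create({
    "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none",
    "n_neurons": 64, "n_hidden_layers": 1,
})
encoding = get_encoding(3, pos_encoding_config)  # expects xyz in [0, 1]
feature_network = get_mlp(encoding.n_output_dims, 3, mlp_network_config)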
15743
# scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() # instance.isosurface_bbox = mesh.extras["bbox"] instance.isosurface_bbox = mesh.extras["bbox"] * instance.cfg.nerf_scale instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: 
instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
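The mesh branch of initialize_shape in the cropped_code above negates pysdf's output because pysdf reports positive distance inside the surface. A quick standalone sanity check of that convention (the icosphere is just a hypothetical test shape):

import numpy as np
import trimesh
from pysdf import SDF

mesh = trimesh.creation.icosphere(radius=1.0)  # watertight unit sphere
f = SDF(mesh.vertices, mesh.faces)

print(f(np.array([[0.0, 0.0, 0.0]])))  # > 0: pysdf is positive inside
print(f(np.array([[2.0, 0.0, 0.0]])))  # < 0 outside
# Hence the snippet returns -sdf(points) to get the usual inside-negative SDF.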
@threestudio.register("tetrahedra-sdf-grid") class TetrahedraSDFGrid(BaseExplicitGeometry): @dataclass class Config(BaseExplicitGeometry.Config): isosurface_resolution: int = 128 isosurface_deformable_grid: bool = True isosurface_remove_outliers: bool = False isosurface_outlier_n_faces_threshold: Union[int, float] = 0.01 nerf_scale: float = 1.0 n_input_dims: int = 3 n_feature_dims: int = 3 pos_encoding_config: dict = field( default_factory=lambda: { "otype": "HashGrid", "n_levels": 16, "n_features_per_level": 2, "log2_hashmap_size": 19, "base_resolution": 16, "per_level_scale": 1.447269237440378, } ) mlp_network_config: dict = field( default_factory=lambda: { "otype": "VanillaMLP", "activation": "ReLU", "output_activation": "none", "n_neurons": 64, "n_hidden_layers": 1, } ) shape_init: Optional[str] = None shape_init_params: Optional[Any] = None shape_init_mesh_up: str = "+z" shape_init_mesh_front: str = "+x" force_shape_init: bool = False geometry_only: bool = False fix_geometry: bool = False # sdf_bias: Union[float, str] = 0.0 # sdf_bias_params: Optional[Any] = None cfg: Config def configure(self) -> None: super().configure() # this should be saved to state_dict, register as buffer self.isosurface_bbox: Float[Tensor, "2 3"] self.register_buffer("isosurface_bbox", self.bbox.clone()) self.isosurface_helper = MarchingTetrahedraHelper( self.cfg.isosurface_resolution, f"load/tets/{self.cfg.isosurface_resolution}_tets.npz", ) self.sdf: Float[Tensor, "Nv 1"] self.deformation: Optional[Float[Tensor, "Nv 3"]] if not self.cfg.fix_geometry: self.register_parameter( "sdf", nn.Parameter( torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ) ), ) if self.cfg.isosurface_deformable_grid: self.register_parameter( "deformation", nn.Parameter( torch.zeros_like(self.isosurface_helper.grid_vertices) ), ) else: self.deformation = None else: self.register_buffer( "sdf", torch.zeros( (self.isosurface_helper.grid_vertices.shape[0], 1), dtype=torch.float32, ), ) if self.cfg.isosurface_deformable_grid: self.register_buffer( "deformation", torch.zeros_like(self.isosurface_helper.grid_vertices), ) else: self.deformation = None if not self.cfg.geometry_only: self.encoding = get_encoding( self.cfg.n_input_dims, self.cfg.pos_encoding_config ) self.feature_network = get_mlp( self.encoding.n_output_dims, self.cfg.n_feature_dims, self.cfg.mlp_network_config, ) self.mesh: Optional[Mesh] = None def initialize_shape(self) -> None: if self.cfg.shape_init is None and not self.cfg.force_shape_init: return # do not initialize shape if weights are provided if self.cfg.weights is not None and not self.cfg.force_shape_init: return get_gt_sdf: Callable[[Float[Tensor, "N 3"]], Float[Tensor, "N 1"]] assert isinstance(self.cfg.shape_init, str) if self.cfg.shape_init == "ellipsoid": assert ( isinstance(self.cfg.shape_init_params, Sized) and len(self.cfg.shape_init_params) == 3 ) size = torch.as_tensor(self.cfg.shape_init_params).to(self.device) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return ((points_rand / size) ** 2).sum( dim=-1, keepdim=True ).sqrt() - 1.0 # pseudo signed distance of an ellipsoid get_gt_sdf = func elif self.cfg.shape_init == "sphere": assert isinstance(self.cfg.shape_init_params, float) radius = self.cfg.shape_init_params def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: return (points_rand**2).sum(dim=-1, keepdim=True).sqrt() - radius get_gt_sdf = func elif self.cfg.shape_init.startswith("mesh:"): assert 
isinstance(self.cfg.shape_init_params, float) mesh_path = self.cfg.shape_init[5:] if not os.path.exists(mesh_path): raise ValueError(f"Mesh file {mesh_path} does not exist.") mesh = trimesh.load(mesh_path) # move to center centroid = mesh.vertices.mean(0) mesh.vertices = mesh.vertices - centroid # align to up-z and front-x dirs = ["+x", "+y", "+z", "-x", "-y", "-z"] dir2vec = { "+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]), "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]), "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1]), } if ( self.cfg.shape_init_mesh_up not in dirs or self.cfg.shape_init_mesh_front not in dirs ): raise ValueError( f"shape_init_mesh_up and shape_init_mesh_front must be one of {dirs}." ) if self.cfg.shape_init_mesh_up[1] == self.cfg.shape_init_mesh_front[1]: raise ValueError( "shape_init_mesh_up and shape_init_mesh_front must be orthogonal." ) z_, x_ = ( dir2vec[self.cfg.shape_init_mesh_up], dir2vec[self.cfg.shape_init_mesh_front], ) y_ = np.cross(z_, x_) std2mesh = np.stack([x_, y_, z_], axis=0).T mesh2std = np.linalg.inv(std2mesh) # scaling scale = np.abs(mesh.vertices).max() mesh.vertices = mesh.vertices / scale * self.cfg.shape_init_params mesh.vertices = np.dot(mesh2std, mesh.vertices.T).T sdf = SDF(mesh.vertices, mesh.faces) def func(points_rand: Float[Tensor, "N 3"]) -> Float[Tensor, "N 1"]: # add a negative signed here # as in pysdf the inside of the shape has positive signed distance return torch.from_numpy(-sdf(points_rand.cpu().numpy())).to( points_rand )[..., None] get_gt_sdf = func else: raise ValueError( f"Unknown shape initialization type: {self.cfg.shape_init}" ) sdf_gt = get_gt_sdf( scale_tensor( self.isosurface_helper.grid_vertices, self.isosurface_helper.points_range, self.isosurface_bbox, ) ) self.sdf.data = sdf_gt # explicit broadcast to ensure param consistency across ranks for param in self.parameters(): broadcast(param, src=0) def isosurface(self) -> Mesh: # return cached mesh if fix_geometry is True to save computation if self.cfg.fix_geometry and self.mesh is not None: return self.mesh mesh = self.isosurface_helper(self.sdf, self.deformation) mesh.v_pos = scale_tensor( mesh.v_pos, self.isosurface_helper.points_range, self.isosurface_bbox ) if self.cfg.isosurface_remove_outliers: mesh = mesh.remove_outlier(self.cfg.isosurface_outlier_n_faces_threshold) self.mesh = mesh return mesh def forward( self, points: Float[Tensor, "*N Di"], output_normal: bool = False ) -> Dict[str, Float[Tensor, "..."]]: if self.cfg.geometry_only: return {} assert ( output_normal == False ), f"Normal output is not supported for {self.__class__.__name__}" points_unscaled = points # points in the original scale points = contract_to_unisphere(points, self.bbox) # points normalized to (0, 1) enc = self.encoding(points.view(-1, self.cfg.n_input_dims)) features = self.feature_network(enc).view( *points.shape[:-1], self.cfg.n_feature_dims ) return {"features": features} @staticmethod @torch.no_grad() def create_from( other: BaseGeometry, cfg: Optional[Union[dict, DictConfig]] = None, copy_net: bool = True, **kwargs, ) -> "TetrahedraSDFGrid": if isinstance(other, TetrahedraSDFGrid): instance = TetrahedraSDFGrid(cfg, **kwargs) assert instance.cfg.isosurface_resolution == other.cfg.isosurface_resolution instance.isosurface_bbox = other.isosurface_bbox.clone() instance.sdf.data = other.sdf.data.clone() if ( instance.cfg.isosurface_deformable_grid and other.cfg.isosurface_deformable_grid ): assert ( instance.deformation is not None and other.deformation is not None ) 
instance.deformation.data = other.deformation.data.clone() if ( not instance.cfg.geometry_only and not other.cfg.geometry_only and copy_net ): instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance elif isinstance(other, ImplicitVolume): instance = TetrahedraSDFGrid(cfg, **kwargs) if other.cfg.isosurface_method != "mt": other.cfg.isosurface_method = "mt" threestudio.warn( f"Override isosurface_method of the source geometry to 'mt'" ) if other.cfg.isosurface_resolution != instance.cfg.isosurface_resolution: other.cfg.isosurface_resolution = instance.cfg.isosurface_resolution threestudio.warn( f"Override isosurface_resolution of the source geometry to {instance.cfg.isosurface_resolution}" ) mesh = other.isosurface() # instance.isosurface_bbox = mesh.extras["bbox"] instance.isosurface_bbox = mesh.extras["bbox"] * instance.cfg.nerf_scale instance.sdf.data = ( mesh.extras["grid_level"].to(instance.sdf.data).clamp(-1, 1) ) if not instance.cfg.geometry_only and copy_net: instance.encoding.load_state_dict(other.encoding.state_dict()) instance.feature_network.load_state_dict( other.feature_network.state_dict() ) return instance
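The up/front handling inside initialize_shape above is a change of basis: stack the front, side, and up directions as columns (std2mesh) and invert to map mesh coordinates into the canonical +z-up, +x-front frame. A small standalone check of that construction, with hypothetical orientations:

import numpy as np

dir2vec = {"+x": np.array([1, 0, 0]), "+y": np.array([0, 1, 0]),
           "+z": np.array([0, 0, 1]), "-x": np.array([-1, 0, 0]),
           "-y": np.array([0, -1, 0]), "-z": np.array([0, 0, -1])}

def mesh2std(up: str, front: str) -> np.ndarray:
    z_, x_ = dir2vec[up], dir2vec[front]
    y_ = np.cross(z_, x_)                        # right-handed third axis
    std2mesh = np.stack([x_, y_, z_], axis=0).T  # columns are x_, y_, z_
    return np.linalg.inv(std2mesh)

print(mesh2std("+z", "+x"))                        # identity: already canonical
print(mesh2std("+y", "+z") @ np.array([0, 1, 0]))  # y-up asset: up maps to +z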
elif isinstance(other, ImplicitSDF):
3
2023-12-06 07:53:11+00:00
24k
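One detail worth spelling out from this record before moving on: initialize_shape ends by broadcasting every parameter from rank 0, so the shape initialization stays identical across DDP workers. A hedged sketch of that sync pattern (assumes a torch.distributed process group is initialized in multi-process runs; the helper name is illustrative):

import torch
import torch.distributed as dist

def sync_params_from_rank0(module: torch.nn.Module) -> None:
    # No-op in single-process runs, mirroring the snippet's availability guard.
    if not (dist.is_available() and dist.is_initialized()):
        return
    for param in module.parameters():
        dist.broadcast(param.data, src=0)  # every rank adopts rank 0's values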
rehg-lab/RAVE
annotator/oneformer/detectron2/modeling/meta_arch/retinanet.py
[ { "identifier": "configurable", "path": "annotator/oneformer/detectron2/config/config.py", "snippet": "def configurable(init_func=None, *, from_config=None):\r\n \"\"\"\r\n Decorate a function or a class's __init__ method so that it can be called\r\n with a :class:`CfgNode` object using a :func:`from_config` function that translates\r\n :class:`CfgNode` to arguments.\r\n\r\n Examples:\r\n ::\r\n # Usage 1: Decorator on __init__:\r\n class A:\r\n @configurable\r\n def __init__(self, a, b=2, c=3):\r\n pass\r\n\r\n @classmethod\r\n def from_config(cls, cfg): # 'cfg' must be the first argument\r\n # Returns kwargs to be passed to __init__\r\n return {\"a\": cfg.A, \"b\": cfg.B}\r\n\r\n a1 = A(a=1, b=2) # regular construction\r\n a2 = A(cfg) # construct with a cfg\r\n a3 = A(cfg, b=3, c=4) # construct with extra overwrite\r\n\r\n # Usage 2: Decorator on any function. Needs an extra from_config argument:\r\n @configurable(from_config=lambda cfg: {\"a: cfg.A, \"b\": cfg.B})\r\n def a_func(a, b=2, c=3):\r\n pass\r\n\r\n a1 = a_func(a=1, b=2) # regular call\r\n a2 = a_func(cfg) # call with a cfg\r\n a3 = a_func(cfg, b=3, c=4) # call with extra overwrite\r\n\r\n Args:\r\n init_func (callable): a class's ``__init__`` method in usage 1. The\r\n class must have a ``from_config`` classmethod which takes `cfg` as\r\n the first argument.\r\n from_config (callable): the from_config function in usage 2. It must take `cfg`\r\n as its first argument.\r\n \"\"\"\r\n\r\n if init_func is not None:\r\n assert (\r\n inspect.isfunction(init_func)\r\n and from_config is None\r\n and init_func.__name__ == \"__init__\"\r\n ), \"Incorrect use of @configurable. Check API documentation for examples.\"\r\n\r\n @functools.wraps(init_func)\r\n def wrapped(self, *args, **kwargs):\r\n try:\r\n from_config_func = type(self).from_config\r\n except AttributeError as e:\r\n raise AttributeError(\r\n \"Class with @configurable must have a 'from_config' classmethod.\"\r\n ) from e\r\n if not inspect.ismethod(from_config_func):\r\n raise TypeError(\"Class with @configurable must have a 'from_config' classmethod.\")\r\n\r\n if _called_with_cfg(*args, **kwargs):\r\n explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)\r\n init_func(self, **explicit_args)\r\n else:\r\n init_func(self, *args, **kwargs)\r\n\r\n return wrapped\r\n\r\n else:\r\n if from_config is None:\r\n return configurable # @configurable() is made equivalent to @configurable\r\n assert inspect.isfunction(\r\n from_config\r\n ), \"from_config argument of configurable must be a function!\"\r\n\r\n def wrapper(orig_func):\r\n @functools.wraps(orig_func)\r\n def wrapped(*args, **kwargs):\r\n if _called_with_cfg(*args, **kwargs):\r\n explicit_args = _get_args_from_config(from_config, *args, **kwargs)\r\n return orig_func(**explicit_args)\r\n else:\r\n return orig_func(*args, **kwargs)\r\n\r\n wrapped.from_config = from_config\r\n return wrapped\r\n\r\n return wrapper\r" }, { "identifier": "get_norm", "path": "annotator/oneformer/detectron2/layers/batch_norm.py", "snippet": "def get_norm(norm, out_channels):\r\n \"\"\"\r\n Args:\r\n norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;\r\n or a callable that takes a channel number and returns\r\n the normalization layer as a nn.Module.\r\n\r\n Returns:\r\n nn.Module or None: the normalization layer\r\n \"\"\"\r\n if norm is None:\r\n return None\r\n if isinstance(norm, str):\r\n if len(norm) == 0:\r\n return None\r\n norm = {\r\n \"BN\": BatchNorm2d,\r\n # Fixed in 
https://github.com/pytorch/pytorch/pull/36382\r\n \"SyncBN\": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,\r\n \"FrozenBN\": FrozenBatchNorm2d,\r\n \"GN\": lambda channels: nn.GroupNorm(32, channels),\r\n # for debugging:\r\n \"nnSyncBN\": nn.SyncBatchNorm,\r\n \"naiveSyncBN\": NaiveSyncBatchNorm,\r\n # expose stats_mode N as an option to caller, required for zero-len inputs\r\n \"naiveSyncBN_N\": lambda channels: NaiveSyncBatchNorm(channels, stats_mode=\"N\"),\r\n \"LN\": lambda channels: LayerNorm(channels),\r\n }[norm]\r\n return norm(out_channels)\r" }, { "identifier": "CycleBatchNormList", "path": "annotator/oneformer/detectron2/layers/batch_norm.py", "snippet": "class CycleBatchNormList(nn.ModuleList):\r\n \"\"\"\r\n Implement domain-specific BatchNorm by cycling.\r\n\r\n When a BatchNorm layer is used for multiple input domains or input\r\n features, it might need to maintain a separate test-time statistics\r\n for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.\r\n\r\n This module implements it by using N separate BN layers\r\n and it cycles through them every time a forward() is called.\r\n\r\n NOTE: The caller of this module MUST guarantee to always call\r\n this module by multiple of N times. Otherwise its test-time statistics\r\n will be incorrect.\r\n \"\"\"\r\n\r\n def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):\r\n \"\"\"\r\n Args:\r\n length: number of BatchNorm layers to cycle.\r\n bn_class: the BatchNorm class to use\r\n kwargs: arguments of the BatchNorm class, such as num_features.\r\n \"\"\"\r\n self._affine = kwargs.pop(\"affine\", True)\r\n super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])\r\n if self._affine:\r\n # shared affine, domain-specific BN\r\n channels = self[0].num_features\r\n self.weight = nn.Parameter(torch.ones(channels))\r\n self.bias = nn.Parameter(torch.zeros(channels))\r\n self._pos = 0\r\n\r\n def forward(self, x):\r\n ret = self[self._pos](x)\r\n self._pos = (self._pos + 1) % len(self)\r\n\r\n if self._affine:\r\n w = self.weight.reshape(1, -1, 1, 1)\r\n b = self.bias.reshape(1, -1, 1, 1)\r\n return ret * w + b\r\n else:\r\n return ret\r\n\r\n def extra_repr(self):\r\n return f\"affine={self._affine}\"\r" }, { "identifier": "batched_nms", "path": "annotator/oneformer/detectron2/layers/nms.py", "snippet": "def batched_nms(\r\n boxes: torch.Tensor, scores: torch.Tensor, idxs: torch.Tensor, iou_threshold: float\r\n):\r\n \"\"\"\r\n Same as torchvision.ops.boxes.batched_nms, but with float().\r\n \"\"\"\r\n assert boxes.shape[-1] == 4\r\n # Note: Torchvision already has a strategy (https://github.com/pytorch/vision/issues/1311)\r\n # to decide whether to use coordinate trick or for loop to implement batched_nms. 
So we\r\n # just call it directly.\r\n # Fp16 does not have enough range for batched NMS, so adding float().\r\n return box_ops.batched_nms(boxes.float(), scores, idxs, iou_threshold)\r" }, { "identifier": "ShapeSpec", "path": "annotator/oneformer/detectron2/layers/shape_spec.py", "snippet": "class ShapeSpec:\r\n \"\"\"\r\n A simple structure that contains basic shape specification about a tensor.\r\n It is often used as the auxiliary inputs/outputs of models,\r\n to complement the lack of shape inference ability among pytorch modules.\r\n \"\"\"\r\n\r\n channels: Optional[int] = None\r\n height: Optional[int] = None\r\n width: Optional[int] = None\r\n stride: Optional[int] = None\r" }, { "identifier": "cat", "path": "annotator/oneformer/detectron2/layers/wrappers.py", "snippet": "def cat(tensors: List[torch.Tensor], dim: int = 0):\r\n \"\"\"\r\n Efficient version of torch.cat that avoids a copy if there is only a single element in a list\r\n \"\"\"\r\n assert isinstance(tensors, (list, tuple))\r\n if len(tensors) == 1:\r\n return tensors[0]\r\n return torch.cat(tensors, dim)\r" }, { "identifier": "Boxes", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "class Boxes:\r\n \"\"\"\r\n This structure stores a list of boxes as a Nx4 torch.Tensor.\r\n It supports some common methods about boxes\r\n (`area`, `clip`, `nonempty`, etc),\r\n and also behaves like a Tensor\r\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\r\n\r\n Attributes:\r\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor):\r\n \"\"\"\r\n Args:\r\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\r\n \"\"\"\r\n if not isinstance(tensor, torch.Tensor):\r\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\r\n else:\r\n tensor = tensor.to(torch.float32)\r\n if tensor.numel() == 0:\r\n # Use reshape, so we don't end up creating a new tensor that does not depend on\r\n # the inputs (and consequently confuses jit)\r\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\r\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\r\n\r\n self.tensor = tensor\r\n\r\n def clone(self) -> \"Boxes\":\r\n \"\"\"\r\n Clone the Boxes.\r\n\r\n Returns:\r\n Boxes\r\n \"\"\"\r\n return Boxes(self.tensor.clone())\r\n\r\n def to(self, device: torch.device):\r\n # Boxes are assumed float32 and does not support to(dtype)\r\n return Boxes(self.tensor.to(device=device))\r\n\r\n def area(self) -> torch.Tensor:\r\n \"\"\"\r\n Computes the area of all the boxes.\r\n\r\n Returns:\r\n torch.Tensor: a vector with areas of each box.\r\n \"\"\"\r\n box = self.tensor\r\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\r\n return area\r\n\r\n def clip(self, box_size: Tuple[int, int]) -> None:\r\n \"\"\"\r\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\r\n and y coordinates to the range [0, height].\r\n\r\n Args:\r\n box_size (height, width): The clipping box's size.\r\n \"\"\"\r\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\r\n h, w = box_size\r\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\r\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\r\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\r\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\r\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\r\n\r\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\r\n \"\"\"\r\n Find boxes that are 
non-empty.\r\n A box is considered empty, if either of its side is no larger than threshold.\r\n\r\n Returns:\r\n Tensor:\r\n a binary vector which represents whether each box is empty\r\n (False) or non-empty (True).\r\n \"\"\"\r\n box = self.tensor\r\n widths = box[:, 2] - box[:, 0]\r\n heights = box[:, 3] - box[:, 1]\r\n keep = (widths > threshold) & (heights > threshold)\r\n return keep\r\n\r\n def __getitem__(self, item) -> \"Boxes\":\r\n \"\"\"\r\n Args:\r\n item: int, slice, or a BoolTensor\r\n\r\n Returns:\r\n Boxes: Create a new :class:`Boxes` by indexing.\r\n\r\n The following usage are allowed:\r\n\r\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\r\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\r\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\r\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\r\n\r\n Note that the returned Boxes might share storage with this Boxes,\r\n subject to Pytorch's indexing semantics.\r\n \"\"\"\r\n if isinstance(item, int):\r\n return Boxes(self.tensor[item].view(1, -1))\r\n b = self.tensor[item]\r\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\r\n return Boxes(b)\r\n\r\n def __len__(self) -> int:\r\n return self.tensor.shape[0]\r\n\r\n def __repr__(self) -> str:\r\n return \"Boxes(\" + str(self.tensor) + \")\"\r\n\r\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\r\n \"\"\"\r\n Args:\r\n box_size (height, width): Size of the reference box.\r\n boundary_threshold (int): Boxes that extend beyond the reference box\r\n boundary by more than boundary_threshold are considered \"outside\".\r\n\r\n Returns:\r\n a binary vector, indicating whether each box is inside the reference box.\r\n \"\"\"\r\n height, width = box_size\r\n inds_inside = (\r\n (self.tensor[..., 0] >= -boundary_threshold)\r\n & (self.tensor[..., 1] >= -boundary_threshold)\r\n & (self.tensor[..., 2] < width + boundary_threshold)\r\n & (self.tensor[..., 3] < height + boundary_threshold)\r\n )\r\n return inds_inside\r\n\r\n def get_centers(self) -> torch.Tensor:\r\n \"\"\"\r\n Returns:\r\n The box centers in a Nx2 array of (x, y).\r\n \"\"\"\r\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\r\n\r\n def scale(self, scale_x: float, scale_y: float) -> None:\r\n \"\"\"\r\n Scale the box with horizontal and vertical scaling factors\r\n \"\"\"\r\n self.tensor[:, 0::2] *= scale_x\r\n self.tensor[:, 1::2] *= scale_y\r\n\r\n @classmethod\r\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\r\n \"\"\"\r\n Concatenates a list of Boxes into a single Boxes\r\n\r\n Arguments:\r\n boxes_list (list[Boxes])\r\n\r\n Returns:\r\n Boxes: the concatenated Boxes\r\n \"\"\"\r\n assert isinstance(boxes_list, (list, tuple))\r\n if len(boxes_list) == 0:\r\n return cls(torch.empty(0))\r\n assert all([isinstance(box, Boxes) for box in boxes_list])\r\n\r\n # use torch.cat (v.s. 
layers.cat) so the returned boxes never share storage with input\r\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\r\n return cat_boxes\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\r\n # https://github.com/pytorch/pytorch/issues/18627\r\n @torch.jit.unused\r\n def __iter__(self):\r\n \"\"\"\r\n Yield a box as a Tensor of shape (4,) at a time.\r\n \"\"\"\r\n yield from self.tensor\r" }, { "identifier": "pairwise_iou", "path": "annotator/oneformer/detectron2/structures/boxes.py", "snippet": "def pairwise_iou(boxes1: Boxes, boxes2: Boxes) -> torch.Tensor:\r\n \"\"\"\r\n Given two lists of boxes of size N and M, compute the IoU\r\n (intersection over union) between **all** N x M pairs of boxes.\r\n The box order must be (xmin, ymin, xmax, ymax).\r\n\r\n Args:\r\n boxes1,boxes2 (Boxes): two `Boxes`. Contains N & M boxes, respectively.\r\n\r\n Returns:\r\n Tensor: IoU, sized [N,M].\r\n \"\"\"\r\n area1 = boxes1.area() # [N]\r\n area2 = boxes2.area() # [M]\r\n inter = pairwise_intersection(boxes1, boxes2)\r\n\r\n # handle empty boxes\r\n iou = torch.where(\r\n inter > 0,\r\n inter / (area1[:, None] + area2 - inter),\r\n torch.zeros(1, dtype=inter.dtype, device=inter.device),\r\n )\r\n return iou\r" }, { "identifier": "ImageList", "path": "annotator/oneformer/detectron2/structures/image_list.py", "snippet": "class ImageList(object):\r\n \"\"\"\r\n Structure that holds a list of images (of possibly\r\n varying sizes) as a single tensor.\r\n This works by padding the images to the same size.\r\n The original sizes of each image is stored in `image_sizes`.\r\n\r\n Attributes:\r\n image_sizes (list[tuple[int, int]]): each tuple is (h, w).\r\n During tracing, it becomes list[Tensor] instead.\r\n \"\"\"\r\n\r\n def __init__(self, tensor: torch.Tensor, image_sizes: List[Tuple[int, int]]):\r\n \"\"\"\r\n Arguments:\r\n tensor (Tensor): of shape (N, H, W) or (N, C_1, ..., C_K, H, W) where K >= 1\r\n image_sizes (list[tuple[int, int]]): Each tuple is (h, w). It can\r\n be smaller than (H, W) due to padding.\r\n \"\"\"\r\n self.tensor = tensor\r\n self.image_sizes = image_sizes\r\n\r\n def __len__(self) -> int:\r\n return len(self.image_sizes)\r\n\r\n def __getitem__(self, idx) -> torch.Tensor:\r\n \"\"\"\r\n Access the individual image in its original size.\r\n\r\n Args:\r\n idx: int or slice\r\n\r\n Returns:\r\n Tensor: an image of shape (H, W) or (C_1, ..., C_K, H, W) where K >= 1\r\n \"\"\"\r\n size = self.image_sizes[idx]\r\n return self.tensor[idx, ..., : size[0], : size[1]]\r\n\r\n @torch.jit.unused\r\n def to(self, *args: Any, **kwargs: Any) -> \"ImageList\":\r\n cast_tensor = self.tensor.to(*args, **kwargs)\r\n return ImageList(cast_tensor, self.image_sizes)\r\n\r\n @property\r\n def device(self) -> device:\r\n return self.tensor.device\r\n\r\n @staticmethod\r\n def from_tensors(\r\n tensors: List[torch.Tensor],\r\n size_divisibility: int = 0,\r\n pad_value: float = 0.0,\r\n padding_constraints: Optional[Dict[str, int]] = None,\r\n ) -> \"ImageList\":\r\n \"\"\"\r\n Args:\r\n tensors: a tuple or list of `torch.Tensor`, each of shape (Hi, Wi) or\r\n (C_1, ..., C_K, Hi, Wi) where K >= 1. 
The Tensors will be padded\r\n to the same shape with `pad_value`.\r\n size_divisibility (int): If `size_divisibility > 0`, add padding to ensure\r\n the common height and width is divisible by `size_divisibility`.\r\n This depends on the model and many models need a divisibility of 32.\r\n pad_value (float): value to pad.\r\n padding_constraints (optional[Dict]): If given, it would follow the format as\r\n {\"size_divisibility\": int, \"square_size\": int}, where `size_divisibility` will\r\n overwrite the above one if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n Returns:\r\n an `ImageList`.\r\n \"\"\"\r\n assert len(tensors) > 0\r\n assert isinstance(tensors, (tuple, list))\r\n for t in tensors:\r\n assert isinstance(t, torch.Tensor), type(t)\r\n assert t.shape[:-2] == tensors[0].shape[:-2], t.shape\r\n\r\n image_sizes = [(im.shape[-2], im.shape[-1]) for im in tensors]\r\n image_sizes_tensor = [shapes_to_tensor(x) for x in image_sizes]\r\n max_size = torch.stack(image_sizes_tensor).max(0).values\r\n\r\n if padding_constraints is not None:\r\n square_size = padding_constraints.get(\"square_size\", 0)\r\n if square_size > 0:\r\n # pad to square.\r\n max_size[0] = max_size[1] = square_size\r\n if \"size_divisibility\" in padding_constraints:\r\n size_divisibility = padding_constraints[\"size_divisibility\"]\r\n if size_divisibility > 1:\r\n stride = size_divisibility\r\n # the last two dims are H,W, both subject to divisibility requirement\r\n max_size = (max_size + (stride - 1)).div(stride, rounding_mode=\"floor\") * stride\r\n\r\n # handle weirdness of scripting and tracing ...\r\n if torch.jit.is_scripting():\r\n max_size: List[int] = max_size.to(dtype=torch.long).tolist()\r\n else:\r\n if torch.jit.is_tracing():\r\n image_sizes = image_sizes_tensor\r\n\r\n if len(tensors) == 1:\r\n # This seems slightly (2%) faster.\r\n # TODO: check whether it's faster for multiple images as well\r\n image_size = image_sizes[0]\r\n padding_size = [0, max_size[-1] - image_size[1], 0, max_size[-2] - image_size[0]]\r\n batched_imgs = F.pad(tensors[0], padding_size, value=pad_value).unsqueeze_(0)\r\n else:\r\n # max_size can be a tensor in tracing mode, therefore convert to list\r\n batch_shape = [len(tensors)] + list(tensors[0].shape[:-2]) + list(max_size)\r\n device = (\r\n None if torch.jit.is_scripting() else (\"cpu\" if torch.jit.is_tracing() else None)\r\n )\r\n batched_imgs = tensors[0].new_full(batch_shape, pad_value, device=device)\r\n batched_imgs = move_device_like(batched_imgs, tensors[0])\r\n for i, img in enumerate(tensors):\r\n # Use `batched_imgs` directly instead of `img, pad_img = zip(tensors, batched_imgs)`\r\n # Tracing mode cannot capture `copy_()` of temporary locals\r\n batched_imgs[i, ..., : img.shape[-2], : img.shape[-1]].copy_(img)\r\n\r\n return ImageList(batched_imgs.contiguous(), image_sizes)\r" }, { "identifier": "Instances", "path": "annotator/oneformer/detectron2/structures/instances.py", "snippet": "class Instances:\r\n \"\"\"\r\n This class represents a list of instances in an image.\r\n It stores the attributes of instances (e.g., boxes, masks, labels, scores) as \"fields\".\r\n All fields must have the same ``__len__`` which is the number of instances.\r\n\r\n All other (non-field) attributes of this class are considered private:\r\n they must start with '_' and are not modifiable by a user.\r\n\r\n Some basic usage:\r\n\r\n 1. Set/get/check a field:\r\n\r\n .. 
code-block:: python\r\n\r\n instances.gt_boxes = Boxes(...)\r\n print(instances.pred_masks) # a tensor of shape (N, H, W)\r\n print('gt_masks' in instances)\r\n\r\n 2. ``len(instances)`` returns the number of instances\r\n 3. Indexing: ``instances[indices]`` will apply the indexing on all the fields\r\n and returns a new :class:`Instances`.\r\n Typically, ``indices`` is a integer vector of indices,\r\n or a binary mask of length ``num_instances``\r\n\r\n .. code-block:: python\r\n\r\n category_3_detections = instances[instances.pred_classes == 3]\r\n confident_detections = instances[instances.scores > 0.9]\r\n \"\"\"\r\n\r\n def __init__(self, image_size: Tuple[int, int], **kwargs: Any):\r\n \"\"\"\r\n Args:\r\n image_size (height, width): the spatial size of the image.\r\n kwargs: fields to add to this `Instances`.\r\n \"\"\"\r\n self._image_size = image_size\r\n self._fields: Dict[str, Any] = {}\r\n for k, v in kwargs.items():\r\n self.set(k, v)\r\n\r\n @property\r\n def image_size(self) -> Tuple[int, int]:\r\n \"\"\"\r\n Returns:\r\n tuple: height, width\r\n \"\"\"\r\n return self._image_size\r\n\r\n def __setattr__(self, name: str, val: Any) -> None:\r\n if name.startswith(\"_\"):\r\n super().__setattr__(name, val)\r\n else:\r\n self.set(name, val)\r\n\r\n def __getattr__(self, name: str) -> Any:\r\n if name == \"_fields\" or name not in self._fields:\r\n raise AttributeError(\"Cannot find field '{}' in the given Instances!\".format(name))\r\n return self._fields[name]\r\n\r\n def set(self, name: str, value: Any) -> None:\r\n \"\"\"\r\n Set the field named `name` to `value`.\r\n The length of `value` must be the number of instances,\r\n and must agree with other existing fields in this object.\r\n \"\"\"\r\n with warnings.catch_warnings(record=True):\r\n data_len = len(value)\r\n if len(self._fields):\r\n assert (\r\n len(self) == data_len\r\n ), \"Adding a field of length {} to a Instances of length {}\".format(data_len, len(self))\r\n self._fields[name] = value\r\n\r\n def has(self, name: str) -> bool:\r\n \"\"\"\r\n Returns:\r\n bool: whether the field called `name` exists.\r\n \"\"\"\r\n return name in self._fields\r\n\r\n def remove(self, name: str) -> None:\r\n \"\"\"\r\n Remove the field called `name`.\r\n \"\"\"\r\n del self._fields[name]\r\n\r\n def get(self, name: str) -> Any:\r\n \"\"\"\r\n Returns the field called `name`.\r\n \"\"\"\r\n return self._fields[name]\r\n\r\n def get_fields(self) -> Dict[str, Any]:\r\n \"\"\"\r\n Returns:\r\n dict: a dict which maps names (str) to data of the fields\r\n\r\n Modifying the returned dict will modify this instance.\r\n \"\"\"\r\n return self._fields\r\n\r\n # Tensor-like methods\r\n def to(self, *args: Any, **kwargs: Any) -> \"Instances\":\r\n \"\"\"\r\n Returns:\r\n Instances: all fields are called with a `to(device)`, if the field has this method.\r\n \"\"\"\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n if hasattr(v, \"to\"):\r\n v = v.to(*args, **kwargs)\r\n ret.set(k, v)\r\n return ret\r\n\r\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n item: an index-like object and will be used to index all the fields.\r\n\r\n Returns:\r\n If `item` is a string, return the data in the corresponding field.\r\n Otherwise, returns an `Instances` where all fields are indexed by `item`.\r\n \"\"\"\r\n if type(item) == int:\r\n if item >= len(self) or item < -len(self):\r\n raise IndexError(\"Instances index out of range!\")\r\n else:\r\n item = 
slice(item, None, len(self))\r\n\r\n ret = Instances(self._image_size)\r\n for k, v in self._fields.items():\r\n ret.set(k, v[item])\r\n return ret\r\n\r\n def __len__(self) -> int:\r\n for v in self._fields.values():\r\n # use __len__ because len() has to be int and is not friendly to tracing\r\n return v.__len__()\r\n raise NotImplementedError(\"Empty Instances does not support __len__!\")\r\n\r\n def __iter__(self):\r\n raise NotImplementedError(\"`Instances` object is not iterable!\")\r\n\r\n @staticmethod\r\n def cat(instance_lists: List[\"Instances\"]) -> \"Instances\":\r\n \"\"\"\r\n Args:\r\n instance_lists (list[Instances])\r\n\r\n Returns:\r\n Instances\r\n \"\"\"\r\n assert all(isinstance(i, Instances) for i in instance_lists)\r\n assert len(instance_lists) > 0\r\n if len(instance_lists) == 1:\r\n return instance_lists[0]\r\n\r\n image_size = instance_lists[0].image_size\r\n if not isinstance(image_size, torch.Tensor): # could be a tensor in tracing\r\n for i in instance_lists[1:]:\r\n assert i.image_size == image_size\r\n ret = Instances(image_size)\r\n for k in instance_lists[0]._fields.keys():\r\n values = [i.get(k) for i in instance_lists]\r\n v0 = values[0]\r\n if isinstance(v0, torch.Tensor):\r\n values = torch.cat(values, dim=0)\r\n elif isinstance(v0, list):\r\n values = list(itertools.chain(*values))\r\n elif hasattr(type(v0), \"cat\"):\r\n values = type(v0).cat(values)\r\n else:\r\n raise ValueError(\"Unsupported type {} for concatenation\".format(type(v0)))\r\n ret.set(k, values)\r\n return ret\r\n\r\n def __str__(self) -> str:\r\n s = self.__class__.__name__ + \"(\"\r\n s += \"num_instances={}, \".format(len(self))\r\n s += \"image_height={}, \".format(self._image_size[0])\r\n s += \"image_width={}, \".format(self._image_size[1])\r\n s += \"fields=[{}])\".format(\", \".join((f\"{k}: {v}\" for k, v in self._fields.items())))\r\n return s\r\n\r\n __repr__ = __str__\r" }, { "identifier": "get_event_storage", "path": "annotator/oneformer/detectron2/utils/events.py", "snippet": "def get_event_storage():\r\n \"\"\"\r\n Returns:\r\n The :class:`EventStorage` object that's currently being used.\r\n Throws an error if no :class:`EventStorage` is currently enabled.\r\n \"\"\"\r\n assert len(\r\n _CURRENT_STORAGE_STACK\r\n ), \"get_event_storage() has to be called inside a 'with EventStorage(...)' context!\"\r\n return _CURRENT_STORAGE_STACK[-1]\r" }, { "identifier": "build_anchor_generator", "path": "annotator/oneformer/detectron2/modeling/anchor_generator.py", "snippet": "def build_anchor_generator(cfg, input_shape):\r\n \"\"\"\r\n Built an anchor generator from `cfg.MODEL.ANCHOR_GENERATOR.NAME`.\r\n \"\"\"\r\n anchor_generator = cfg.MODEL.ANCHOR_GENERATOR.NAME\r\n return ANCHOR_GENERATOR_REGISTRY.get(anchor_generator)(cfg, input_shape)\r" }, { "identifier": "build_backbone", "path": "annotator/oneformer/detectron2/modeling/backbone/build.py", "snippet": "def build_backbone(cfg, input_shape=None):\r\n \"\"\"\r\n Build a backbone from `cfg.MODEL.BACKBONE.NAME`.\r\n\r\n Returns:\r\n an instance of :class:`Backbone`\r\n \"\"\"\r\n if input_shape is None:\r\n input_shape = ShapeSpec(channels=len(cfg.MODEL.PIXEL_MEAN))\r\n\r\n backbone_name = cfg.MODEL.BACKBONE.NAME\r\n backbone = BACKBONE_REGISTRY.get(backbone_name)(cfg, input_shape)\r\n assert isinstance(backbone, Backbone)\r\n return backbone\r" }, { "identifier": "Backbone", "path": "annotator/oneformer/detectron2/modeling/backbone/backbone.py", "snippet": "class Backbone(nn.Module, metaclass=ABCMeta):\r\n \"\"\"\r\n Abstract 
base class for network backbones.\r\n \"\"\"\r\n\r\n def __init__(self):\r\n \"\"\"\r\n The `__init__` method of any subclass can specify its own set of arguments.\r\n \"\"\"\r\n super().__init__()\r\n\r\n @abstractmethod\r\n def forward(self):\r\n \"\"\"\r\n Subclasses must override this method, but adhere to the same return type.\r\n\r\n Returns:\r\n dict[str->Tensor]: mapping from feature name (e.g., \"res2\") to tensor\r\n \"\"\"\r\n pass\r\n\r\n @property\r\n def size_divisibility(self) -> int:\r\n \"\"\"\r\n Some backbones require the input height and width to be divisible by a\r\n specific integer. This is typically true for encoder / decoder type networks\r\n with lateral connection (e.g., FPN) for which feature maps need to match\r\n dimension in the \"bottom up\" and \"top down\" paths. Set to 0 if no specific\r\n input size divisibility is required.\r\n \"\"\"\r\n return 0\r\n\r\n @property\r\n def padding_constraints(self) -> Dict[str, int]:\r\n \"\"\"\r\n This property is a generalization of size_divisibility. Some backbones and training\r\n recipes require specific padding constraints, such as enforcing divisibility by a specific\r\n integer (e.g., FPN) or padding to a square (e.g., ViTDet with large-scale jitter\r\n in :paper:vitdet). `padding_constraints` contains these optional items like:\r\n {\r\n \"size_divisibility\": int,\r\n \"square_size\": int,\r\n # Future options are possible\r\n }\r\n `size_divisibility` will read from here if presented and `square_size` indicates the\r\n square padding size if `square_size` > 0.\r\n\r\n TODO: use type of Dict[str, int] to avoid torchscipt issues. The type of padding_constraints\r\n could be generalized as TypedDict (Python 3.8+) to support more types in the future.\r\n \"\"\"\r\n return {}\r\n\r\n def output_shape(self):\r\n \"\"\"\r\n Returns:\r\n dict[str->ShapeSpec]\r\n \"\"\"\r\n # this is a backward-compatible default\r\n return {\r\n name: ShapeSpec(\r\n channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]\r\n )\r\n for name in self._out_features\r\n }\r" }, { "identifier": "Box2BoxTransform", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "class Box2BoxTransform(object):\r\n \"\"\"\r\n The box-to-box transform defined in R-CNN. The transformation is parameterized\r\n by 4 deltas: (dx, dy, dw, dh). The transformation scales the box's width and height\r\n by exp(dw), exp(dh) and shifts a box's center by the offset (dx * width, dy * height).\r\n \"\"\"\r\n\r\n def __init__(\r\n self, weights: Tuple[float, float, float, float], scale_clamp: float = _DEFAULT_SCALE_CLAMP\r\n ):\r\n \"\"\"\r\n Args:\r\n weights (4-element tuple): Scaling factors that are applied to the\r\n (dx, dy, dw, dh) deltas. In Fast R-CNN, these were originally set\r\n such that the deltas have unit variance; now they are treated as\r\n hyperparameters of the system.\r\n scale_clamp (float): When predicting deltas, the predicted box scaling\r\n factors (dw and dh) are clamped such that they are <= scale_clamp.\r\n \"\"\"\r\n self.weights = weights\r\n self.scale_clamp = scale_clamp\r\n\r\n def get_deltas(self, src_boxes, target_boxes):\r\n \"\"\"\r\n Get box regression transformation deltas (dx, dy, dw, dh) that can be used\r\n to transform the `src_boxes` into the `target_boxes`. 
That is, the relation\r\n ``target_boxes == self.apply_deltas(deltas, src_boxes)`` is true (unless\r\n any delta is too large and is clamped).\r\n\r\n Args:\r\n src_boxes (Tensor): source boxes, e.g., object proposals\r\n target_boxes (Tensor): target of the transformation, e.g., ground-truth\r\n boxes.\r\n \"\"\"\r\n assert isinstance(src_boxes, torch.Tensor), type(src_boxes)\r\n assert isinstance(target_boxes, torch.Tensor), type(target_boxes)\r\n\r\n src_widths = src_boxes[:, 2] - src_boxes[:, 0]\r\n src_heights = src_boxes[:, 3] - src_boxes[:, 1]\r\n src_ctr_x = src_boxes[:, 0] + 0.5 * src_widths\r\n src_ctr_y = src_boxes[:, 1] + 0.5 * src_heights\r\n\r\n target_widths = target_boxes[:, 2] - target_boxes[:, 0]\r\n target_heights = target_boxes[:, 3] - target_boxes[:, 1]\r\n target_ctr_x = target_boxes[:, 0] + 0.5 * target_widths\r\n target_ctr_y = target_boxes[:, 1] + 0.5 * target_heights\r\n\r\n wx, wy, ww, wh = self.weights\r\n dx = wx * (target_ctr_x - src_ctr_x) / src_widths\r\n dy = wy * (target_ctr_y - src_ctr_y) / src_heights\r\n dw = ww * torch.log(target_widths / src_widths)\r\n dh = wh * torch.log(target_heights / src_heights)\r\n\r\n deltas = torch.stack((dx, dy, dw, dh), dim=1)\r\n assert (src_widths > 0).all().item(), \"Input boxes to Box2BoxTransform are not valid!\"\r\n return deltas\r\n\r\n def apply_deltas(self, deltas, boxes):\r\n \"\"\"\r\n Apply transformation `deltas` (dx, dy, dw, dh) to `boxes`.\r\n\r\n Args:\r\n deltas (Tensor): transformation deltas of shape (N, k*4), where k >= 1.\r\n deltas[i] represents k potentially different class-specific\r\n box transformations for the single box boxes[i].\r\n boxes (Tensor): boxes to transform, of shape (N, 4)\r\n \"\"\"\r\n deltas = deltas.float() # ensure fp32 for decoding precision\r\n boxes = boxes.to(deltas.dtype)\r\n\r\n widths = boxes[:, 2] - boxes[:, 0]\r\n heights = boxes[:, 3] - boxes[:, 1]\r\n ctr_x = boxes[:, 0] + 0.5 * widths\r\n ctr_y = boxes[:, 1] + 0.5 * heights\r\n\r\n wx, wy, ww, wh = self.weights\r\n dx = deltas[:, 0::4] / wx\r\n dy = deltas[:, 1::4] / wy\r\n dw = deltas[:, 2::4] / ww\r\n dh = deltas[:, 3::4] / wh\r\n\r\n # Prevent sending too large values into torch.exp()\r\n dw = torch.clamp(dw, max=self.scale_clamp)\r\n dh = torch.clamp(dh, max=self.scale_clamp)\r\n\r\n pred_ctr_x = dx * widths[:, None] + ctr_x[:, None]\r\n pred_ctr_y = dy * heights[:, None] + ctr_y[:, None]\r\n pred_w = torch.exp(dw) * widths[:, None]\r\n pred_h = torch.exp(dh) * heights[:, None]\r\n\r\n x1 = pred_ctr_x - 0.5 * pred_w\r\n y1 = pred_ctr_y - 0.5 * pred_h\r\n x2 = pred_ctr_x + 0.5 * pred_w\r\n y2 = pred_ctr_y + 0.5 * pred_h\r\n pred_boxes = torch.stack((x1, y1, x2, y2), dim=-1)\r\n return pred_boxes.reshape(deltas.shape)\r" }, { "identifier": "_dense_box_regression_loss", "path": "annotator/oneformer/detectron2/modeling/box_regression.py", "snippet": "def _dense_box_regression_loss(\r\n anchors: List[Union[Boxes, torch.Tensor]],\r\n box2box_transform: Box2BoxTransform,\r\n pred_anchor_deltas: List[torch.Tensor],\r\n gt_boxes: List[torch.Tensor],\r\n fg_mask: torch.Tensor,\r\n box_reg_loss_type=\"smooth_l1\",\r\n smooth_l1_beta=0.0,\r\n):\r\n \"\"\"\r\n Compute loss for dense multi-level box regression.\r\n Loss is accumulated over ``fg_mask``.\r\n\r\n Args:\r\n anchors: #lvl anchor boxes, each is (HixWixA, 4)\r\n pred_anchor_deltas: #lvl predictions, each is (N, HixWixA, 4)\r\n gt_boxes: N ground truth boxes, each has shape (R, 4) (R = sum(Hi * Wi * A))\r\n fg_mask: the foreground boolean mask of shape (N, R) to 
compute loss on\r\n box_reg_loss_type (str): Loss type to use. Supported losses: \"smooth_l1\", \"giou\",\r\n \"diou\", \"ciou\".\r\n smooth_l1_beta (float): beta parameter for the smooth L1 regression loss. Default to\r\n use L1 loss. Only used when `box_reg_loss_type` is \"smooth_l1\"\r\n \"\"\"\r\n if isinstance(anchors[0], Boxes):\r\n anchors = type(anchors[0]).cat(anchors).tensor # (R, 4)\r\n else:\r\n anchors = cat(anchors)\r\n if box_reg_loss_type == \"smooth_l1\":\r\n gt_anchor_deltas = [box2box_transform.get_deltas(anchors, k) for k in gt_boxes]\r\n gt_anchor_deltas = torch.stack(gt_anchor_deltas) # (N, R, 4)\r\n loss_box_reg = smooth_l1_loss(\r\n cat(pred_anchor_deltas, dim=1)[fg_mask],\r\n gt_anchor_deltas[fg_mask],\r\n beta=smooth_l1_beta,\r\n reduction=\"sum\",\r\n )\r\n elif box_reg_loss_type == \"giou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = giou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"diou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = diou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n elif box_reg_loss_type == \"ciou\":\r\n pred_boxes = [\r\n box2box_transform.apply_deltas(k, anchors) for k in cat(pred_anchor_deltas, dim=1)\r\n ]\r\n loss_box_reg = ciou_loss(\r\n torch.stack(pred_boxes)[fg_mask], torch.stack(gt_boxes)[fg_mask], reduction=\"sum\"\r\n )\r\n else:\r\n raise ValueError(f\"Invalid dense box regression loss type '{box_reg_loss_type}'\")\r\n return loss_box_reg\r" }, { "identifier": "Matcher", "path": "annotator/oneformer/detectron2/modeling/matcher.py", "snippet": "class Matcher(object):\r\n \"\"\"\r\n This class assigns to each predicted \"element\" (e.g., a box) a ground-truth\r\n element. Each predicted element will have exactly zero or one matches; each\r\n ground-truth element may be matched to zero or more predicted elements.\r\n\r\n The matching is determined by the MxN match_quality_matrix, that characterizes\r\n how well each (ground-truth, prediction)-pair match each other. For example,\r\n if the elements are boxes, this matrix may contain box intersection-over-union\r\n overlap values.\r\n\r\n The matcher returns (a) a vector of length N containing the index of the\r\n ground-truth element m in [0, M) that matches to prediction n in [0, N).\r\n (b) a vector of length N containing the labels for each prediction.\r\n \"\"\"\r\n\r\n def __init__(\r\n self, thresholds: List[float], labels: List[int], allow_low_quality_matches: bool = False\r\n ):\r\n \"\"\"\r\n Args:\r\n thresholds (list): a list of thresholds used to stratify predictions\r\n into levels.\r\n labels (list): a list of values to label predictions belonging at\r\n each level. 
A label can be one of {-1, 0, 1} signifying\r\n {ignore, negative class, positive class}, respectively.\r\n allow_low_quality_matches (bool): if True, produce additional matches\r\n for predictions with maximum match quality lower than high_threshold.\r\n See set_low_quality_matches_ for more details.\r\n\r\n For example,\r\n thresholds = [0.3, 0.5]\r\n labels = [0, -1, 1]\r\n All predictions with iou < 0.3 will be marked with 0 and\r\n thus will be considered as false positives while training.\r\n All predictions with 0.3 <= iou < 0.5 will be marked with -1 and\r\n thus will be ignored.\r\n All predictions with 0.5 <= iou will be marked with 1 and\r\n thus will be considered as true positives.\r\n \"\"\"\r\n # Add -inf and +inf to first and last position in thresholds\r\n thresholds = thresholds[:]\r\n assert thresholds[0] > 0\r\n thresholds.insert(0, -float(\"inf\"))\r\n thresholds.append(float(\"inf\"))\r\n # Currently torchscript does not support all + generator\r\n assert all([low <= high for (low, high) in zip(thresholds[:-1], thresholds[1:])])\r\n assert all([l in [-1, 0, 1] for l in labels])\r\n assert len(labels) == len(thresholds) - 1\r\n self.thresholds = thresholds\r\n self.labels = labels\r\n self.allow_low_quality_matches = allow_low_quality_matches\r\n\r\n def __call__(self, match_quality_matrix):\r\n \"\"\"\r\n Args:\r\n match_quality_matrix (Tensor[float]): an MxN tensor, containing the\r\n pairwise quality between M ground-truth elements and N predicted\r\n elements. All elements must be >= 0 (due to the us of `torch.nonzero`\r\n for selecting indices in :meth:`set_low_quality_matches_`).\r\n\r\n Returns:\r\n matches (Tensor[int64]): a vector of length N, where matches[i] is a matched\r\n ground-truth index in [0, M)\r\n match_labels (Tensor[int8]): a vector of length N, where pred_labels[i] indicates\r\n whether a prediction is a true or false positive or ignored\r\n \"\"\"\r\n assert match_quality_matrix.dim() == 2\r\n if match_quality_matrix.numel() == 0:\r\n default_matches = match_quality_matrix.new_full(\r\n (match_quality_matrix.size(1),), 0, dtype=torch.int64\r\n )\r\n # When no gt boxes exist, we define IOU = 0 and therefore set labels\r\n # to `self.labels[0]`, which usually defaults to background class 0\r\n # To choose to ignore instead, can make labels=[-1,0,-1,1] + set appropriate thresholds\r\n default_match_labels = match_quality_matrix.new_full(\r\n (match_quality_matrix.size(1),), self.labels[0], dtype=torch.int8\r\n )\r\n return default_matches, default_match_labels\r\n\r\n assert torch.all(match_quality_matrix >= 0)\r\n\r\n # match_quality_matrix is M (gt) x N (predicted)\r\n # Max over gt elements (dim 0) to find best gt candidate for each prediction\r\n matched_vals, matches = match_quality_matrix.max(dim=0)\r\n\r\n match_labels = matches.new_full(matches.size(), 1, dtype=torch.int8)\r\n\r\n for (l, low, high) in zip(self.labels, self.thresholds[:-1], self.thresholds[1:]):\r\n low_high = (matched_vals >= low) & (matched_vals < high)\r\n match_labels[low_high] = l\r\n\r\n if self.allow_low_quality_matches:\r\n self.set_low_quality_matches_(match_labels, match_quality_matrix)\r\n\r\n return matches, match_labels\r\n\r\n def set_low_quality_matches_(self, match_labels, match_quality_matrix):\r\n \"\"\"\r\n Produce additional matches for predictions that have only low-quality matches.\r\n Specifically, for each ground-truth G find the set of predictions that have\r\n maximum overlap with it (including ties); for each prediction in that set, if\r\n it is 
unmatched, then match it to the ground-truth G.\r\n\r\n This function implements the RPN assignment case (i) in Sec. 3.1.2 of\r\n :paper:`Faster R-CNN`.\r\n \"\"\"\r\n # For each gt, find the prediction with which it has highest quality\r\n highest_quality_foreach_gt, _ = match_quality_matrix.max(dim=1)\r\n # Find the highest quality match available, even if it is low, including ties.\r\n # Note that the matches qualities must be positive due to the use of\r\n # `torch.nonzero`.\r\n _, pred_inds_with_highest_quality = nonzero_tuple(\r\n match_quality_matrix == highest_quality_foreach_gt[:, None]\r\n )\r\n # If an anchor was labeled positive only due to a low-quality match\r\n # with gt_A, but it has larger overlap with gt_B, it's matched index will still be gt_B.\r\n # This follows the implementation in Detectron, and is found to have no significant impact.\r\n match_labels[pred_inds_with_highest_quality] = 1\r" }, { "identifier": "META_ARCH_REGISTRY", "path": "annotator/oneformer/detectron2/modeling/meta_arch/build.py", "snippet": "META_ARCH_REGISTRY = Registry(\"META_ARCH\") # noqa F401 isort:skip\r" }, { "identifier": "DenseDetector", "path": "annotator/oneformer/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "class DenseDetector(nn.Module):\r\n \"\"\"\r\n Base class for dense detector. We define a dense detector as a fully-convolutional model that\r\n makes per-pixel (i.e. dense) predictions.\r\n \"\"\"\r\n\r\n def __init__(\r\n self,\r\n backbone: Backbone,\r\n head: nn.Module,\r\n head_in_features: Optional[List[str]] = None,\r\n *,\r\n pixel_mean,\r\n pixel_std,\r\n ):\r\n \"\"\"\r\n Args:\r\n backbone: backbone module\r\n head: head module\r\n head_in_features: backbone features to use in head. Default to all backbone features.\r\n pixel_mean (Tuple[float]):\r\n Values to be used for image normalization (BGR order).\r\n To train on images of different number of channels, set different mean & std.\r\n Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]\r\n pixel_std (Tuple[float]):\r\n When using pre-trained models in Detectron1 or any MSRA models,\r\n std has been absorbed into its conv1 weights, so the std needs to be set 1.\r\n Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)\r\n \"\"\"\r\n super().__init__()\r\n\r\n self.backbone = backbone\r\n self.head = head\r\n if head_in_features is None:\r\n shapes = self.backbone.output_shape()\r\n self.head_in_features = sorted(shapes.keys(), key=lambda x: shapes[x].stride)\r\n else:\r\n self.head_in_features = head_in_features\r\n self.register_buffer(\"pixel_mean\", torch.tensor(pixel_mean).view(-1, 1, 1), False)\r\n self.register_buffer(\"pixel_std\", torch.tensor(pixel_std).view(-1, 1, 1), False)\r\n\r\n @property\r\n def device(self):\r\n return self.pixel_mean.device\r\n\r\n def _move_to_current_device(self, x):\r\n return move_device_like(x, self.pixel_mean)\r\n\r\n def forward(self, batched_inputs: List[Dict[str, Tensor]]):\r\n \"\"\"\r\n Args:\r\n batched_inputs: a list, batched outputs of :class:`DatasetMapper` .\r\n Each item in the list contains the inputs for one image.\r\n For now, each item in the list is a dict that contains:\r\n\r\n * image: Tensor, image in (C, H, W) format.\r\n * instances: Instances\r\n\r\n Other information that's included in the original dicts, such as:\r\n\r\n * \"height\", \"width\" (int): the output resolution of the model, used in inference.\r\n See :meth:`postprocess` for details.\r\n\r\n Returns:\r\n In training, dict[str, Tensor]: 
mapping from a named loss to a tensor storing the\r\n loss. Used during training only. In inference, the standard output format, described\r\n in :doc:`/tutorials/models`.\r\n \"\"\"\r\n images = self.preprocess_image(batched_inputs)\r\n features = self.backbone(images.tensor)\r\n features = [features[f] for f in self.head_in_features]\r\n predictions = self.head(features)\r\n\r\n if self.training:\r\n assert not torch.jit.is_scripting(), \"Not supported\"\r\n assert \"instances\" in batched_inputs[0], \"Instance annotations are missing in training!\"\r\n gt_instances = [x[\"instances\"].to(self.device) for x in batched_inputs]\r\n return self.forward_training(images, features, predictions, gt_instances)\r\n else:\r\n results = self.forward_inference(images, features, predictions)\r\n if torch.jit.is_scripting():\r\n return results\r\n\r\n processed_results = []\r\n for results_per_image, input_per_image, image_size in zip(\r\n results, batched_inputs, images.image_sizes\r\n ):\r\n height = input_per_image.get(\"height\", image_size[0])\r\n width = input_per_image.get(\"width\", image_size[1])\r\n r = detector_postprocess(results_per_image, height, width)\r\n processed_results.append({\"instances\": r})\r\n return processed_results\r\n\r\n def forward_training(self, images, features, predictions, gt_instances):\r\n raise NotImplementedError()\r\n\r\n def preprocess_image(self, batched_inputs: List[Dict[str, Tensor]]):\r\n \"\"\"\r\n Normalize, pad and batch the input images.\r\n \"\"\"\r\n images = [self._move_to_current_device(x[\"image\"]) for x in batched_inputs]\r\n images = [(x - self.pixel_mean) / self.pixel_std for x in images]\r\n images = ImageList.from_tensors(\r\n images,\r\n self.backbone.size_divisibility,\r\n padding_constraints=self.backbone.padding_constraints,\r\n )\r\n return images\r\n\r\n def _transpose_dense_predictions(\r\n self, predictions: List[List[Tensor]], dims_per_anchor: List[int]\r\n ) -> List[List[Tensor]]:\r\n \"\"\"\r\n Transpose the dense per-level predictions.\r\n\r\n Args:\r\n predictions: a list of outputs, each is a list of per-level\r\n predictions with shape (N, Ai x K, Hi, Wi), where N is the\r\n number of images, Ai is the number of anchors per location on\r\n level i, K is the dimension of predictions per anchor.\r\n dims_per_anchor: the value of K for each predictions. e.g. 4 for\r\n box prediction, #classes for classification prediction.\r\n\r\n Returns:\r\n List[List[Tensor]]: each prediction is transposed to (N, Hi x Wi x Ai, K).\r\n \"\"\"\r\n assert len(predictions) == len(dims_per_anchor)\r\n res: List[List[Tensor]] = []\r\n for pred, dim_per_anchor in zip(predictions, dims_per_anchor):\r\n pred = [permute_to_N_HWA_K(x, dim_per_anchor) for x in pred]\r\n res.append(pred)\r\n return res\r\n\r\n def _ema_update(self, name: str, value: float, initial_value: float, momentum: float = 0.9):\r\n \"\"\"\r\n Apply EMA update to `self.name` using `value`.\r\n\r\n This is mainly used for loss normalizer. In Detectron1, loss is normalized by number\r\n of foreground samples in the batch. When batch size is 1 per GPU, #foreground has a\r\n large variance and using it lead to lower performance. 
Therefore we maintain an EMA of\r\n        #foreground to stabilize the normalizer.\r\n\r\n        Args:\r\n            name: name of the normalizer\r\n            value: the new value to update\r\n            initial_value: the initial value to start with\r\n            momentum: momentum of EMA\r\n\r\n        Returns:\r\n            float: the updated EMA value\r\n        \"\"\"\r\n        if hasattr(self, name):\r\n            old = getattr(self, name)\r\n        else:\r\n            old = initial_value\r\n        new = old * momentum + value * (1 - momentum)\r\n        setattr(self, name, new)\r\n        return new\r\n\r\n    def _decode_per_level_predictions(\r\n        self,\r\n        anchors: Boxes,\r\n        pred_scores: Tensor,\r\n        pred_deltas: Tensor,\r\n        score_thresh: float,\r\n        topk_candidates: int,\r\n        image_size: Tuple[int, int],\r\n    ) -> Instances:\r\n        \"\"\"\r\n        Decode boxes and classification predictions of one feature level, by\r\n        the following steps:\r\n        1. filter the predictions based on score threshold and top K scores.\r\n        2. transform the box regression outputs\r\n        3. return the predicted scores, classes and boxes\r\n\r\n        Args:\r\n            anchors: Boxes, anchor for this feature level\r\n            pred_scores: HxWxA,K\r\n            pred_deltas: HxWxA,4\r\n\r\n        Returns:\r\n            Instances: with field \"scores\", \"pred_boxes\", \"pred_classes\".\r\n        \"\"\"\r\n        # Apply two filtering steps to make NMS faster.\r\n        # 1. Keep boxes with confidence score higher than threshold\r\n        keep_idxs = pred_scores > score_thresh\r\n        pred_scores = pred_scores[keep_idxs]\r\n        topk_idxs = torch.nonzero(keep_idxs) # Kx2\r\n\r\n        # 2. Keep top k top scoring boxes only\r\n        topk_idxs_size = topk_idxs.shape[0]\r\n        if isinstance(topk_idxs_size, Tensor):\r\n            # It's a tensor in tracing\r\n            num_topk = torch.clamp(topk_idxs_size, max=topk_candidates)\r\n        else:\r\n            num_topk = min(topk_idxs_size, topk_candidates)\r\n        pred_scores, idxs = pred_scores.topk(num_topk)\r\n        topk_idxs = topk_idxs[idxs]\r\n\r\n        anchor_idxs, classes_idxs = topk_idxs.unbind(dim=1)\r\n\r\n        pred_boxes = self.box2box_transform.apply_deltas(\r\n            pred_deltas[anchor_idxs], anchors.tensor[anchor_idxs]\r\n        )\r\n        return Instances(\r\n            image_size, pred_boxes=Boxes(pred_boxes), scores=pred_scores, pred_classes=classes_idxs\r\n        )\r\n\r\n    def _decode_multi_level_predictions(\r\n        self,\r\n        anchors: List[Boxes],\r\n        pred_scores: List[Tensor],\r\n        pred_deltas: List[Tensor],\r\n        score_thresh: float,\r\n        topk_candidates: int,\r\n        image_size: Tuple[int, int],\r\n    ) -> Instances:\r\n        \"\"\"\r\n        Run `_decode_per_level_predictions` for all feature levels and concat the results.\r\n        \"\"\"\r\n        predictions = [\r\n            self._decode_per_level_predictions(\r\n                anchors_i,\r\n                box_cls_i,\r\n                box_reg_i,\r\n                self.test_score_thresh,\r\n                self.test_topk_candidates,\r\n                image_size,\r\n            )\r\n            # Iterate over every feature level\r\n            for box_cls_i, box_reg_i, anchors_i in zip(pred_scores, pred_deltas, anchors)\r\n        ]\r\n        return predictions[0].cat(predictions) # 'Instances.cat' is not scriptable but this is\r\n\r\n    def visualize_training(self, batched_inputs, results):\r\n        \"\"\"\r\n        A function used to visualize ground truth images and final network predictions.\r\n        It shows ground truth bounding boxes on the original image and up to 20\r\n        predicted object bounding boxes on the original image.\r\n\r\n        Args:\r\n            batched_inputs (list): a list that contains input to the model.\r\n            results (List[Instances]): a list of #images elements returned by forward_inference().\r\n        \"\"\"\r\n        from annotator.oneformer.detectron2.utils.visualizer import Visualizer\r\n\r\n        assert len(batched_inputs) == len(\r\n            results\r\n        ), \"Cannot visualize inputs and results of different 
sizes\"\r\n storage = get_event_storage()\r\n max_boxes = 20\r\n\r\n image_index = 0 # only visualize a single image\r\n img = batched_inputs[image_index][\"image\"]\r\n img = convert_image_to_rgb(img.permute(1, 2, 0), self.input_format)\r\n v_gt = Visualizer(img, None)\r\n v_gt = v_gt.overlay_instances(boxes=batched_inputs[image_index][\"instances\"].gt_boxes)\r\n anno_img = v_gt.get_image()\r\n processed_results = detector_postprocess(results[image_index], img.shape[0], img.shape[1])\r\n predicted_boxes = processed_results.pred_boxes.tensor.detach().cpu().numpy()\r\n\r\n v_pred = Visualizer(img, None)\r\n v_pred = v_pred.overlay_instances(boxes=predicted_boxes[0:max_boxes])\r\n prop_img = v_pred.get_image()\r\n vis_img = np.vstack((anno_img, prop_img))\r\n vis_img = vis_img.transpose(2, 0, 1)\r\n vis_name = f\"Top: GT bounding boxes; Bottom: {max_boxes} Highest Scoring Results\"\r\n storage.put_image(vis_name, vis_img)\r" }, { "identifier": "permute_to_N_HWA_K", "path": "annotator/oneformer/detectron2/modeling/meta_arch/dense_detector.py", "snippet": "def permute_to_N_HWA_K(tensor, K: int):\r\n \"\"\"\r\n Transpose/reshape a tensor from (N, (Ai x K), H, W) to (N, (HxWxAi), K)\r\n \"\"\"\r\n assert tensor.dim() == 4, tensor.shape\r\n N, _, H, W = tensor.shape\r\n tensor = tensor.view(N, -1, K, H, W)\r\n tensor = tensor.permute(0, 3, 4, 1, 2)\r\n tensor = tensor.reshape(N, -1, K) # Size=(N,HWA,K)\r\n return tensor\r" } ]
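As a quick sanity check of the layout convention implemented by `permute_to_N_HWA_K` in the last context snippet above, the following minimal sketch (with made-up tensor sizes) re-applies the same reshape steps and verifies the resulting shape:

import torch

def permute_to_N_HWA_K(tensor, K):
    # (N, A*K, H, W) -> (N, H*W*A, K), same steps as the snippet above
    N, _, H, W = tensor.shape
    tensor = tensor.view(N, -1, K, H, W)
    tensor = tensor.permute(0, 3, 4, 1, 2)
    return tensor.reshape(N, -1, K)

# 2 images, 9 anchors per location, 80 classes, on a 5x5 feature map
x = torch.randn(2, 9 * 80, 5, 5)
y = permute_to_N_HWA_K(x, K=80)
assert y.shape == (2, 5 * 5 * 9, 80)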
import logging import math import torch from typing import List, Tuple from fvcore.nn import sigmoid_focal_loss_jit from torch import Tensor, nn from torch.nn import functional as F from annotator.oneformer.detectron2.config import configurable from annotator.oneformer.detectron2.layers import CycleBatchNormList, ShapeSpec, batched_nms, cat, get_norm from annotator.oneformer.detectron2.structures import Boxes, ImageList, Instances, pairwise_iou from annotator.oneformer.detectron2.utils.events import get_event_storage from ..anchor_generator import build_anchor_generator from ..backbone import Backbone, build_backbone from ..box_regression import Box2BoxTransform, _dense_box_regression_loss from ..matcher import Matcher from .build import META_ARCH_REGISTRY from .dense_detector import DenseDetector, permute_to_N_HWA_K # noqa
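To make the `Matcher` semantics quoted in the context list concrete, here is a standalone sketch of just its threshold-bucketing step; the thresholds, labels, and IoU values are illustrative, not taken from any config:

import torch

thresholds = [-float("inf"), 0.3, 0.5, float("inf")]  # after the -inf/+inf padding
labels = [0, -1, 1]                                    # background / ignore / foreground

matched_vals = torch.tensor([0.1, 0.4, 0.7])           # best IoU per prediction
match_labels = matched_vals.new_full(matched_vals.shape, 1, dtype=torch.int8)
for l, low, high in zip(labels, thresholds[:-1], thresholds[1:]):
    match_labels[(matched_vals >= low) & (matched_vals < high)] = l

print(match_labels)  # tensor([ 0, -1,  1], dtype=torch.int8)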
16,849
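The per-channel normalization performed by `DenseDetector.preprocess_image` (using the registered `pixel_mean`/`pixel_std` buffers of shape (C, 1, 1)) is a plain broadcasted operation; a minimal sketch using the BGR ImageNet statistics quoted in the snippet's docstring:

import torch

pixel_mean = torch.tensor([103.53, 116.28, 123.675]).view(-1, 1, 1)
pixel_std = torch.tensor([57.375, 57.120, 58.395]).view(-1, 1, 1)

image = torch.rand(3, 480, 640) * 255.0        # dummy BGR image in (C, H, W)
normalized = (image - pixel_mean) / pixel_std  # broadcasts over H and W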
# Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """
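One detail of `losses` worth spelling out: ground-truth labels take values in {-1, 0, ..., K} where K denotes background, and slicing off the last one-hot column produces an all-zero target row for background anchors, so they contribute no positive term to the focal loss. A minimal sketch with K=3 and made-up labels:

import torch
import torch.nn.functional as F

num_classes = 3                      # K; the label K (= 3) means background
gt_labels = torch.tensor([0, 2, 3])  # two foreground anchors, one background
valid_mask = gt_labels >= 0          # label -1 (ignore) would be dropped here

targets = F.one_hot(gt_labels[valid_mask], num_classes=num_classes + 1)[:, :-1]
# targets:
# [[1, 0, 0],
#  [0, 0, 1],
#  [0, 0, 0]]  <- background row has no positive target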
# Copyright (c) Facebook, Inc. and its affiliates. __all__ = ["RetinaNet"] logger = logging.getLogger(__name__) @META_ARCH_REGISTRY.register() class RetinaNet(DenseDetector): """ Implement RetinaNet in :paper:`RetinaNet`. """ @configurable def __init__( self, *, backbone: Backbone, head: nn.Module, head_in_features, anchor_generator, box2box_transform, anchor_matcher, num_classes, focal_loss_alpha=0.25, focal_loss_gamma=2.0, smooth_l1_beta=0.0, box_reg_loss_type="smooth_l1", test_score_thresh=0.05, test_topk_candidates=1000, test_nms_thresh=0.5, max_detections_per_image=100, pixel_mean, pixel_std, vis_period=0, input_format="BGR", ): """ NOTE: this interface is experimental. Args: backbone: a backbone module, must follow detectron2's backbone interface head (nn.Module): a module that predicts logits and regression deltas for each level from a list of per-level features head_in_features (Tuple[str]): Names of the input feature maps to be used in head anchor_generator (nn.Module): a module that creates anchors from a list of features. Usually an instance of :class:`AnchorGenerator` box2box_transform (Box2BoxTransform): defines the transform from anchors boxes to instance boxes anchor_matcher (Matcher): label the anchors by matching them with ground truth. num_classes (int): number of classes. Used to label background proposals. # Loss parameters: focal_loss_alpha (float): focal_loss_alpha focal_loss_gamma (float): focal_loss_gamma smooth_l1_beta (float): smooth_l1_beta box_reg_loss_type (str): Options are "smooth_l1", "giou", "diou", "ciou" # Inference parameters: test_score_thresh (float): Inference cls score threshold, only anchors with score > INFERENCE_TH are considered for inference (to improve speed) test_topk_candidates (int): Select topk candidates before NMS test_nms_thresh (float): Overlap threshold used for non-maximum suppression (suppress boxes with IoU >= this threshold) max_detections_per_image (int): Maximum number of detections to return per image during inference (100 is based on the limit established for the COCO dataset). pixel_mean, pixel_std: see :class:`DenseDetector`. 
""" super().__init__( backbone, head, head_in_features, pixel_mean=pixel_mean, pixel_std=pixel_std ) self.num_classes = num_classes # Anchors self.anchor_generator = anchor_generator self.box2box_transform = box2box_transform self.anchor_matcher = anchor_matcher # Loss parameters: self.focal_loss_alpha = focal_loss_alpha self.focal_loss_gamma = focal_loss_gamma self.smooth_l1_beta = smooth_l1_beta self.box_reg_loss_type = box_reg_loss_type # Inference parameters: self.test_score_thresh = test_score_thresh self.test_topk_candidates = test_topk_candidates self.test_nms_thresh = test_nms_thresh self.max_detections_per_image = max_detections_per_image # Vis parameters self.vis_period = vis_period self.input_format = input_format @classmethod def from_config(cls, cfg): backbone = build_backbone(cfg) backbone_shape = backbone.output_shape() feature_shapes = [backbone_shape[f] for f in cfg.MODEL.RETINANET.IN_FEATURES] head = RetinaNetHead(cfg, feature_shapes) anchor_generator = build_anchor_generator(cfg, feature_shapes) return { "backbone": backbone, "head": head, "anchor_generator": anchor_generator, "box2box_transform": Box2BoxTransform(weights=cfg.MODEL.RETINANET.BBOX_REG_WEIGHTS), "anchor_matcher": Matcher( cfg.MODEL.RETINANET.IOU_THRESHOLDS, cfg.MODEL.RETINANET.IOU_LABELS, allow_low_quality_matches=True, ), "pixel_mean": cfg.MODEL.PIXEL_MEAN, "pixel_std": cfg.MODEL.PIXEL_STD, "num_classes": cfg.MODEL.RETINANET.NUM_CLASSES, "head_in_features": cfg.MODEL.RETINANET.IN_FEATURES, # Loss parameters: "focal_loss_alpha": cfg.MODEL.RETINANET.FOCAL_LOSS_ALPHA, "focal_loss_gamma": cfg.MODEL.RETINANET.FOCAL_LOSS_GAMMA, "smooth_l1_beta": cfg.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA, "box_reg_loss_type": cfg.MODEL.RETINANET.BBOX_REG_LOSS_TYPE, # Inference parameters: "test_score_thresh": cfg.MODEL.RETINANET.SCORE_THRESH_TEST, "test_topk_candidates": cfg.MODEL.RETINANET.TOPK_CANDIDATES_TEST, "test_nms_thresh": cfg.MODEL.RETINANET.NMS_THRESH_TEST, "max_detections_per_image": cfg.TEST.DETECTIONS_PER_IMAGE, # Vis parameters "vis_period": cfg.VIS_PERIOD, "input_format": cfg.INPUT.FORMAT, } def forward_training(self, images, features, predictions, gt_instances): # Transpose the Hi*Wi*A dimension to the middle: pred_logits, pred_anchor_deltas = self._transpose_dense_predictions( predictions, [self.num_classes, 4] ) anchors = self.anchor_generator(features) gt_labels, gt_boxes = self.label_anchors(anchors, gt_instances) return self.losses(anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes) def losses(self, anchors, pred_logits, gt_labels, pred_anchor_deltas, gt_boxes): """ Args: anchors (list[Boxes]): a list of #feature level Boxes gt_labels, gt_boxes: see output of :meth:`RetinaNet.label_anchors`. Their shapes are (N, R) and (N, R, 4), respectively, where R is the total number of anchors across levels, i.e. sum(Hi x Wi x Ai) pred_logits, pred_anchor_deltas: both are list[Tensor]. Each element in the list corresponds to one level and has shape (N, Hi * Wi * Ai, K or 4). Where K is the number of classes used in `pred_logits`. Returns: dict[str, Tensor]: mapping from a named loss to a scalar tensor storing the loss. Used during training only. 
The dict keys are: "loss_cls" and "loss_box_reg" """ num_images = len(gt_labels) gt_labels = torch.stack(gt_labels) # (N, R) valid_mask = gt_labels >= 0 pos_mask = (gt_labels >= 0) & (gt_labels != self.num_classes) num_pos_anchors = pos_mask.sum().item() get_event_storage().put_scalar("num_pos_anchors", num_pos_anchors / num_images) normalizer = self._ema_update("loss_normalizer", max(num_pos_anchors, 1), 100) # classification and regression loss gt_labels_target = F.one_hot(gt_labels[valid_mask], num_classes=self.num_classes + 1)[ :, :-1 ] # no loss for the last (background) class loss_cls = sigmoid_focal_loss_jit( cat(pred_logits, dim=1)[valid_mask], gt_labels_target.to(pred_logits[0].dtype), alpha=self.focal_loss_alpha, gamma=self.focal_loss_gamma, reduction="sum", ) loss_box_reg = _dense_box_regression_loss( anchors, self.box2box_transform, pred_anchor_deltas, gt_boxes, pos_mask, box_reg_loss_type=self.box_reg_loss_type, smooth_l1_beta=self.smooth_l1_beta, ) return { "loss_cls": loss_cls / normalizer, "loss_box_reg": loss_box_reg / normalizer, } @torch.no_grad() def label_anchors(self, anchors, gt_instances): """ Args: anchors (list[Boxes]): A list of #feature level Boxes. The Boxes contains anchors of this image on the specific feature level. gt_instances (list[Instances]): a list of N `Instances`s. The i-th `Instances` contains the ground-truth per-instance annotations for the i-th input image. Returns: list[Tensor]: List of #img tensors. i-th element is a vector of labels whose length is the total number of anchors across all feature maps (sum(Hi * Wi * A)). Label values are in {-1, 0, ..., K}, with -1 means ignore, and K means background. list[Tensor]: i-th element is a Rx4 tensor, where R is the total number of anchors across feature maps. The values are the matched gt boxes for each anchor. Values are undefined for those anchors not labeled as foreground. """
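The `loss_normalizer` maintained through `_ema_update` is a plain exponential moving average, new = old * momentum + value * (1 - momentum), seeded with the initial value 100 that `losses` passes in; a standalone sketch with made-up per-iteration foreground counts:

def ema_update(old, value, momentum=0.9):
    # same update rule as DenseDetector._ema_update
    return old * momentum + value * (1 - momentum)

normalizer = 100.0                    # initial_value used by losses()
for num_pos_anchors in [40, 55, 70]:  # illustrative foreground counts
    normalizer = ema_update(normalizer, max(num_pos_anchors, 1))
    print(round(normalizer, 2))       # 94.0, 90.1, 88.09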
anchors = Boxes.cat(anchors) # Rx4
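The target completion above concatenates the per-level anchor `Boxes` into a single Rx4 box tensor; with plain tensors the same operation is a `torch.cat` along dim 0 (the per-level counts below are illustrative):

import torch

# per-level anchor boxes from three feature levels (made-up counts)
anchors_per_level = [torch.rand(100, 4), torch.rand(25, 4), torch.rand(9, 4)]
anchors = torch.cat(anchors_per_level, dim=0)  # Rx4 with R = 134
assert anchors.shape == (134, 4)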
6
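The two-stage filtering in `_decode_per_level_predictions` (score threshold first, then top-k over the survivors) can be reproduced in isolation as follows; the scores, threshold, and k are illustrative:

import torch

pred_scores = torch.tensor([[0.9, 0.01],
                            [0.2, 0.6],
                            [0.03, 0.04]])   # (HWA, K) per-class scores
score_thresh, topk_candidates = 0.05, 2

# 1. keep (anchor, class) pairs above the score threshold
keep = pred_scores > score_thresh
scores = pred_scores[keep]
topk_idxs = torch.nonzero(keep)              # each row: (anchor_idx, class_idx)

# 2. keep only the top-k of the survivors
num_topk = min(topk_idxs.shape[0], topk_candidates)
scores, idxs = scores.topk(num_topk)
topk_idxs = topk_idxs[idxs]
anchor_idxs, class_idxs = topk_idxs.unbind(dim=1)
# scores -> tensor([0.9000, 0.6000]); anchor_idxs -> tensor([0, 1])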
2023-12-05 02:51:53+00:00
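`Box2BoxTransform.apply_deltas`, called in `_decode_per_level_predictions`, follows the standard Faster R-CNN box parameterization. The sketch below is a simplified version with unit weights; the real class also divides the deltas by configurable weights and clamps dw/dh before exponentiation, which is omitted here:

import torch

def apply_deltas_simplified(deltas, boxes):
    # boxes: (R, 4) XYXY anchors; deltas: (R, 4) as (dx, dy, dw, dh)
    widths = boxes[:, 2] - boxes[:, 0]
    heights = boxes[:, 3] - boxes[:, 1]
    ctr_x = boxes[:, 0] + 0.5 * widths
    ctr_y = boxes[:, 1] + 0.5 * heights

    dx, dy, dw, dh = deltas.unbind(dim=1)
    pred_ctr_x = dx * widths + ctr_x
    pred_ctr_y = dy * heights + ctr_y
    pred_w = torch.exp(dw) * widths
    pred_h = torch.exp(dh) * heights

    return torch.stack(
        [pred_ctr_x - 0.5 * pred_w, pred_ctr_y - 0.5 * pred_h,
         pred_ctr_x + 0.5 * pred_w, pred_ctr_y + 0.5 * pred_h], dim=1)

anchors = torch.tensor([[0.0, 0.0, 10.0, 10.0]])
deltas = torch.zeros(1, 4)  # zero deltas should reproduce the anchor exactly
assert torch.allclose(apply_deltas_simplified(deltas, anchors), anchors)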
24k
DiffusionLight/DiffusionLight
relighting/inpainter.py
[ { "identifier": "CustomStableDiffusionControlNetInpaintPipeline", "path": "relighting/pipeline.py", "snippet": "class CustomStableDiffusionControlNetInpaintPipeline(StableDiffusionControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n control_image: PipelineImageInput = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 1.0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n controlnet_conditioning_scale: Union[float, List[float]] = 0.5,\n guess_mode: bool = False,\n control_guidance_start: Union[float, List[float]] = 0.0,\n control_guidance_end: Union[float, List[float]] = 1.0,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionControlNetInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionControlNetInpaintPipeline)\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n\n # align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # 1. Check inputs. Raise error if not correct\n self.check_inputs(\n prompt,\n control_image,\n height,\n width,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n controlnet_conditioning_scale,\n control_guidance_start,\n control_guidance_end,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . 
`guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\n\n global_pool_conditions = (\n controlnet.config.global_pool_conditions\n if isinstance(controlnet, ControlNetModel)\n else controlnet.nets[0].config.global_pool_conditions\n )\n guess_mode = guess_mode or global_pool_conditions\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 4. Prepare image\n if isinstance(controlnet, ControlNetModel):\n control_image = self.prepare_control_image(\n image=control_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n elif isinstance(controlnet, MultiControlNetModel):\n control_images = []\n\n for control_image_ in control_image:\n control_image_ = self.prepare_control_image(\n image=control_image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n control_images.append(control_image_)\n\n control_image = control_images\n else:\n assert False\n\n # 4. Preprocess mask and image - resizes image and mask w.r.t height and width\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n masked_image = init_image * (mask < 0.5)\n _, _, height, width = init_image.shape\n\n # 5. Prepare timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps=num_inference_steps, strength=strength, device=device\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 6. 
Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n # EDITED HERE\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 7.1 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)\n\n # 8. Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n control_model_input = latents\n control_model_input = self.scheduler.scale_model_input(control_model_input, t)\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n else:\n control_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n controlnet_cond_scale = controlnet_conditioning_scale\n if isinstance(controlnet_cond_scale, list):\n controlnet_cond_scale = controlnet_cond_scale[0]\n cond_scale = controlnet_cond_scale * controlnet_keep[i]\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n control_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=control_image,\n conditioning_scale=cond_scale,\n guess_mode=guess_mode,\n return_dict=False,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n # predict the noise residual\n if num_channels_unet == 9:\n latent_model_input = 
torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "CustomStableDiffusionInpaintPipeline", "path": "relighting/pipeline_inpaintonly.py", "snippet": "class CustomStableDiffusionInpaintPipeline(StableDiffusionInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n masked_image_latents: torch.FloatTensor = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 1.0,\n num_inference_steps: int = 50,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n 
use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionInpaintPipeline)\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n height,\n width,\n strength,\n callback_steps,\n negative_prompt,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n prompt_embeds, negative_prompt_embeds = self.encode_prompt(\n prompt,\n device,\n num_images_per_prompt,\n do_classifier_free_guidance,\n negative_prompt,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n # For classifier free guidance, we need to do two forward passes.\n # Here we concatenate the unconditional and text embeddings into a single batch\n # to avoid doing two forward passes\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])\n\n # 4. set timesteps\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps=num_inference_steps, strength=strength, device=device\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image\n\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n # 6. 
Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask_condition = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n if masked_image_latents is None:\n masked_image = init_image * (mask_condition < 0.5)\n else:\n masked_image = masked_image_latents\n\n mask, masked_image_latents = self.prepare_mask_latents(\n mask_condition,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 10. 
Denoising loop\n num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n condition_kwargs = {}\n if isinstance(self.vae, AsymmetricAutoencoderKL):\n init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)\n init_image_condition = init_image.clone()\n init_image = self._encode_vae_image(init_image, generator=generator)\n mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)\n condition_kwargs = {\"image\": init_image_condition, \"mask\": mask_condition}\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0]\n image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)\n else:\n image = latents\n has_nsfw_concept = None\n\n if has_nsfw_concept is None:\n do_denormalize = [True] * image.shape[0]\n else:\n do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]\n\n image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image, has_nsfw_concept)\n\n return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)" }, { "identifier": "CustomStableDiffusionXLInpaintPipeline", "path": "relighting/pipeline_inpaintonly.py", "snippet": "class CustomStableDiffusionXLInpaintPipeline(StableDiffusionXLInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n masked_image_latents: torch.FloatTensor = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 0.9999,\n 
num_inference_steps: int = 50,\n denoising_start: Optional[float] = None,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 7.5,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n guidance_rescale: float = 0.0,\n original_size: Tuple[int, int] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Tuple[int, int] = None,\n negative_original_size: Optional[Tuple[int, int]] = None,\n negative_crops_coords_top_left: Tuple[int, int] = (0, 0),\n negative_target_size: Optional[Tuple[int, int]] = None,\n aesthetic_score: float = 6.0,\n negative_aesthetic_score: float = 2.5,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLInpaintPipeline)\n\n # 0. Default height and width to unet\n height = height or self.unet.config.sample_size * self.vae_scale_factor\n width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n prompt_2,\n height,\n width,\n strength,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
set timesteps\n def denoising_value_valid(dnv):\n return isinstance(denoising_end, float) and 0 < dnv < 1\n\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n if masked_image_latents is not None:\n masked_image = masked_image_latents\n elif init_image.shape[1] == 4:\n # if images are in latent space, we can't mask it\n masked_image = None\n else:\n masked_image = init_image * (mask < 0.5)\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n # add_noise = True if denoising_start is None else False\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. 
Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n # 8.1 Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 10. Prepare added time ids & embeddings\n if negative_original_size is None:\n negative_original_size = original_size\n if negative_target_size is None:\n negative_target_size = target_size\n\n add_text_embeds = pooled_prompt_embeds\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n negative_original_size,\n negative_crops_coords_top_left,\n negative_target_size,\n dtype=prompt_embeds.dtype,\n )\n add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device)\n\n # 11. Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n if (\n denoising_end is not None\n and denoising_start is not None\n and denoising_value_valid(denoising_end)\n and denoising_value_valid(denoising_start)\n and denoising_start >= denoising_end\n ):\n raise ValueError(\n f\"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: \"\n + f\" {denoising_end} when using type float.\"\n )\n elif denoising_end is not None and denoising_value_valid(denoising_end):\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n noise_pred = self.unet(\n latent_model_input,\n t,\n encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n 
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n if not output_type == \"latent\":\n # make sure the VAE is in float32 mode, as it overflows in float16\n needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast\n\n if needs_upcasting:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n\n # cast back to fp16 if needed\n if needs_upcasting:\n self.vae.to(dtype=torch.float16)\n else:\n return StableDiffusionXLPipelineOutput(images=latents)\n\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload all models\n self.maybe_free_model_hooks()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" }, { "identifier": "SAMPLERS", "path": "relighting/argument.py", "snippet": "SAMPLERS = {\n \"ddim\": DDIMScheduler,\n \"ddpm\": DDPMScheduler,\n \"unipc\": UniPCMultistepScheduler,\n}" }, { "identifier": "VAE_MODELS", "path": "relighting/argument.py", "snippet": "VAE_MODELS = {\n \"sdxl\": \"madebyollin/sdxl-vae-fp16-fix\",\n \"sdxl_fast\": \"madebyollin/sdxl-vae-fp16-fix\",\n}" }, { "identifier": "DEPTH_ESTIMATOR", "path": "relighting/argument.py", "snippet": "DEPTH_ESTIMATOR = \"Intel/dpt-hybrid-midas\"" }, { "identifier": "get_control_signal_type", "path": "relighting/argument.py", "snippet": "def get_control_signal_type(controlnet):\n if \"normal\" in controlnet:\n return \"normal\"\n elif \"depth\" in controlnet:\n return \"depth\"\n else:\n raise NotImplementedError" }, { "identifier": "estimate_scene_depth", "path": "relighting/image_processor.py", "snippet": "def estimate_scene_depth(image, depth_estimator):\n #image = feature_extractor(images=image, return_tensors=\"pt\").pixel_values.to(\"cuda\")\n #with torch.no_grad(), torch.autocast(\"cuda\"):\n # depth_map = depth_estimator(image).predicted_depth\n\n depth_map = depth_estimator(image)['predicted_depth']\n W, H = image.size\n depth_map = torch.nn.functional.interpolate(\n depth_map.unsqueeze(1),\n size=(H, W),\n mode=\"bicubic\",\n align_corners=False,\n )\n depth_min = torch.amin(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_max = torch.amax(depth_map, dim=[1, 2, 3], keepdim=True)\n depth_map = (depth_map - depth_min) / (depth_max - 
depth_min)\n image = torch.cat([depth_map] * 3, dim=1)\n\n image = image.permute(0, 2, 3, 1).cpu().numpy()[0]\n image = Image.fromarray((image * 255.0).clip(0, 255).astype(np.uint8))\n return image" }, { "identifier": "estimate_scene_normal", "path": "relighting/image_processor.py", "snippet": "def estimate_scene_normal(image, depth_estimator):\n # speed could be improved by not converting back and forth between numpy and torch\n normal_image = depth_estimator(image)['predicted_depth'][0]\n\n normal_image = normal_image.numpy()\n\n # upsample the depth map to match the input size\n hw = np.array(image).shape[:2]\n normal_image = skimage.transform.resize(normal_image, hw, preserve_range=True)\n\n image_depth = normal_image.copy()\n image_depth -= np.min(image_depth)\n image_depth /= np.max(image_depth)\n \n bg_threshold = 0.4\n\n x = cv2.Sobel(normal_image, cv2.CV_32F, 1, 0, ksize=3)\n x[image_depth < bg_threshold] = 0\n\n y = cv2.Sobel(normal_image, cv2.CV_32F, 0, 1, ksize=3)\n y[image_depth < bg_threshold] = 0\n\n z = np.ones_like(x) * np.pi * 2.0\n\n normal_image = np.stack([x, y, z], axis=2)\n normal_image /= np.sum(normal_image ** 2.0, axis=2, keepdims=True) ** 0.5\n\n # rescale back to image size\n return normal_image" }, { "identifier": "merge_normal_map", "path": "relighting/image_processor.py", "snippet": "def merge_normal_map(normal_map, normal_ball, mask_ball, x, y):\n \"\"\"\n Merge a ball into a normal map using a mask\n @params\n normal_map (np.array) - normal map of the scene [height, width, 3]\n normal_ball (np.array) - normal map of the ball [ball_height, ball_width, 3]\n mask_ball (np.array) - mask of the ball [ball_height, ball_width]\n x (int) - x position of the ball (top-left)\n y (int) - y position of the ball (top-left)\n @return\n normal_map - the merged normal map [height, width, 3] \n \"\"\"\n result = normal_map.copy()\n\n mask_ball = mask_ball[..., None]\n ball = (normal_ball * mask_ball) # alpha blending the ball\n unball = (normal_map[y:y+normal_ball.shape[0], x:x+normal_ball.shape[1]] * (1 - mask_ball)) # alpha blending the normal map\n result[y:y+normal_ball.shape[0], x:x+normal_ball.shape[1]] = ball + unball # add them together\n return result" }, { "identifier": "fill_depth_circular", "path": "relighting/image_processor.py", "snippet": "def fill_depth_circular(depth_image, x, y, r):\n depth_image = np.array(depth_image)\n\n for i in range(depth_image.shape[0]):\n for j in range(depth_image.shape[1]):\n xy = (i - x - r//2)**2 + (j - y - r//2)**2\n # if xy <= rr**2:\n # depth_image[j, i, :] = 255\n # depth_image[j, i, :] = int(minv + (maxv - minv) * z)\n if xy <= (r // 2)**2:\n depth_image[j, i, :] = 255\n \n depth_image = Image.fromarray(depth_image)\n return depth_image" }, { "identifier": "get_ideal_normal_ball", "path": "relighting/ball_processor.py", "snippet": "def get_ideal_normal_ball(size, flip_x=True):\n \"\"\"\n Generate a normal ball for a specific size \n Normal map is x \"left\", y up, z into the screen \n (we flip X to match the sobel operator)\n @params\n - size (int) - single value of height and width\n @return:\n - normal_map (np.array) - normal map [size, size, 3]\n - mask (np.array) - mask that makes a valid normal map [size, size]\n \"\"\"\n # we flip x to match sobel operator\n x = torch.linspace(1, -1, size)\n y = torch.linspace(1, -1, size)\n x = x.flip(dims=(-1,)) if not flip_x else x\n\n y, x = torch.meshgrid(y, x)\n z = (1 - x**2 - y**2)\n mask = z >= 0\n\n # clean up invalid values outside the mask\n x = x * mask\n y = y * mask\n z = z * mask\n \n # get real z value\n z = 
torch.sqrt(z)\n \n # clean up normal map value outside mask \n normal_map = torch.cat([x[..., None], y[..., None], z[..., None]], dim=-1)\n normal_map = normal_map.numpy()\n mask = mask.numpy()\n return normal_map, mask" }, { "identifier": "crop_ball", "path": "relighting/ball_processor.py", "snippet": "def crop_ball(image, mask_ball, x, y, size, apply_mask=True, bg_color = (0, 0, 0)):\n if isinstance(image, Image.Image):\n result = np.array(image)\n else:\n result = image.copy()\n \n result = result[y:y+size, x:x+size]\n if apply_mask:\n result[~mask_ball] = bg_color\n return result" }, { "identifier": "CustomStableDiffusionXLControlNetInpaintPipeline", "path": "relighting/pipeline_xl.py", "snippet": "class CustomStableDiffusionXLControlNetInpaintPipeline(StableDiffusionXLControlNetInpaintPipeline):\n @torch.no_grad()\n def __call__(\n self,\n prompt: Union[str, List[str]] = None,\n prompt_2: Optional[Union[str, List[str]]] = None,\n image: PipelineImageInput = None,\n mask_image: PipelineImageInput = None,\n control_image: Union[\n PipelineImageInput,\n List[PipelineImageInput],\n ] = None,\n height: Optional[int] = None,\n width: Optional[int] = None,\n strength: float = 0.9999,\n num_inference_steps: int = 50,\n denoising_start: Optional[float] = None,\n denoising_end: Optional[float] = None,\n guidance_scale: float = 5.0,\n negative_prompt: Optional[Union[str, List[str]]] = None,\n negative_prompt_2: Optional[Union[str, List[str]]] = None,\n num_images_per_prompt: Optional[int] = 1,\n eta: float = 0.0,\n generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,\n latents: Optional[torch.FloatTensor] = None,\n prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_prompt_embeds: Optional[torch.FloatTensor] = None,\n pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,\n output_type: Optional[str] = \"pil\",\n return_dict: bool = True,\n callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,\n callback_steps: int = 1,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n controlnet_conditioning_scale: Union[float, List[float]] = 1.0,\n guess_mode: bool = False,\n control_guidance_start: Union[float, List[float]] = 0.0,\n control_guidance_end: Union[float, List[float]] = 1.0,\n guidance_rescale: float = 0.0,\n original_size: Tuple[int, int] = None,\n crops_coords_top_left: Tuple[int, int] = (0, 0),\n target_size: Tuple[int, int] = None,\n aesthetic_score: float = 6.0,\n negative_aesthetic_score: float = 2.5,\n newx: int = 0,\n newy: int = 0,\n newr: int = 256,\n current_seed=0,\n use_noise_moving=True,\n ):\n # OVERWRITE METHODS\n self.prepare_mask_latents = custom_prepare_mask_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)\n self.prepare_latents = custom_prepare_latents.__get__(self, CustomStableDiffusionXLControlNetInpaintPipeline)\n\n controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet\n\n # align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = 
len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # # 0.0 Default height and width to unet\n # height = height or self.unet.config.sample_size * self.vae_scale_factor\n # width = width or self.unet.config.sample_size * self.vae_scale_factor\n\n # 0.1 align format for control guidance\n if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):\n control_guidance_start = len(control_guidance_end) * [control_guidance_start]\n elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):\n control_guidance_end = len(control_guidance_start) * [control_guidance_end]\n elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):\n mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1\n control_guidance_start, control_guidance_end = mult * [control_guidance_start], mult * [\n control_guidance_end\n ]\n\n # 1. Check inputs\n self.check_inputs(\n prompt,\n prompt_2,\n control_image,\n strength,\n num_inference_steps,\n callback_steps,\n negative_prompt,\n negative_prompt_2,\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n controlnet_conditioning_scale,\n control_guidance_start,\n control_guidance_end,\n )\n\n # 2. Define call parameters\n if prompt is not None and isinstance(prompt, str):\n batch_size = 1\n elif prompt is not None and isinstance(prompt, list):\n batch_size = len(prompt)\n else:\n batch_size = prompt_embeds.shape[0]\n\n device = self._execution_device\n # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)\n # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`\n # corresponds to doing no classifier free guidance.\n do_classifier_free_guidance = guidance_scale > 1.0\n\n if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):\n controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)\n\n # 3. Encode input prompt\n text_encoder_lora_scale = (\n cross_attention_kwargs.get(\"scale\", None) if cross_attention_kwargs is not None else None\n )\n\n (\n prompt_embeds,\n negative_prompt_embeds,\n pooled_prompt_embeds,\n negative_pooled_prompt_embeds,\n ) = self.encode_prompt(\n prompt=prompt,\n prompt_2=prompt_2,\n device=device,\n num_images_per_prompt=num_images_per_prompt,\n do_classifier_free_guidance=do_classifier_free_guidance,\n negative_prompt=negative_prompt,\n negative_prompt_2=negative_prompt_2,\n prompt_embeds=prompt_embeds,\n negative_prompt_embeds=negative_prompt_embeds,\n pooled_prompt_embeds=pooled_prompt_embeds,\n negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,\n lora_scale=text_encoder_lora_scale,\n )\n\n # 4. 
set timesteps\n def denoising_value_valid(dnv):\n return isinstance(denoising_end, float) and 0 < dnv < 1\n\n self.scheduler.set_timesteps(num_inference_steps, device=device)\n timesteps, num_inference_steps = self.get_timesteps(\n num_inference_steps, strength, device, denoising_start=denoising_start if denoising_value_valid else None\n )\n # check that number of inference steps is not < 1 - as this doesn't make sense\n if num_inference_steps < 1:\n raise ValueError(\n f\"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline\"\n f\"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline.\"\n )\n # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)\n latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)\n # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise\n is_strength_max = strength == 1.0\n\n # 5. Preprocess mask and image - resizes image and mask w.r.t height and width\n # 5.1 Prepare init image\n init_image = self.image_processor.preprocess(image, height=height, width=width)\n init_image = init_image.to(dtype=torch.float32)\n\n # 5.2 Prepare control images\n if isinstance(controlnet, ControlNetModel):\n control_image = self.prepare_control_image(\n image=control_image,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n elif isinstance(controlnet, MultiControlNetModel):\n control_images = []\n\n for control_image_ in control_image:\n control_image_ = self.prepare_control_image(\n image=control_image_,\n width=width,\n height=height,\n batch_size=batch_size * num_images_per_prompt,\n num_images_per_prompt=num_images_per_prompt,\n device=device,\n dtype=controlnet.dtype,\n do_classifier_free_guidance=do_classifier_free_guidance,\n guess_mode=guess_mode,\n )\n\n control_images.append(control_image_)\n\n control_image = control_images\n else:\n raise ValueError(f\"{controlnet.__class__} is not supported.\")\n\n # 5.3 Prepare mask\n mask = self.mask_processor.preprocess(mask_image, height=height, width=width)\n\n masked_image = init_image * (mask < 0.5)\n _, _, height, width = init_image.shape\n\n # 6. Prepare latent variables\n num_channels_latents = self.vae.config.latent_channels\n num_channels_unet = self.unet.config.in_channels\n return_image_latents = num_channels_unet == 4\n\n add_noise = True if denoising_start is None else False\n latents_outputs = self.prepare_latents(\n batch_size * num_images_per_prompt,\n num_channels_latents,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n latents,\n image=init_image,\n timestep=latent_timestep,\n is_strength_max=is_strength_max,\n return_noise=True,\n return_image_latents=return_image_latents,\n newx=newx,\n newy=newy,\n newr=newr,\n current_seed=current_seed,\n use_noise_moving=use_noise_moving,\n )\n\n if return_image_latents:\n latents, noise, image_latents = latents_outputs\n else:\n latents, noise = latents_outputs\n\n # 7. Prepare mask latent variables\n mask, masked_image_latents = self.prepare_mask_latents(\n mask,\n masked_image,\n batch_size * num_images_per_prompt,\n height,\n width,\n prompt_embeds.dtype,\n device,\n generator,\n do_classifier_free_guidance,\n )\n\n # 8. 
Check that sizes of mask, masked image and latents match\n if num_channels_unet == 9:\n # default case for runwayml/stable-diffusion-inpainting\n num_channels_mask = mask.shape[1]\n num_channels_masked_image = masked_image_latents.shape[1]\n if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:\n raise ValueError(\n f\"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects\"\n f\" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +\"\n f\" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}\"\n f\" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of\"\n \" `pipeline.unet` or your `mask_image` or `image` input.\"\n )\n elif num_channels_unet != 4:\n raise ValueError(\n f\"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}.\"\n )\n # 8.1 Prepare extra step kwargs.\n extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)\n\n # 8.2 Create tensor stating which controlnets to keep\n controlnet_keep = []\n for i in range(len(timesteps)):\n keeps = [\n 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)\n for s, e in zip(control_guidance_start, control_guidance_end)\n ]\n if isinstance(self.controlnet, MultiControlNetModel):\n controlnet_keep.append(keeps)\n else:\n controlnet_keep.append(keeps[0])\n\n # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline\n height, width = latents.shape[-2:]\n height = height * self.vae_scale_factor\n width = width * self.vae_scale_factor\n\n original_size = original_size or (height, width)\n target_size = target_size or (height, width)\n\n # 10. Prepare added time ids & embeddings\n add_text_embeds = pooled_prompt_embeds\n add_time_ids, add_neg_time_ids = self._get_add_time_ids(\n original_size,\n crops_coords_top_left,\n target_size,\n aesthetic_score,\n negative_aesthetic_score,\n dtype=prompt_embeds.dtype,\n )\n add_time_ids = add_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n\n if do_classifier_free_guidance:\n prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)\n add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)\n add_neg_time_ids = add_neg_time_ids.repeat(batch_size * num_images_per_prompt, 1)\n add_time_ids = torch.cat([add_neg_time_ids, add_time_ids], dim=0)\n\n prompt_embeds = prompt_embeds.to(device)\n add_text_embeds = add_text_embeds.to(device)\n add_time_ids = add_time_ids.to(device)\n\n # 11. 
Denoising loop\n num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)\n\n if (\n denoising_end is not None\n and denoising_start is not None\n and denoising_value_valid(denoising_end)\n and denoising_value_valid(denoising_start)\n and denoising_start >= denoising_end\n ):\n raise ValueError(\n f\"`denoising_start`: {denoising_start} cannot be larger than or equal to `denoising_end`: \"\n + f\" {denoising_end} when using type float.\"\n )\n elif denoising_end is not None and denoising_value_valid(denoising_end):\n discrete_timestep_cutoff = int(\n round(\n self.scheduler.config.num_train_timesteps\n - (denoising_end * self.scheduler.config.num_train_timesteps)\n )\n )\n num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))\n timesteps = timesteps[:num_inference_steps]\n\n with self.progress_bar(total=num_inference_steps) as progress_bar:\n for i, t in enumerate(timesteps):\n # expand the latents if we are doing classifier free guidance\n latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents\n\n # concat latents, mask, masked_image_latents in the channel dimension\n latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)\n\n added_cond_kwargs = {\"text_embeds\": add_text_embeds, \"time_ids\": add_time_ids}\n\n # controlnet(s) inference\n if guess_mode and do_classifier_free_guidance:\n # Infer ControlNet only for the conditional batch.\n control_model_input = latents\n control_model_input = self.scheduler.scale_model_input(control_model_input, t)\n controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]\n controlnet_added_cond_kwargs = {\n \"text_embeds\": add_text_embeds.chunk(2)[1],\n \"time_ids\": add_time_ids.chunk(2)[1],\n }\n else:\n control_model_input = latent_model_input\n controlnet_prompt_embeds = prompt_embeds\n controlnet_added_cond_kwargs = added_cond_kwargs\n\n if isinstance(controlnet_keep[i], list):\n cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]\n else:\n controlnet_cond_scale = controlnet_conditioning_scale\n if isinstance(controlnet_cond_scale, list):\n controlnet_cond_scale = controlnet_cond_scale[0]\n cond_scale = controlnet_cond_scale * controlnet_keep[i]\n\n # # Resize control_image to match the size of the input to the controlnet\n # if control_image.shape[-2:] != control_model_input.shape[-2:]:\n # control_image = F.interpolate(control_image, size=control_model_input.shape[-2:], mode=\"bilinear\", align_corners=False)\n\n down_block_res_samples, mid_block_res_sample = self.controlnet(\n control_model_input,\n t,\n encoder_hidden_states=controlnet_prompt_embeds,\n controlnet_cond=control_image,\n conditioning_scale=cond_scale,\n guess_mode=guess_mode,\n added_cond_kwargs=controlnet_added_cond_kwargs,\n return_dict=False,\n )\n\n if guess_mode and do_classifier_free_guidance:\n # Infered ControlNet only for the conditional batch.\n # To apply the output of ControlNet to both the unconditional and conditional batches,\n # add 0 to the unconditional batch to keep it unchanged.\n down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]\n mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])\n\n if num_channels_unet == 9:\n latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)\n\n # predict the noise residual\n noise_pred = self.unet(\n latent_model_input,\n t,\n 
encoder_hidden_states=prompt_embeds,\n cross_attention_kwargs=cross_attention_kwargs,\n down_block_additional_residuals=down_block_res_samples,\n mid_block_additional_residual=mid_block_res_sample,\n added_cond_kwargs=added_cond_kwargs,\n return_dict=False,\n )[0]\n\n # perform guidance\n if do_classifier_free_guidance:\n noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n\n if do_classifier_free_guidance and guidance_rescale > 0.0:\n print(\"rescale: \", guidance_rescale)\n # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf\n noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=guidance_rescale)\n\n # compute the previous noisy sample x_t -> x_t-1\n latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]\n\n if num_channels_unet == 4:\n init_latents_proper = image_latents[:1]\n init_mask = mask[:1]\n\n if i < len(timesteps) - 1:\n noise_timestep = timesteps[i + 1]\n init_latents_proper = self.scheduler.add_noise(\n init_latents_proper, noise, torch.tensor([noise_timestep])\n )\n\n latents = (1 - init_mask) * init_latents_proper + init_mask * latents\n\n # call the callback, if provided\n if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):\n progress_bar.update()\n if callback is not None and i % callback_steps == 0:\n callback(i, t, latents)\n\n # make sure the VAE is in float32 mode, as it overflows in float16\n if self.vae.dtype == torch.float16 and self.vae.config.force_upcast:\n self.upcast_vae()\n latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)\n\n # If we do sequential model offloading, let's offload unet and controlnet\n # manually for max memory savings\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.unet.to(\"cpu\")\n self.controlnet.to(\"cpu\")\n torch.cuda.empty_cache()\n\n if not output_type == \"latent\":\n image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]\n else:\n return StableDiffusionXLPipelineOutput(images=latents)\n\n # apply watermark if available\n if self.watermark is not None:\n image = self.watermark.apply_watermark(image)\n\n image = self.image_processor.postprocess(image, output_type=output_type)\n\n # Offload last model to CPU\n if hasattr(self, \"final_offload_hook\") and self.final_offload_hook is not None:\n self.final_offload_hook.offload()\n\n if not return_dict:\n return (image,)\n\n return StableDiffusionXLPipelineOutput(images=image)" } ]
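A minimal usage sketch for the ball helpers collected in the context above (get_ideal_normal_ball, merge_normal_map, crop_ball); the 256px ball and 1024px scene sizes are illustrative assumptions, not values taken from this record:

import numpy as np
from relighting.ball_processor import get_ideal_normal_ball, crop_ball
from relighting.image_processor import merge_normal_map

# Build a 256px ideal chrome-ball normal map plus its validity mask.
normal_ball, mask_ball = get_ideal_normal_ball(256)  # (256, 256, 3), (256, 256)

# Paste the ball into a (hypothetical) scene normal map, top-left at (384, 384).
scene_normals = np.zeros((1024, 1024, 3), dtype=np.float32)
composited = merge_normal_map(scene_normals, normal_ball, mask_ball, x=384, y=384)

# crop_ball recovers the ball region, masking the background to bg_color (black).
ball_crop = crop_ball(composited, mask_ball, x=384, y=384, size=256)
print(composited.shape, ball_crop.shape)  # (1024, 1024, 3) (256, 256, 3)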
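fill_depth_circular above visits every pixel in a nested Python loop; assuming square inputs (which the loop's swapped row/column indexing already implies), a behavior-equivalent vectorized sketch:

import numpy as np
from PIL import Image

def fill_depth_circular_fast(depth_image, x, y, r):
    # Paint a white disc of radius r//2 whose bounding box starts at (x, y),
    # using one boolean mask instead of the per-pixel loop.
    arr = np.array(depth_image)
    rows, cols = np.ogrid[:arr.shape[0], :arr.shape[1]]
    disc = (cols - x - r // 2) ** 2 + (rows - y - r // 2) ** 2 <= (r // 2) ** 2
    arr[disc] = 255
    return Image.fromarray(arr)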
import torch import numpy as np import os import pickle from diffusers import ControlNetModel, AutoencoderKL from PIL import Image from tqdm.auto import tqdm from transformers import pipeline as transformers_pipeline from relighting.pipeline import CustomStableDiffusionControlNetInpaintPipeline from relighting.pipeline_inpaintonly import CustomStableDiffusionInpaintPipeline, CustomStableDiffusionXLInpaintPipeline from relighting.argument import SAMPLERS, VAE_MODELS, DEPTH_ESTIMATOR, get_control_signal_type from relighting.image_processor import ( estimate_scene_depth, estimate_scene_normal, merge_normal_map, fill_depth_circular ) from relighting.ball_processor import get_ideal_normal_ball, crop_ball from relighting.pipeline_xl import CustomStableDiffusionXLControlNetInpaintPipeline
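A hedged sketch of how these imports come together; the checkpoint ids are placeholder assumptions, not pinned anywhere in this file, and BallInpainter is the class defined in the code below:

import torch

inpainter = BallInpainter.from_sdxl(
    model="stabilityai/stable-diffusion-xl-base-1.0",  # assumed SDXL base checkpoint
    controlnet="diffusers/controlnet-depth-sdxl-1.0",  # assumed depth ControlNet
    device=0,
    sampler="unipc",
    torch_dtype=torch.float16,
)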
18556
control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else: pipe = CustomStableDiffusionInpaintPipeline.from_pretrained( model, torch_dtype=torch_dtype, ).to(device) control_generator = None try: if torch_dtype==torch.float16 and device != torch.device("cpu"): pipe.enable_xformers_memory_efficient_attention() except: pass pipe.set_progress_bar_config(disable=True) pipe.scheduler = SAMPLERS[sampler].from_config(pipe.scheduler.config) return BallInpainter(pipe, "sd", control_generator, disable_water_mask) @classmethod def from_sdxl(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, use_fixed_vae=True, offload=False ): vae = VAE_MODELS["sdxl"] vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None extra_kwargs = {"vae": vae} if vae is not None else {} if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained( controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, ).to(device) pipe = 
CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, **extra_kwargs, ).to(device) control_generator = ControlSignalGenerator("sdxl", control_signal_type, device=device) else:
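When use_fixed_vae=True, the code above swaps in the fp16-safe VAE from VAE_MODELS; a standalone sketch of that step (the fp16-overflow rationale is the commonly cited reason for this checkpoint, not stated in this file):

from diffusers import AutoencoderKL
import torch

# madebyollin/sdxl-vae-fp16-fix is the VAE_MODELS["sdxl"] entry above.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)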
class NoWaterMark: def apply_watermark(self, *args, **kwargs): return args[0] class ControlSignalGenerator(): def __init__(self, sd_arch, control_signal_type, device): self.sd_arch = sd_arch self.control_signal_type = control_signal_type self.device = device def process_sd_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", device=self.device.index) control_image = self.depth_estimator(input_image)['depth'] control_image = np.array(control_image) control_image = control_image[:, :, None] control_image = np.concatenate([control_image, control_image, control_image], axis=2) control_image = Image.fromarray(control_image) control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sdxl_depth(self, input_image, normal_ball=None, mask_ball=None, x=None, y=None, r=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) control_image = estimate_scene_depth(input_image, depth_estimator=self.depth_estimator) xs = [x] if not isinstance(x, list) else x ys = [y] if not isinstance(y, list) else y rs = [r] if not isinstance(r, list) else r for x, y, r in zip(xs, ys, rs): #print(f"depth at {x}, {y}, {r}") control_image = fill_depth_circular(control_image, x, y, r) return control_image def process_sd_normal(self, input_image, normal_ball, mask_ball, x, y, r=None, normal_ball_path=None): if getattr(self, 'depth_estimator', None) is None: self.depth_estimator = transformers_pipeline("depth-estimation", model=DEPTH_ESTIMATOR, device=self.device.index) normal_scene = estimate_scene_normal(input_image, depth_estimator=self.depth_estimator) normal_image = merge_normal_map(normal_scene, normal_ball, mask_ball, x, y) normal_image = (normal_image * 127.5 + 127.5).clip(0, 255).astype(np.uint8) control_image = Image.fromarray(normal_image) return control_image def __call__(self, *args, **kwargs): process_fn = getattr(self, f"process_{self.sd_arch}_{self.control_signal_type}", None) if process_fn is None: raise ValueError else: return process_fn(*args, **kwargs) class BallInpainter(): def __init__(self, pipeline, sd_arch, control_generator, disable_water_mask=True): self.pipeline = pipeline self.sd_arch = sd_arch self.control_generator = control_generator self.median = {} if disable_water_mask: self._disable_water_mask() def _disable_water_mask(self): if hasattr(self.pipeline, "watermark"): self.pipeline.watermark = NoWaterMark() print("Disabled watermasking") @classmethod def from_sd(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, offload=False ): if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained(controlnet, torch_dtype=torch.float16) pipe = CustomStableDiffusionControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, torch_dtype=torch_dtype, ).to(device) control_generator = ControlSignalGenerator("sd", control_signal_type, device=device) else: pipe = CustomStableDiffusionInpaintPipeline.from_pretrained( model, torch_dtype=torch_dtype, ).to(device) control_generator = None try: if torch_dtype==torch.float16 and device != torch.device("cpu"): pipe.enable_xformers_memory_efficient_attention() except: pass pipe.set_progress_bar_config(disable=True) pipe.scheduler = 
SAMPLERS[sampler].from_config(pipe.scheduler.config) return BallInpainter(pipe, "sd", control_generator, disable_water_mask) @classmethod def from_sdxl(cls, model, controlnet=None, device=0, sampler="unipc", torch_dtype=torch.float16, disable_water_mask=True, use_fixed_vae=True, offload=False ): vae = VAE_MODELS["sdxl"] vae = AutoencoderKL.from_pretrained(vae, torch_dtype=torch_dtype).to(device) if use_fixed_vae else None extra_kwargs = {"vae": vae} if vae is not None else {} if controlnet is not None: control_signal_type = get_control_signal_type(controlnet) controlnet = ControlNetModel.from_pretrained( controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, ).to(device) pipe = CustomStableDiffusionXLControlNetInpaintPipeline.from_pretrained( model, controlnet=controlnet, variant="fp16" if torch_dtype == torch.float16 else None, use_safetensors=True, torch_dtype=torch_dtype, **extra_kwargs, ).to(device) control_generator = ControlSignalGenerator("sdxl", control_signal_type, device=device) else:
pipe = CustomStableDiffusionXLInpaintPipeline.from_pretrained(
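This record pairs the cropped_code prefix with the gold next_line above; one plausible (assumed, not specified by the record) scoring rule is whitespace-insensitive exact match:

def next_line_exact_match(prediction: str, gold: str) -> bool:
    # Compare a model completion against the gold next_line,
    # ignoring leading/trailing whitespace only.
    return prediction.strip() == gold.strip()

gold = "pipe = CustomStableDiffusionXLInpaintPipeline.from_pretrained("
assert next_line_exact_match("  " + gold, gold)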
2
2023-12-07 14:03:31+00:00
24k
modelscope/normal-depth-diffusion
ldm/models/diffusion/mv_ddpm.py
[ { "identifier": "AutoencoderKL", "path": "ldm/models/autoencoder.py", "snippet": "class AutoencoderKL(pl.LightningModule):\n\n def __init__(self,\n ddconfig,\n lossconfig,\n embed_dim,\n ckpt_path=None,\n ignore_keys=[],\n image_key='image',\n colorize_nlabels=None,\n monitor=None,\n prior_model=None,\n prior_normal=None,\n using_rgb=True):\n super().__init__()\n self.image_key = image_key\n self.encoder = Encoder(**ddconfig)\n self.decoder = Decoder(**ddconfig)\n self.loss = instantiate_from_config(lossconfig)\n self.prior_model = prior_model\n self.using_rgb = using_rgb\n\n assert ddconfig['double_z']\n self.quant_conv = torch.nn.Conv2d(2 * ddconfig['z_channels'],\n 2 * embed_dim, 1)\n self.post_quant_conv = torch.nn.Conv2d(embed_dim,\n ddconfig['z_channels'], 1)\n self.embed_dim = embed_dim\n if colorize_nlabels is not None:\n assert type(colorize_nlabels) == int\n self.register_buffer('colorize',\n torch.randn(3, colorize_nlabels, 1, 1))\n if monitor is not None:\n self.monitor = monitor\n\n if prior_model is not None:\n self.prior_model = instantiate_from_config(prior_model)\n if prior_normal is not None:\n self.prior_normal = instantiate_from_config(prior_normal)\n\n if ckpt_path is not None:\n self.init_from_ckpt(ckpt_path, ignore_keys=ignore_keys)\n\n def init_from_ckpt(self, path, ignore_keys=list()):\n try:\n sd = torch.load(path, map_location='cpu')['state_dict']\n except:\n sd = torch.load(path, map_location='cpu')\n\n keys = list(sd.keys())\n for k in keys:\n for ik in ignore_keys:\n if k.startswith(ik):\n print('Deleting key {} from state_dict.'.format(k))\n del sd[k]\n m, u = self.load_state_dict(sd, strict=False)\n if len(m) > 0:\n print('missing keys:')\n print(m)\n if len(u) > 0:\n print('unexpected keys:')\n print(u)\n\n print(f'Restored from {path}')\n\n def encode(self, x):\n h = self.encoder(x)\n moments = self.quant_conv(h)\n posterior = DiagonalGaussianDistribution(moments)\n return posterior\n\n def decode(self, z):\n z = self.post_quant_conv(z)\n dec = self.decoder(z)\n return dec\n\n def prior_to_eval(self):\n\n if self.prior_model is not None:\n self.prior_model.eval()\n\n if self.prior_normal is not None:\n self.prior_normal.eval()\n\n @torch.no_grad()\n def prior_inference(self, inputs, prior_inputs):\n # depth prior model\n # midas or zoe is 384 model\n prior_results = {}\n\n self.prior_to_eval()\n\n model_prior_results = self.prior_model(prior_inputs)\n prior_results.update(model_prior_results)\n\n # using normal map\n if not self.using_rgb:\n normal_prior = self.prior_normal(prior_inputs)\n prior_results.update(normal_prior)\n\n resize_prior_results = {}\n _, __, h, w = inputs.shape\n\n for key in prior_results.keys():\n resize_prior_results[key] = F.interpolate(\n prior_results[key], (w, h), mode='bilinear')\n\n if self.using_rgb:\n return torch.cat([inputs, resize_prior_results['depth']], dim=1)\n else:\n return torch.cat([\n resize_prior_results['normal'], resize_prior_results['depth']\n ],\n dim=1)\n\n def forward(self, input, sample_posterior=True):\n\n posterior = self.encode(input)\n if sample_posterior:\n z = posterior.sample()\n else:\n z = posterior.mode()\n dec = self.decode(z)\n return dec, posterior\n\n def get_input(self, batch, k):\n x = batch[k]\n if len(x.shape) == 3:\n x = x[..., None]\n x = x.permute(0, 3, 1,\n 2).to(memory_format=torch.contiguous_format).float()\n return x\n\n def training_step(self, batch, batch_idx, optimizer_idx):\n\n inputs = self.get_input(batch, self.image_key)\n if self.prior_model is not None:\n inputs = 
self.prior_inference(inputs, batch['prior'])\n\n reconstructions, posterior = self(inputs)\n\n if optimizer_idx == 0:\n # train encoder+decoder+logvar\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='train')\n\n self.log(\n 'rec_loss',\n log_dict_ae['train/rec_loss'],\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log(\n 'aeloss',\n aeloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(\n log_dict_ae,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=False)\n return aeloss\n\n if optimizer_idx == 1:\n # train the discriminator\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n optimizer_idx,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='train')\n\n self.log(\n 'discloss',\n discloss,\n prog_bar=True,\n logger=True,\n on_step=True,\n on_epoch=True)\n self.log_dict(\n log_dict_disc,\n prog_bar=False,\n logger=True,\n on_step=True,\n on_epoch=False)\n return discloss\n\n def validation_step(self, batch, batch_idx):\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n aeloss, log_dict_ae = self.loss(\n inputs,\n reconstructions,\n posterior,\n 0,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='val')\n\n discloss, log_dict_disc = self.loss(\n inputs,\n reconstructions,\n posterior,\n 1,\n self.global_step,\n last_layer=self.get_last_layer(),\n split='val')\n\n self.log('val/rec_loss', log_dict_ae['val/rec_loss'])\n self.log_dict(log_dict_ae)\n self.log_dict(log_dict_disc)\n return self.log_dict\n\n @torch.no_grad()\n def test_step(self, batch, batch_idx):\n pass\n\n @torch.no_grad()\n def sample_imgs(self, batch):\n '''using to test for sampling image\n\n '''\n inputs = self.get_input(batch, self.image_key)\n reconstructions, posterior = self(inputs)\n\n return {'samples': reconstructions}\n\n def configure_optimizers(self):\n lr = self.learning_rate\n opt_ae = torch.optim.Adam(\n list(self.encoder.parameters()) + list(self.decoder.parameters())\n + list(self.quant_conv.parameters())\n + list(self.post_quant_conv.parameters()),\n lr=lr,\n betas=(0.5, 0.9))\n opt_disc = torch.optim.Adam(\n self.loss.discriminator.parameters(), lr=lr, betas=(0.5, 0.9))\n\n return [opt_ae, opt_disc], []\n\n def get_last_layer(self):\n return self.decoder.conv_out.weight\n\n @torch.no_grad()\n def log_images(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n xrec = repeat(xrec[:, 0, ...], 'b h w -> b c h w', c=3)\n\n if x.shape[1] > 3:\n # colorize with random projection\n assert xrec.shape[1] > 3\n x = self.to_rgb(x)\n xrec = self.to_rgb(xrec)\n samples = self.decode(torch.randn_like(posterior.sample()))\n samples = repeat(samples[:, 0, ...], 'b h w -> b c h w', c=3)\n log['samples'] = samples\n\n log['reconstructions'] = xrec\n log['inputs'] = x\n return log\n\n @torch.no_grad()\n def log_rgbd(self, batch, only_inputs=False, **kwargs):\n log = dict()\n x = self.get_input(batch, self.image_key)\n\n if x.shape[1] == 3:\n if self.prior_model is not None:\n x = self.prior_inference(x, batch['prior'])\n\n x = x.to(self.device)\n if not only_inputs:\n xrec, posterior = self(x)\n samples = self.decode(torch.randn_like(posterior.sample()))\n log['samples'] = samples\n log['reconstructions'] = xrec\n 
log['inputs'] = x\n return log\n\n def to_rgb(self, x):\n assert self.image_key == 'segmentation'\n if not hasattr(self, 'colorize'):\n self.register_buffer('colorize',\n torch.randn(3, x.shape[1], 1, 1).to(x))\n x = F.conv2d(x, weight=self.colorize)\n x = 2. * (x - x.min()) / (x.max() - x.min()) - 1.\n return x" }, { "identifier": "IdentityFirstStage", "path": "ldm/models/autoencoder.py", "snippet": "class IdentityFirstStage(torch.nn.Module):\n\n def __init__(self, *args, vq_interface=False, **kwargs):\n self.vq_interface = vq_interface # TODO: Should be true by default but check to not break older stuff\n super().__init__()\n\n def encode(self, x, *args, **kwargs):\n return x\n\n def decode(self, x, *args, **kwargs):\n return x\n\n def quantize(self, x, *args, **kwargs):\n if self.vq_interface:\n return x, None, [None, None, None]\n return x\n\n def forward(self, x, *args, **kwargs):\n return x" }, { "identifier": "VQModelInterface", "path": "ldm/models/autoencoder.py", "snippet": "class VQModelInterface(VQModel):\n\n def __init__(self, embed_dim, *args, **kwargs):\n super().__init__(embed_dim=embed_dim, *args, **kwargs)\n self.embed_dim = embed_dim\n\n def encode(self, x):\n h = self.encoder(x)\n h = self.quant_conv(h)\n return h\n\n def decode(self, h, force_not_quantize=False):\n # also go through quantization layer\n if not force_not_quantize:\n quant, emb_loss, info = self.quantize(h)\n else:\n quant = h\n quant = self.post_quant_conv(quant)\n dec = self.decoder(quant)\n return dec" }, { "identifier": "DDIMSampler", "path": "ldm/models/diffusion/ddim.py", "snippet": "class DDIMSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n samples, intermediates = self.ddim_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return samples, intermediates\n\n @torch.no_grad()\n def ddim_sampling(self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n **kwargs):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = reversed(range(\n 0, timesteps)) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n\n iterator = tqdm(time_range, desc='DDIM Sampler', total=total_steps)\n\n for i, step in 
enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_ddim(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n img, pred_x0 = outs\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_ddim(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n dynamic_threshold=None,\n **kwargs):\n b, *_, device = *x.shape, x.device\n\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n model_output = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n if isinstance(c, dict):\n assert isinstance(unconditional_conditioning, dict)\n c_in = dict()\n for k in c:\n if isinstance(c[k], list):\n c_in[k] = [\n torch.cat(\n [unconditional_conditioning[k][i], c[k][i]])\n for i in range(len(c[k]))\n ]\n elif isinstance(c[k], torch.Tensor):\n c_in[k] = torch.cat(\n [unconditional_conditioning[k], c[k]])\n else:\n assert c[k] == unconditional_conditioning[k]\n c_in[k] = c[k]\n elif isinstance(c, list):\n c_in = list()\n assert isinstance(unconditional_conditioning, list)\n for i in range(len(c)):\n c_in.append(\n torch.cat([unconditional_conditioning[i], c[i]]))\n else:\n c_in = torch.cat([unconditional_conditioning, c])\n model_uncond, model_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n # model_t = self.model.apply_model(x, t, c, **kwargs)\n # model_uncond = self.model.apply_model(x, t, unconditional_conditioning, **kwargs)\n model_output = model_uncond + unconditional_guidance_scale * (\n model_t - model_uncond)\n\n if self.model.parameterization == 'v':\n print('using v!')\n e_t = self.model.predict_eps_from_z_and_v(x, t, model_output)\n else:\n e_t = model_output\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps', 'not implemented'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1), alphas_prev[index], device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n 
sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n if self.model.parameterization != 'v':\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n else:\n pred_x0 = self.model.predict_start_from_z_and_v(x, t, model_output)\n\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n\n if dynamic_threshold is not None:\n raise NotImplementedError()\n\n # direction pointing to x_t\n dir_xt = (1. - a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n @torch.no_grad()\n def stochastic_encode(self, x0, t, use_original_steps=False, noise=None):\n # fast, but does not allow for exact reconstruction\n # t serves as an index to gather the correct alphas\n if use_original_steps:\n sqrt_alphas_cumprod = self.sqrt_alphas_cumprod\n sqrt_one_minus_alphas_cumprod = self.sqrt_one_minus_alphas_cumprod\n else:\n sqrt_alphas_cumprod = torch.sqrt(self.ddim_alphas)\n sqrt_one_minus_alphas_cumprod = self.ddim_sqrt_one_minus_alphas\n\n if noise is None:\n noise = torch.randn_like(x0)\n return (\n extract_into_tensor(sqrt_alphas_cumprod, t, x0.shape) * x0\n + extract_into_tensor(sqrt_one_minus_alphas_cumprod, t, x0.shape)\n * noise)\n\n @torch.no_grad()\n def decode(self,\n x_latent,\n cond,\n t_start,\n unconditional_guidance_scale=1.0,\n unconditional_conditioning=None,\n use_original_steps=False,\n **kwargs):\n\n timesteps = np.arange(self.ddpm_num_timesteps\n ) if use_original_steps else self.ddim_timesteps\n timesteps = timesteps[:t_start]\n\n time_range = np.flip(timesteps)\n total_steps = timesteps.shape[0]\n\n iterator = tqdm(time_range, desc='Decoding image', total=total_steps)\n x_dec = x_latent\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((x_latent.shape[0], ),\n step,\n device=x_latent.device,\n dtype=torch.long)\n x_dec, _ = self.p_sample_ddim(\n x_dec,\n cond,\n ts,\n index=index,\n use_original_steps=use_original_steps,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n **kwargs)\n return x_dec" }, { "identifier": "DPMSolverSampler", "path": "ldm/models/diffusion/dpm_solver/sampler.py", "snippet": "class DPMSolverSampler(object):\n\n def __init__(self, model, **kwargs):\n super().__init__()\n self.model = model\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(model.\n device)\n self.register_buffer('alphas_cumprod', to_torch(model.alphas_cumprod))\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. 
as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n\n # print(f'Data shape for DPM-Solver sampling is {size}, sampling steps {S}')\n\n device = self.model.betas.device\n if x_T is None:\n img = torch.randn(size, device=device)\n else:\n img = x_T\n\n ns = NoiseScheduleVP('discrete', alphas_cumprod=self.alphas_cumprod)\n\n model_fn = model_wrapper(\n lambda x, t, c: self.model.apply_model(x, t, c),\n ns,\n model_type='noise',\n guidance_type='classifier-free',\n condition=conditioning,\n unconditional_condition=unconditional_conditioning,\n guidance_scale=unconditional_guidance_scale,\n )\n\n dpm_solver = DPM_Solver(\n model_fn, ns, predict_x0=True, thresholding=False)\n x = dpm_solver.sample(\n img,\n steps=S,\n skip_type='time_uniform',\n method='multistep',\n order=2,\n lower_order_final=True)\n\n return x.to(device), None" }, { "identifier": "PLMSSampler", "path": "ldm/models/diffusion/plms.py", "snippet": "class PLMSSampler(object):\n\n def __init__(self, model, schedule='linear', **kwargs):\n super().__init__()\n self.model = model\n self.ddpm_num_timesteps = model.num_timesteps\n self.schedule = schedule\n\n def register_buffer(self, name, attr):\n if type(attr) == torch.Tensor:\n if attr.device != torch.device('cuda'):\n attr = attr.to(torch.device('cuda'))\n setattr(self, name, attr)\n\n def make_schedule(self,\n ddim_num_steps,\n ddim_discretize='uniform',\n ddim_eta=0.,\n verbose=True):\n if ddim_eta != 0:\n raise ValueError('ddim_eta must be 0 for PLMS')\n self.ddim_timesteps = make_ddim_timesteps(\n ddim_discr_method=ddim_discretize,\n num_ddim_timesteps=ddim_num_steps,\n num_ddpm_timesteps=self.ddpm_num_timesteps,\n verbose=verbose)\n alphas_cumprod = self.model.alphas_cumprod\n assert alphas_cumprod.shape[\n 0] == self.ddpm_num_timesteps, 'alphas have to be defined for each timestep'\n to_torch = lambda x: x.clone().detach().to(torch.float32).to(self.model\n .device)\n\n self.register_buffer('betas', to_torch(self.model.betas))\n self.register_buffer('alphas_cumprod', to_torch(alphas_cumprod))\n self.register_buffer('alphas_cumprod_prev',\n to_torch(self.model.alphas_cumprod_prev))\n\n # calculations for diffusion q(x_t | x_{t-1}) and others\n self.register_buffer('sqrt_alphas_cumprod',\n to_torch(np.sqrt(alphas_cumprod.cpu())))\n self.register_buffer('sqrt_one_minus_alphas_cumprod',\n to_torch(np.sqrt(1. - alphas_cumprod.cpu())))\n self.register_buffer('log_one_minus_alphas_cumprod',\n to_torch(np.log(1. - alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recip_alphas_cumprod',\n to_torch(np.sqrt(1. / alphas_cumprod.cpu())))\n self.register_buffer('sqrt_recipm1_alphas_cumprod',\n to_torch(np.sqrt(1. 
/ alphas_cumprod.cpu() - 1)))\n\n # ddim sampling parameters\n ddim_sigmas, ddim_alphas, ddim_alphas_prev = make_ddim_sampling_parameters(\n alphacums=alphas_cumprod.cpu(),\n ddim_timesteps=self.ddim_timesteps,\n eta=ddim_eta,\n verbose=verbose)\n self.register_buffer('ddim_sigmas', ddim_sigmas)\n self.register_buffer('ddim_alphas', ddim_alphas)\n self.register_buffer('ddim_alphas_prev', ddim_alphas_prev)\n self.register_buffer('ddim_sqrt_one_minus_alphas',\n np.sqrt(1. - ddim_alphas))\n sigmas_for_original_sampling_steps = ddim_eta * torch.sqrt(\n (1 - self.alphas_cumprod_prev) / (1 - self.alphas_cumprod) *\n (1 - self.alphas_cumprod / self.alphas_cumprod_prev))\n self.register_buffer('ddim_sigmas_for_original_num_steps',\n sigmas_for_original_sampling_steps)\n\n @torch.no_grad()\n def sample(\n self,\n S,\n batch_size,\n shape,\n conditioning=None,\n callback=None,\n normals_sequence=None,\n img_callback=None,\n quantize_x0=False,\n eta=0.,\n mask=None,\n x0=None,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n verbose=True,\n x_T=None,\n log_every_t=100,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n # this has to come in the same format as the conditioning, # e.g. as encoded tokens, ...\n **kwargs):\n if conditioning is not None:\n if isinstance(conditioning, dict):\n cbs = conditioning[list(conditioning.keys())[0]].shape[0]\n if cbs != batch_size:\n print(\n f'Warning: Got {cbs} conditionings but batch-size is {batch_size}'\n )\n else:\n if conditioning.shape[0] != batch_size:\n print(\n f'Warning: Got {conditioning.shape[0]} conditionings but batch-size is {batch_size}'\n )\n\n self.make_schedule(ddim_num_steps=S, ddim_eta=eta, verbose=verbose)\n # sampling\n C, H, W = shape\n size = (batch_size, C, H, W)\n print(f'Data shape for PLMS sampling is {size}')\n\n samples, intermediates = self.plms_sampling(\n conditioning,\n size,\n callback=callback,\n img_callback=img_callback,\n quantize_denoised=quantize_x0,\n mask=mask,\n x0=x0,\n ddim_use_original_steps=False,\n noise_dropout=noise_dropout,\n temperature=temperature,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n x_T=x_T,\n log_every_t=log_every_t,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n )\n return samples, intermediates\n\n @torch.no_grad()\n def plms_sampling(\n self,\n cond,\n shape,\n x_T=None,\n ddim_use_original_steps=False,\n callback=None,\n timesteps=None,\n quantize_denoised=False,\n mask=None,\n x0=None,\n img_callback=None,\n log_every_t=100,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n ):\n device = self.model.betas.device\n b = shape[0]\n if x_T is None:\n img = torch.randn(shape, device=device)\n else:\n img = x_T\n\n if timesteps is None:\n timesteps = self.ddpm_num_timesteps if ddim_use_original_steps else self.ddim_timesteps\n elif timesteps is not None and not ddim_use_original_steps:\n subset_end = int(\n min(timesteps / self.ddim_timesteps.shape[0], 1)\n * self.ddim_timesteps.shape[0]) - 1\n timesteps = self.ddim_timesteps[:subset_end]\n\n intermediates = {'x_inter': [img], 'pred_x0': [img]}\n time_range = list(reversed(range(\n 0, timesteps))) if ddim_use_original_steps else np.flip(timesteps)\n total_steps = timesteps if ddim_use_original_steps else timesteps.shape[\n 0]\n print(f'Running PLMS Sampling with {total_steps} 
timesteps')\n\n iterator = tqdm(time_range, desc='PLMS Sampler', total=total_steps)\n old_eps = []\n\n for i, step in enumerate(iterator):\n index = total_steps - i - 1\n ts = torch.full((b, ), step, device=device, dtype=torch.long)\n ts_next = torch.full((b, ),\n time_range[min(i + 1,\n len(time_range) - 1)],\n device=device,\n dtype=torch.long)\n\n if mask is not None:\n assert x0 is not None\n img_orig = self.model.q_sample(\n x0, ts) # TODO: deterministic forward pass?\n img = img_orig * mask + (1. - mask) * img\n\n outs = self.p_sample_plms(\n img,\n cond,\n ts,\n index=index,\n use_original_steps=ddim_use_original_steps,\n quantize_denoised=quantize_denoised,\n temperature=temperature,\n noise_dropout=noise_dropout,\n score_corrector=score_corrector,\n corrector_kwargs=corrector_kwargs,\n unconditional_guidance_scale=unconditional_guidance_scale,\n unconditional_conditioning=unconditional_conditioning,\n old_eps=old_eps,\n t_next=ts_next)\n img, pred_x0, e_t = outs\n old_eps.append(e_t)\n if len(old_eps) >= 4:\n old_eps.pop(0)\n if callback: callback(i)\n if img_callback: img_callback(pred_x0, i)\n\n if index % log_every_t == 0 or index == total_steps - 1:\n intermediates['x_inter'].append(img)\n intermediates['pred_x0'].append(pred_x0)\n\n return img, intermediates\n\n @torch.no_grad()\n def p_sample_plms(self,\n x,\n c,\n t,\n index,\n repeat_noise=False,\n use_original_steps=False,\n quantize_denoised=False,\n temperature=1.,\n noise_dropout=0.,\n score_corrector=None,\n corrector_kwargs=None,\n unconditional_guidance_scale=1.,\n unconditional_conditioning=None,\n old_eps=None,\n t_next=None):\n b, *_, device = *x.shape, x.device\n\n def get_model_output(x, t):\n if unconditional_conditioning is None or unconditional_guidance_scale == 1.:\n e_t = self.model.apply_model(x, t, c)\n else:\n x_in = torch.cat([x] * 2)\n t_in = torch.cat([t] * 2)\n c_in = torch.cat([unconditional_conditioning, c])\n e_t_uncond, e_t = self.model.apply_model(x_in, t_in,\n c_in).chunk(2)\n e_t = e_t_uncond + unconditional_guidance_scale * (\n e_t - e_t_uncond)\n\n if score_corrector is not None:\n assert self.model.parameterization == 'eps'\n e_t = score_corrector.modify_score(self.model, e_t, x, t, c,\n **corrector_kwargs)\n\n return e_t\n\n alphas = self.model.alphas_cumprod if use_original_steps else self.ddim_alphas\n alphas_prev = self.model.alphas_cumprod_prev if use_original_steps else self.ddim_alphas_prev\n sqrt_one_minus_alphas = self.model.sqrt_one_minus_alphas_cumprod if use_original_steps else self.ddim_sqrt_one_minus_alphas\n sigmas = self.model.ddim_sigmas_for_original_num_steps if use_original_steps else self.ddim_sigmas\n\n def get_x_prev_and_pred_x0(e_t, index):\n # select parameters corresponding to the currently considered timestep\n a_t = torch.full((b, 1, 1, 1), alphas[index], device=device)\n a_prev = torch.full((b, 1, 1, 1),\n alphas_prev[index],\n device=device)\n sigma_t = torch.full((b, 1, 1, 1), sigmas[index], device=device)\n sqrt_one_minus_at = torch.full((b, 1, 1, 1),\n sqrt_one_minus_alphas[index],\n device=device)\n\n # current prediction for x_0\n pred_x0 = (x - sqrt_one_minus_at * e_t) / a_t.sqrt()\n if quantize_denoised:\n pred_x0, _, *_ = self.model.first_stage_model.quantize(pred_x0)\n # direction pointing to x_t\n dir_xt = (1. 
- a_prev - sigma_t**2).sqrt() * e_t\n noise = sigma_t * noise_like(x.shape, device,\n repeat_noise) * temperature\n if noise_dropout > 0.:\n noise = torch.nn.functional.dropout(noise, p=noise_dropout)\n x_prev = a_prev.sqrt() * pred_x0 + dir_xt + noise\n return x_prev, pred_x0\n\n e_t = get_model_output(x, t)\n if len(old_eps) == 0:\n # Pseudo Improved Euler (2nd order)\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t, index)\n e_t_next = get_model_output(x_prev, t_next)\n e_t_prime = (e_t + e_t_next) / 2\n elif len(old_eps) == 1:\n # 2nd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (3 * e_t - old_eps[-1]) / 2\n elif len(old_eps) == 2:\n # 3rd order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (23 * e_t - 16 * old_eps[-1] + 5 * old_eps[-2]) / 12\n elif len(old_eps) >= 3:\n # 4th order Pseudo Linear Multistep (Adams-Bashforth)\n e_t_prime = (55 * e_t - 59 * old_eps[-1] + 37 * old_eps[-2]\n - 9 * old_eps[-3]) / 24\n\n x_prev, pred_x0 = get_x_prev_and_pred_x0(e_t_prime, index)\n\n return x_prev, pred_x0, e_t"
    },
    {
        "identifier": "CrossAttention",
        "path": "ldm/modules/attention.py",
        "snippet": "class CrossAttention(nn.Module):\n\n def __init__(self,\n query_dim,\n context_dim=None,\n heads=8,\n dim_head=64,\n dropout=0.):\n super().__init__()\n inner_dim = dim_head * heads\n context_dim = default(context_dim, query_dim)\n\n self.scale = dim_head**-0.5\n self.heads = heads\n\n self.to_q = nn.Linear(query_dim, inner_dim, bias=False)\n self.to_k = nn.Linear(context_dim, inner_dim, bias=False)\n self.to_v = nn.Linear(context_dim, inner_dim, bias=False)\n\n self.to_out = nn.Sequential(\n nn.Linear(inner_dim, query_dim), nn.Dropout(dropout))\n\n def forward(self, x, context=None, mask=None):\n h = self.heads\n\n q = self.to_q(x)\n context = default(context, x)\n k = self.to_k(context)\n v = self.to_v(context)\n\n q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h),\n (q, k, v))\n\n sim = einsum('b i d, b j d -> b i j', q, k) * self.scale\n\n if exists(mask):\n mask = rearrange(mask, 'b ... 
-> b (...)')\n max_neg_value = -torch.finfo(sim.dtype).max\n mask = repeat(mask, 'b j -> (b h) () j', h=h)\n sim.masked_fill_(~mask, max_neg_value)\n\n # attention, what we cannot get enough of\n attn = sim.softmax(dim=-1)\n\n out = einsum('b i j, b j d -> b i d', attn, v)\n out = rearrange(out, '(b h) n d -> b n (h d)', h=h)\n return self.to_out(out)" }, { "identifier": "extract_into_tensor", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def extract_into_tensor(a, t, x_shape):\n b, *_ = t.shape\n out = a.gather(-1, t)\n return out.reshape(b, *((1, ) * (len(x_shape) - 1)))" }, { "identifier": "make_beta_schedule", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def make_beta_schedule(schedule,\n n_timestep,\n linear_start=1e-4,\n linear_end=2e-2,\n cosine_s=8e-3):\n if schedule == 'linear':\n betas = (\n torch.linspace(\n linear_start**0.5,\n linear_end**0.5,\n n_timestep,\n dtype=torch.float64)**2)\n\n elif schedule == 'cosine':\n timesteps = (\n torch.arange(n_timestep + 1, dtype=torch.float64) / n_timestep\n + cosine_s)\n alphas = timesteps / (1 + cosine_s) * np.pi / 2\n alphas = torch.cos(alphas).pow(2)\n alphas = alphas / alphas[0]\n betas = 1 - alphas[1:] / alphas[:-1]\n betas = np.clip(betas, a_min=0, a_max=0.999)\n\n elif schedule == 'sqrt_linear':\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64)\n elif schedule == 'sqrt':\n betas = torch.linspace(\n linear_start, linear_end, n_timestep, dtype=torch.float64)**0.5\n else:\n raise ValueError(f\"schedule '{schedule}' unknown.\")\n return betas.numpy()" }, { "identifier": "noise_like", "path": "ldm/modules/diffusionmodules/util.py", "snippet": "def noise_like(shape, device, repeat=False):\n repeat_noise = lambda: torch.randn((1, *shape[1:]), device=device).repeat(\n shape[0], *((1, ) * (len(shape) - 1)))\n noise = lambda: torch.randn(shape, device=device)\n return repeat_noise() if repeat else noise()" }, { "identifier": "DiagonalGaussianDistribution", "path": "ldm/modules/distributions/distributions.py", "snippet": "class DiagonalGaussianDistribution(object):\n\n def __init__(self, parameters, deterministic=False):\n self.parameters = parameters\n self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)\n self.logvar = torch.clamp(self.logvar, -30.0, 20.0)\n self.deterministic = deterministic\n self.std = torch.exp(0.5 * self.logvar)\n self.var = torch.exp(self.logvar)\n if self.deterministic:\n self.var = self.std = torch.zeros_like(\n self.mean).to(device=self.parameters.device)\n\n def sample(self):\n x = self.mean + self.std * torch.randn(\n self.mean.shape).to(device=self.parameters.device)\n return x\n\n def kl(self, other=None):\n if self.deterministic:\n return torch.Tensor([0.])\n else:\n if other is None:\n return 0.5 * torch.sum(\n torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar,\n dim=[1, 2, 3])\n else:\n return 0.5 * torch.sum(\n torch.pow(self.mean - other.mean, 2) / other.var\n + self.var / other.var - 1.0 - self.logvar + other.logvar,\n dim=[1, 2, 3])\n\n def nll(self, sample, dims=[1, 2, 3]):\n if self.deterministic:\n return torch.Tensor([0.])\n logtwopi = np.log(2.0 * np.pi)\n return 0.5 * torch.sum(\n logtwopi + self.logvar\n + torch.pow(sample - self.mean, 2) / self.var,\n dim=dims)\n\n def mode(self):\n return self.mean" }, { "identifier": "normal_kl", "path": "ldm/modules/distributions/distributions.py", "snippet": "def normal_kl(mean1, logvar1, mean2, logvar2):\n \"\"\"\n source: 
https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/losses.py#L12\n Compute the KL divergence between two gaussians.\n Shapes are automatically broadcasted, so batches can be compared to\n scalars, among other use cases.\n \"\"\"\n tensor = None\n for obj in (mean1, logvar1, mean2, logvar2):\n if isinstance(obj, torch.Tensor):\n tensor = obj\n break\n assert tensor is not None, 'at least one argument must be a Tensor'\n\n # Force variances to be Tensors. Broadcasting helps convert scalars to\n # Tensors, but it does not work for torch.exp().\n logvar1, logvar2 = [\n x if isinstance(x, torch.Tensor) else torch.tensor(x).to(tensor)\n for x in (logvar1, logvar2)\n ]\n\n return 0.5 * (-1.0 + logvar2 - logvar1 + torch.exp(logvar1 - logvar2) +\n ((mean1 - mean2)**2) * torch.exp(-logvar2))" }, { "identifier": "LitEma", "path": "ldm/modules/ema.py", "snippet": "class LitEma(nn.Module):\n\n def __init__(self, model, decay=0.9999, use_num_upates=True):\n super().__init__()\n if decay < 0.0 or decay > 1.0:\n raise ValueError('Decay must be between 0 and 1')\n\n self.m_name2s_name = {}\n self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32))\n self.register_buffer(\n 'num_updates',\n torch.tensor(0, dtype=torch.int)\n if use_num_upates else torch.tensor(-1, dtype=torch.int))\n\n for name, p in model.named_parameters():\n if p.requires_grad:\n #remove as '.'-character is not allowed in buffers\n s_name = name.replace('.', '')\n self.m_name2s_name.update({name: s_name})\n self.register_buffer(s_name, p.clone().detach().data)\n\n self.collected_params = []\n\n def forward(self, model):\n decay = self.decay\n\n if self.num_updates >= 0:\n self.num_updates += 1\n decay = min(self.decay, (1 + self.num_updates) /\n (10 + self.num_updates))\n\n one_minus_decay = 1.0 - decay\n\n with torch.no_grad():\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n\n for key in m_param:\n if m_param[key].requires_grad:\n sname = self.m_name2s_name[key]\n shadow_params[sname] = shadow_params[sname].type_as(\n m_param[key])\n shadow_params[sname].sub_(\n one_minus_decay *\n (shadow_params[sname] - m_param[key]))\n else:\n assert not key in self.m_name2s_name\n\n def copy_to(self, model):\n m_param = dict(model.named_parameters())\n shadow_params = dict(self.named_buffers())\n for key in m_param:\n if m_param[key].requires_grad:\n m_param[key].data.copy_(\n shadow_params[self.m_name2s_name[key]].data)\n else:\n assert not key in self.m_name2s_name\n\n def store(self, parameters):\n \"\"\"\n Save the current parameters for restoring later.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n temporarily stored.\n \"\"\"\n self.collected_params = [param.clone() for param in parameters]\n\n def restore(self, parameters):\n \"\"\"\n Restore the parameters stored with the `store` method.\n Useful to validate the model with EMA parameters without affecting the\n original optimization process. Store the parameters before the\n `copy_to` method. 
After validation (or model saving), use this to\n restore the former parameters.\n Args:\n parameters: Iterable of `torch.nn.Parameter`; the parameters to be\n updated with the stored parameters.\n \"\"\"\n for c_param, param in zip(self.collected_params, parameters):\n param.data.copy_(c_param.data)" }, { "identifier": "count_params", "path": "ldm/util.py", "snippet": "def count_params(model, verbose=False):\n total_params = sum(p.numel() for p in model.parameters())\n if verbose:\n print(\n f'{model.__class__.__name__} has {total_params * 1.e-6:.2f} M params.'\n )\n return total_params" }, { "identifier": "default", "path": "ldm/util.py", "snippet": "def default(val, d):\n if exists(val):\n return val\n return d() if isfunction(d) else d" }, { "identifier": "exists", "path": "ldm/util.py", "snippet": "def exists(x):\n return x is not None" }, { "identifier": "filter_nan_loss", "path": "ldm/util.py", "snippet": "def filter_nan_loss(loss):\n fake_loss = torch.isnan(loss)\n loss = loss[torch.logical_not(fake_loss)]\n\n if loss.shape[0] == 0:\n return loss.sum()\n else:\n return loss" }, { "identifier": "instantiate_from_config", "path": "ldm/util.py", "snippet": "def instantiate_from_config(config):\n if not 'target' in config:\n\n print(config)\n if config == '__is_first_stage__':\n return None\n elif config == '__is_unconditional__':\n return None\n raise KeyError('Expected key `target` to instantiate.')\n return get_obj_from_str(config['target'])(**config.get('params', dict()))" }, { "identifier": "isimage", "path": "ldm/util.py", "snippet": "def isimage(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] == 3 or x.shape[1] == 1)" }, { "identifier": "ismap", "path": "ldm/util.py", "snippet": "def ismap(x):\n if not isinstance(x, torch.Tensor):\n return False\n return (len(x.shape) == 4) and (x.shape[1] > 3)" }, { "identifier": "log_txt_as_img", "path": "ldm/util.py", "snippet": "def log_txt_as_img(wh, xc, size=20):\n # wh a tuple of (width, height)\n # xc a list of captions to plot\n b = len(xc)\n txts = list()\n for bi in range(b):\n txt = Image.new('RGB', wh, color='white')\n draw = ImageDraw.Draw(txt)\n font = ImageFont.truetype('data/DejaVuSans.ttf', size=size)\n nc = int(10 * (wh[0] / 256))\n lines = '\\n'.join(xc[bi][start:start + nc]\n for start in range(0, len(xc[bi]), nc))\n\n try:\n draw.text((0, 0), lines, fill='black', font=font)\n except UnicodeEncodeError:\n print('Cant encode string for logging. Skipping.')\n\n txt = np.array(txt).transpose(2, 0, 1) / 127.5 - 1.0\n txts.append(txt)\n txts = np.stack(txts)\n txts = torch.tensor(txts)\n return txts" }, { "identifier": "mean_flat", "path": "ldm/util.py", "snippet": "def mean_flat(tensor):\n \"\"\"\n https://github.com/openai/guided-diffusion/blob/27c20a8fab9cb472df5d6bdd6c8d11c8f430b924/guided_diffusion/nn.py#L86\n Take the mean over all non-batch dimensions.\n \"\"\"\n return tensor.mean(dim=list(range(1, len(tensor.shape))))" } ]
import pdb

import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from contextlib import contextmanager
from functools import partial

from einops import rearrange, repeat
from ldm.models.autoencoder import (AutoencoderKL, IdentityFirstStage,
                                    VQModelInterface)
from ldm.models.diffusion.ddim import DDIMSampler
from ldm.models.diffusion.dpm_solver import DPMSolverSampler
from ldm.models.diffusion.plms import PLMSSampler
from ldm.modules.attention import CrossAttention
from ldm.modules.diffusionmodules.util import (extract_into_tensor,
                                               make_beta_schedule, noise_like)
from ldm.modules.distributions.distributions import (
    DiagonalGaussianDistribution, normal_kl)
from ldm.modules.ema import LitEma
from ldm.util import (count_params, default, exists, filter_nan_loss,
                      instantiate_from_config, isimage, ismap, log_txt_as_img,
                      mean_flat)
from torch.optim.lr_scheduler import LambdaLR
from torchvision.utils import make_grid
from tqdm import tqdm

# The two rank_zero_only imports target different pytorch_lightning versions
# and shadow each other when imported unconditionally; guard them so exactly
# one takes effect.
try:
    from pytorch_lightning.utilities.distributed import rank_zero_only
except ImportError:
    from pytorch_lightning.utilities.rank_zero import rank_zero_only
15,149
else:
            return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step))


class DDPM(pl.LightningModule):
    # classic DDPM with Gaussian diffusion, in image space
    def __init__(
            self,
            unet_config,
            timesteps=1000,
            beta_schedule='linear',
            loss_type='l2',
            ckpt_path=None,
            ignore_keys=[],
            load_only_unet=False,
            monitor='val/loss',
            use_ema=True,
            first_stage_key='image',
            image_size=256,
            channels=3,
            log_every_t=100,
            clip_denoised=True,
            linear_start=1e-4,
            linear_end=2e-2,
            cosine_s=8e-3,
            given_betas=None,
            original_elbo_weight=0.,
            v_posterior=0.,  # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta
            l_simple_weight=1.,
            conditioning_key=None,
            parameterization='eps',  # all assuming fixed variance schedules
            scheduler_config=None,
            use_positional_encodings=False,
            learn_logvar=False,
            logvar_init=0.,
            anneal_t=False,  # we find that at the beginning, smaller t gives a larger denoising MSE loss.
            anneal_global_step=[],
            anneal_ratio=0.9,
            prior_model=None,
            prior_normal=None,
            input_keys=['rgb'],
    ):
        super().__init__()
        assert parameterization in [
            'eps', 'x0'
        ], 'currently only supporting "eps" and "x0"'
        self.parameterization = parameterization
        print(
            f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode'
        )
        self.cond_stage_model = None
        self.clip_denoised = clip_denoised
        self.log_every_t = log_every_t
        self.first_stage_key = first_stage_key
        self.image_size = image_size  # try conv?
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.model = DiffusionWrapper(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.')

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight
        self.input_keys = input_keys

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(
                ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)

        self.register_schedule(
            given_betas=given_betas,
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        self.logvar = torch.full(
            fill_value=logvar_init, size=(self.num_timesteps, ))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)

        ### anneal t function
        if not anneal_t:
            self.anneal_func = anneal_identity()
        else:
            self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step,
                                             self.num_timesteps)

        if prior_model is not None:
            self.prior_model = instantiate_from_config(prior_model)
        else:
            self.prior_model = None

        if prior_normal is not None:
            self.prior_normal = instantiate_from_config(prior_normal)
        else:
            self.prior_normal = None

    def register_schedule(self,
                          given_betas=None,
                          beta_schedule='linear',
                          timesteps=1000,
                          linear_start=1e-4,
                          linear_end=2e-2,
                          cosine_s=8e-3):
""" wild mixture of https://github.com/lucidrains/denoising-diffusion-pytorch/blob/7706bdfc6f527f58d33f84b7b522e61e6e3164b3/denoising_diffusion_pytorch/denoising_diffusion_pytorch.py https://github.com/openai/improved-diffusion/blob/e94489283bb876ac1477d5dd7709bbbd2d9902ce/improved_diffusion/gaussian_diffusion.py https://github.com/CompVis/taming-transformers -- merci """ try: except: __conditioning_keys__ = { 'concat': 'c_concat', 'crossattn': 'c_crossattn', 'adm': 'y' } def disabled_train(self, mode=True): """Overwrite model.train with this function to make sure train/eval mode does not change anymore.""" return self def uniform_on_device(r1, r2, shape, device): return (r1 - r2) * torch.rand(*shape, device=device) + r2 class anneal_identity(): def __call__(self, x, global_step): return x def upper_bound(arr, key): left = 0 right = len(arr) while left < right: mid = (left + right) >> 1 if arr[mid] < key: left = mid + 1 else: right = mid return left class anneal_warmup(): def __init__(self, anneal_ratio, anneal_global_step, num_steps): self.anneal_ratio = anneal_ratio self.anneal_global_step = anneal_global_step self.steps = num_steps // (len(anneal_global_step) + 1) self.start_steps = self.steps def __call__(self, x, global_step): if (torch.rand(1) > self.anneal_ratio).item(): return x else: return int(self.start_steps + self.start_steps * upper_bound(self.anneal_global_step, global_step)) class DDPM(pl.LightningModule): # classic DDPM with Gaussian diffusion, in image space def __init__( self, unet_config, timesteps=1000, beta_schedule='linear', loss_type='l2', ckpt_path=None, ignore_keys=[], load_only_unet=False, monitor='val/loss', use_ema=True, first_stage_key='image', image_size=256, channels=3, log_every_t=100, clip_denoised=True, linear_start=1e-4, linear_end=2e-2, cosine_s=8e-3, given_betas=None, original_elbo_weight=0., v_posterior=0., # weight for choosing posterior variance as sigma = (1-v) * beta_tilde + v * beta l_simple_weight=1., conditioning_key=None, parameterization='eps', # all assuming fixed variance schedules scheduler_config=None, use_positional_encodings=False, learn_logvar=False, logvar_init=0., anneal_t=False, # we find at the begining, smaller t, larger denoise mse loss. anneal_global_step=[], anneal_ratio=0.9, prior_model=None, prior_normal=None, input_keys=['rgb'], ): super().__init__() assert parameterization in [ 'eps', 'x0' ], 'currently only supporting "eps" and "x0"' self.parameterization = parameterization print( f'{self.__class__.__name__}: Running in {self.parameterization}-prediction mode' ) self.cond_stage_model = None self.clip_denoised = clip_denoised self.log_every_t = log_every_t self.first_stage_key = first_stage_key self.image_size = image_size # try conv? 
        self.channels = channels
        self.use_positional_encodings = use_positional_encodings
        self.model = DiffusionWrapper(unet_config, conditioning_key)
        count_params(self.model, verbose=True)
        self.use_ema = use_ema
        if self.use_ema:
            self.model_ema = LitEma(self.model)
            print(f'Keeping EMAs of {len(list(self.model_ema.buffers()))}.')

        self.use_scheduler = scheduler_config is not None
        if self.use_scheduler:
            self.scheduler_config = scheduler_config

        self.v_posterior = v_posterior
        self.original_elbo_weight = original_elbo_weight
        self.l_simple_weight = l_simple_weight
        self.input_keys = input_keys

        if monitor is not None:
            self.monitor = monitor
        if ckpt_path is not None:
            self.init_from_ckpt(
                ckpt_path, ignore_keys=ignore_keys, only_model=load_only_unet)

        self.register_schedule(
            given_betas=given_betas,
            beta_schedule=beta_schedule,
            timesteps=timesteps,
            linear_start=linear_start,
            linear_end=linear_end,
            cosine_s=cosine_s)

        self.loss_type = loss_type

        self.learn_logvar = learn_logvar
        self.logvar = torch.full(
            fill_value=logvar_init, size=(self.num_timesteps, ))
        if self.learn_logvar:
            self.logvar = nn.Parameter(self.logvar, requires_grad=True)

        ### anneal t function
        if not anneal_t:
            self.anneal_func = anneal_identity()
        else:
            self.anneal_func = anneal_warmup(anneal_ratio, anneal_global_step,
                                             self.num_timesteps)

        if prior_model is not None:
            self.prior_model = instantiate_from_config(prior_model)
        else:
            self.prior_model = None

        if prior_normal is not None:
            self.prior_normal = instantiate_from_config(prior_normal)
        else:
            self.prior_normal = None

    def register_schedule(self,
                          given_betas=None,
                          beta_schedule='linear',
                          timesteps=1000,
                          linear_start=1e-4,
                          linear_end=2e-2,
                          cosine_s=8e-3):
if exists(given_betas):
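The line above is this entry's gold next line. In CompVis-style DDPM code, `register_schedule` typically continues by building the beta schedule and registering the derived cumulative-product buffers. The sketch below is a minimal, hypothetical reconstruction of that continuation, using the linear branch of the `make_beta_schedule` helper shown in the context; it is a standalone function with a plain dict in place of `register_buffer`, not this repo's verbatim code:

# Hypothetical sketch only: mirrors the canonical continuation after
# `if exists(given_betas):`. Names follow the snippet above; the function
# wrapper and returned dict are illustrative assumptions.
import numpy as np
import torch

def sketch_register_schedule(given_betas=None, timesteps=1000,
                             linear_start=1e-4, linear_end=2e-2):
    if given_betas is not None:  # stands in for `exists(given_betas)`
        betas = given_betas
    else:
        # linear branch of make_beta_schedule: linspace(sqrt(start), sqrt(end))**2
        betas = (torch.linspace(linear_start**0.5, linear_end**0.5,
                                timesteps, dtype=torch.float64)**2).numpy()
    alphas = 1. - betas
    alphas_cumprod = np.cumprod(alphas, axis=0)
    alphas_cumprod_prev = np.append(1., alphas_cumprod[:-1])
    to_torch = lambda x: torch.tensor(x, dtype=torch.float32)
    return {
        'betas': to_torch(betas),
        'alphas_cumprod': to_torch(alphas_cumprod),
        'alphas_cumprod_prev': to_torch(alphas_cumprod_prev),
        'sqrt_alphas_cumprod': to_torch(np.sqrt(alphas_cumprod)),
        'sqrt_one_minus_alphas_cumprod': to_torch(np.sqrt(1. - alphas_cumprod)),
    }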
15
2023-12-06 07:29:34+00:00
24k
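The fields above (repo_name, file_path, context, import_statement, token_num, cropped_code, all_code, next_line, gold_snippet_index, created_at, level) make up one row of this dump. A hedged sketch of how such a row can be consumed for next-line prediction follows; the `row` literal and the prompt-assembly order are illustrative assumptions, not a documented loader API:

# Illustrative only: a toy row with the same field names as this dump.
row = {
    "context": [
        {"identifier": "exists", "path": "ldm/util.py",
         "snippet": "def exists(x):\n    return x is not None"},
    ],
    "import_statement": "import numpy as np\nimport torch",
    "cropped_code": "def register_schedule(self, given_betas=None, ...):",
    "next_line": "if exists(given_betas):",
    "gold_snippet_index": 0,
}

# One plausible assembly: retrieved context snippets, then the file's imports,
# then the cropped code; a model is scored on reproducing `next_line` exactly.
prompt = "\n\n".join(c["snippet"] for c in row["context"])
prompt += "\n\n" + row["import_statement"] + "\n" + row["cropped_code"]
target = row["next_line"]

# `gold_snippet_index` points at the context entry the gold line depends on
# (here the `exists` helper).
gold = row["context"][row["gold_snippet_index"]]["identifier"]
print(gold, "->", target)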
RobertCsordas/moe_attention
tasks/simple/language_model/transformer_lm_mixin.py
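The context snippets for this entry revolve around mixture-of-experts routing (see `MoE.forward` below: a sigmoid selection over experts, optional expert dropout, then top-k, then a gate-weighted mix). As a primer, here is a minimal sketch of that routing step; all names and shapes are illustrative, not this repo's API:

# Minimal sketch of sigmoid-gated top-k expert routing (illustrative names).
import torch
import torch.nn.functional as F

def route_topk(x, expert_sel, k=2, expert_dropout=0.0, training=True):
    # x: [n_tokens, d_model]; expert_sel: [n_experts, d_model] selection weights
    logits = F.linear(x, expert_sel)            # raw scores, [n_tokens, n_experts]
    if training and expert_dropout > 0:
        drop = torch.rand_like(logits) < expert_dropout
        logits = logits.masked_fill(drop, float('-inf'))  # dropped experts lose
    gates = torch.sigmoid(logits)               # "sigmoid" selection_mode
    sel_val, sel_index = gates.topk(k, dim=-1)  # k experts per token
    return sel_val, sel_index                   # mixing weights and expert ids

tokens = torch.randn(4, 16)
selector = torch.randn(8, 16)                   # 8 experts over a 16-dim model
w, idx = route_topk(tokens, selector, k=2)
print(w.shape, idx.shape)  # torch.Size([4, 2]) torch.Size([4, 2])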
[ { "identifier": "TransformerLanguageModel", "path": "models/transformer_language_model.py", "snippet": "class TransformerLanguageModel(LoggingLayer, torch.nn.Module):\n def __init__(self, voc_size: int, embedding_size: Optional[int], state_size: int, dropout: float,\n tied_embedding: bool, layers: List[torch.nn.Module], n_prev_states: int,\n n_prev_states_test: Optional[int] = None, adaptive_cutoffs: List[int] = [],\n same_length_eval: bool = True, norm_before_output: bool = False,\n p_drop_layer: float = 0.0, use_last_state: bool = False, same_length: bool = False):\n\n super().__init__()\n\n self.embedding = torch.nn.Embedding(voc_size, embedding_size or state_size)\n torch.nn.init.kaiming_normal_(self.embedding.weight, mode=\"fan_in\", nonlinearity=\"linear\")\n\n self.shared_layers = all([la is layers[0] for la in layers])\n\n if embedding_size is None:\n self.embedding_adapter = lambda x: x\n else:\n self.embedding_adapter = torch.nn.Linear(embedding_size, state_size)\n\n self.dropout = torch.nn.Dropout(dropout)\n self.layers = layers\n self.unique_layers = torch.nn.ModuleList(unique_obejcts(layers))\n self.output_adapter = lambda x: x\n self.n_prev_states = n_prev_states\n self.n_prev_states_test = n_prev_states_test or n_prev_states\n self.same_length_eval = same_length_eval\n self.embedding_scale = math.sqrt(state_size)\n self.p_drop_layer = p_drop_layer\n self.use_last_state = use_last_state\n self.same_length = same_length\n self.iter = 0\n\n self.adaptive = bool(adaptive_cutoffs)\n\n out_proj_size = (embedding_size or state_size) if tied_embedding else state_size\n if self.adaptive:\n self.output = framework.layers.CustomAdaptiveLogSoftmaxWithLoss(\n out_proj_size, voc_size, adaptive_cutoffs, div_value=1,\n tied_to=self.embedding if tied_embedding else None)\n else:\n self.output = torch.nn.Linear(out_proj_size, voc_size)\n\n if norm_before_output:\n self.out_norm = torch.nn.LayerNorm(state_size)\n else:\n self.out_norm = lambda x: x\n\n if tied_embedding:\n if not self.adaptive:\n self.output.weight = self.embedding.weight\n if embedding_size is not None:\n self.output_adapter = torch.nn.Linear(state_size, embedding_size)\n\n @staticmethod\n def generate_history_mask(sz: int, device: torch.device) -> torch.Tensor:\n return torch.tril(torch.ones(sz, sz, dtype=torch.bool, device=device), diagonal=-1)\n\n def gen_output(self, x: torch.Tensor, target: Optional[torch.Tensor]) -> torch.Tensor:\n net = self.out_norm(x)\n net = self.output_adapter(net)\n net = self.dropout(net)\n\n if self.adaptive:\n net = self.output(net.transpose(0, 1), target)\n else:\n net = self.output(net.transpose(0, 1))\n\n return net\n\n def forward(self, x: torch.Tensor, target: Optional[torch.Tensor], state) -> Tuple[torch.Tensor, Any]:\n causality_mask = Transformer.generate_square_subsequent_mask(x.shape[0], x.device)\n\n net = self.dropout(self.embedding(x.T.long()))\n net = self.embedding_adapter(net)\n net = net * self.embedding_scale\n\n new_state = []\n features = [net]\n\n n_prev_states = self.n_prev_states if self.training else self.n_prev_states_test\n\n same_length = self.same_length or ((not self.training) and self.same_length_eval)\n if same_length and state is not None:\n causality_mask = [self.generate_history_mask(x.shape[0], x.device)] + \\\n [torch.zeros_like(causality_mask)] * (len(state[0]) - 1) + [causality_mask]\n causality_mask = torch.cat(causality_mask, -1)\n\n\n plot_cossim = (self.iter % 100 == 0 and self.training)\n for li, l in enumerate(self.layers):\n if n_prev_states > 0:\n 
if li == 0:\n # Pos offset should be constant for all layers\n pos_offset = sum(s.shape[1] for s in state[0]) if state is not None else 0\n\n # Concatenate the new state with the previous states\n li_r = -1 if self.use_last_state else li\n s = (state[li_r] + [net]) if state is not None else [net]\n attend_to = torch.cat(s, 1)\n\n if not self.use_last_state:\n s[-1] = s[-1].detach()\n new_state.append(s[-n_prev_states:])\n else:\n pos_offset = None\n attend_to = None\n\n net_o = l(net, mask=AttentionMask(None, causality_mask), attend_to=attend_to,\n pos_offset=pos_offset)\n\n if plot_cossim:\n features.append(net_o)\n\n with torch.no_grad():\n ndiff = torch.norm(net_o - net, p=2, dim=-1)\n n_in = torch.norm(net, p=2, dim=-1)\n self.log(f\"activation_norm/abs_update_layer_{li}\", ndiff.mean())\n self.log(f\"activation_norm/in_layer_{li}\", n_in.mean())\n self.log(f\"activation_norm/rel_update_layer_{li}\", (ndiff/n_in.clamp(min=torch.finfo(n_in.dtype).eps)).mean())\n\n if self.training and self.p_drop_layer > 0.0:\n net = torch.where(torch.rand_like(net_o[..., 0:1]) < self.p_drop_layer, net, net_o)\n else:\n net = net_o\n\n if self.use_last_state and n_prev_states > 0:\n # If we carry over the last state, save it here\n new_state = [((state[0] if state is not None else []) + [net.detach()])[-n_prev_states:]]\n\n if plot_cossim:\n with torch.no_grad():\n f_sample = [f.view(-1, f.shape[-1])[:1024] for f in features]\n f_sample_all = torch.stack(f_sample, -2)\n scores = framework.utils.cossim(f_sample_all, f_sample_all).mean(0)\n self.log(\"feature_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n outs = F.softmax(self.gen_output(f_sample_all, target).transpose(0, 1), -1)\n scores = framework.utils.cossim(outs, outs).mean(0)\n self.log(\"out_dist_cossim\", framework.visualize.plot.Heatmap(scores, range=(0, 1), textval=False))\n\n real_out = outs[:, -1]\n for i in range(outs.shape[-2] - 1):\n self.log(f\"out_diff_{i}\", (outs[:, i] - real_out).norm(dim=-1, p=1).mean())\n\n del outs\n\n\n del features\n\n net = self.gen_output(net, target)\n self.iter += 1\n\n return net, new_state" }, { "identifier": "task", "path": "tasks/task_db.py", "snippet": "def task(name: Optional[str] = None):\n def wrapper(cls):\n n = TASK_PREFIX + (name or camel_to_snake(cls.__name__))\n assert n not in TASKS, f\"Task {n} already exists\"\n TASKS[n] = cls\n return cls\n return wrapper" }, { "identifier": "args", "path": "tasks/task_db.py", "snippet": "def args(fn):\n global ARGS_REGISTERS\n ARGS_REGISTERS.append(fn)\n return fn" }, { "identifier": "RelativeTransformerEncoderLayer", "path": "layers/transformer/relative_transformer.py", "snippet": "class RelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, test_pos_clamp: Optional[int] = None, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, ln_after_attention: bool = True):\n super().__init__()\n self.ln_after_attention = ln_after_attention\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n if ln_after_attention:\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = 
torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.self_attn(src, attend_to if attend_to is not None else src, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src = self.norm1(src) if self.ln_after_attention else src\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))\n src = src + self.dropout2(src2)\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "PrelnRelativeTransformerEncoderLayer", "path": "layers/transformer/relative_preln_transformer.py", "snippet": "class PrelnRelativeTransformerEncoderLayer(RelativeTransformerEncoderLayer):\n is_preln = True\n\n def __init__(self, d_model, nhead, n_layers: int, dim_feedforward=2048, dropout=0.1,\n activation: ActivationFunction = F.relu, attention_dropout=0, test_pos_clamp: Optional[int] = None,\n drop_expand: bool = True, head_projection_size: Optional[int] = None):\n super().__init__(\n d_model=d_model, nhead=nhead, dim_feedforward=dim_feedforward, dropout=dropout,\n activation=activation, attention_dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n drop_expand=drop_expand, head_projection_size=head_projection_size)\n\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src)\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n src2 = self.norm2(src)\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n return src" }, { "identifier": "RelativeMoeTransformerEncoderLayer", "path": "layers/transformer/relative_moe_transformer.py", "snippet": "class RelativeMoeTransformerEncoderLayer(LoggingLayer, torch.nn.Module):\n def __init__(self, d_model, nhead, n_experts: int, expert_size: int, n_layers: int,\n dropout=0.1, activation: ActivationFunction = F.relu, attention_dropout=0,\n test_pos_clamp: Optional[int] = None,\n dropout_mode: str = \"none\", selection_mode: str = \"add\",\n perplexity_reg: float = 0.0,\n n_heads: int = 1, norm_keys: bool = False, perplexity_reg_mode: str=\"step\",\n n_random: int = 0, reg_type: str = \"normal\",\n topk_mode: str = \"full\", head_projection_size: Optional[int] = None,\n activation_after_topk: bool = False,\n drop_parallel: bool = True,\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False,\n sel_norm: str = \"none\",\n preln: bool = True, ln_affine: bool = True,\n moe_dropout_factor: float = 1.0,\n drop_expert: float = 0.0, sync_distributed: bool = True,\n modulation_amplitude: float = 0.5, moe_init_scale: float = 1.0,\n moe_att_n_experts: int = 4, moe_att_expert_dropout: Optional[float] = None,\n moe_att_selection_mode: str = \"sigmoid\",\n moe_att_k: Optional[int] = None, moe_att_ppl_reg: Optional[float] = None,\n q_expert: 
bool = True, k_expert: bool = True, v_expert: bool = True,\n o_expert: bool = True,\n v_projection_size: Optional[int] = None,\n qside_n_experts: Optional[int] = None,\n moe_attention: bool = False, moe_att_variant: str = \"full\",\n moe_att_shared_experts: bool = False,\n moe_att_kq_n_experts: Optional[int] = None, moe_att_separate_kq_sel: bool = False,\n moe_att_norm_init: bool = False, moe_att_same_sel: bool = False, moe_att_norm_retrieval: bool = False,\n rotate_fraction: float = 0.5, rope_base: float = 10000):\n super().__init__()\n self.preln = preln\n self.i = 0\n\n if moe_attention:\n if moe_att_variant == \"full\":\n self.self_attn = FullMoeRelativeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts,\n perplexity_reg=perplexity_reg if moe_att_ppl_reg is None else moe_att_ppl_reg,\n expert_dropout=drop_expert if moe_att_expert_dropout is None else moe_att_expert_dropout,\n selection_mode=moe_att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n moe_k=n_heads if moe_att_k is None else moe_att_k, o_expert=o_expert, qside_n_experts=qside_n_experts,\n v_projection_size=v_projection_size, shared_experts=moe_att_shared_experts,\n kq_n_experts=moe_att_kq_n_experts, separate_kq_sel=moe_att_separate_kq_sel,\n normalize_init=moe_att_norm_init,\n same_sel=moe_att_same_sel, normalize_retrieval=moe_att_norm_retrieval,\n )\n elif moe_att_variant == \"full_rope\":\n self.self_attn = FullMoeRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts,\n perplexity_reg=perplexity_reg if moe_att_ppl_reg is None else moe_att_ppl_reg,\n expert_dropout=drop_expert if moe_att_expert_dropout is None else moe_att_expert_dropout,\n selection_mode=moe_att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n moe_k=n_heads if moe_att_k is None else moe_att_k, o_expert=o_expert, qside_n_experts=qside_n_experts,\n v_projection_size=v_projection_size, shared_experts=moe_att_shared_experts,\n kq_n_experts=moe_att_kq_n_experts, separate_kq_sel=moe_att_separate_kq_sel,\n normalize_init=moe_att_norm_init, normalize_retrieval=moe_att_norm_retrieval,\n rotate_fraction=rotate_fraction, rope_base=rope_base,\n )\n else:\n raise ValueError(f\"Unknown attention variant {moe_att_variant}\")\n else:\n self.self_attn = FixedRelativeMultiheadAttention(\n d_model, nhead, dropout=attention_dropout, test_pos_clamp=test_pos_clamp,\n projection_size=head_projection_size)\n\n std_scale = math.sqrt(2.0 / n_layers) if preln else 1.0\n std_scale *= math.sqrt(moe_init_scale)\n\n self.pkm = MoE(\n d_model, n_experts, expert_size, dropout=dropout * moe_dropout_factor, dropout_mode=dropout_mode,\n weight_scale=std_scale, selection_mode=selection_mode,\n perplexity_reg=perplexity_reg, n_heads=n_heads,\n norm_keys=norm_keys, perplexity_reg_mode=perplexity_reg_mode, n_random=n_random,\n reg_type=reg_type, topk_mode=topk_mode,\n activation_after_topk=activation_after_topk,\n activation=activation,\n normalize_expert_sel_init=normalize_expert_sel_init, norm_key_init=norm_key_init,\n norm_value_init=norm_value_init, identical_init=identical_init,\n sel_norm=sel_norm,\n expert_dropout=drop_expert,\n sync_distributed=sync_distributed,\n modulation_amplitude=modulation_amplitude)\n\n self.norm1 = torch.nn.LayerNorm(d_model, 
elementwise_affine=ln_affine)\n self.norm2 = torch.nn.LayerNorm(d_model, elementwise_affine=ln_affine)\n self.dropout = torch.nn.Dropout(dropout)\n\n self.activation = activation\n self.drop_parallel = drop_parallel\n\n if preln:\n reset_prenorm_params(self, n_layers)\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask,\n pos_offset=pos_offset)\n src = src + self.dropout(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src = src2 = self.norm1(src)\n\n src3 = self.pkm(src2)\n\n src = src + self.dropout(src3)\n if not self.preln:\n src = self.norm2(src)\n return src" }, { "identifier": "FastRopeTransformerEncoderLayer", "path": "layers/transformer/fast_rope_transformer.py", "snippet": "class FastRopeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,\n rotate_fraction: float = 0.5, rope_base: float = 10000):\n super().__init__()\n self.preln = preln\n self.self_attn = FastRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, rotate_fraction=rotate_fraction,\n rope_base=rope_base)\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n\n if preln:\n if n_layers is None:\n raise ValueError(\"n_layers must be specified when using preln\")\n reset_prenorm_params(self, n_layers)\n else:\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src) if self.preln else src\n src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n\n if self.preln:\n src2 = self.norm2(src)\n else:\n src2 = src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n\n if not self.preln:\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "MoeAttentionRelativeTransformerEncoderLayer", "path": "layers/transformer/moe_attention_relative_transformer.py", "snippet": "class MoeAttentionRelativeTransformerEncoderLayer(torch.nn.Module):\n def __init__(self, d_model, nhead, moe_att_n_experts, dim_feedforward=2048, dropout=0.1, activation: ActivationFunction = F.relu,\n attention_dropout=0, drop_expand: bool = True,\n head_projection_size: Optional[int] = None, preln: bool = False, n_layers: Optional[int] = None,\n att_perplexity_reg: float = 0.0, expert_dropout: float = 0.0, 
att_selection_mode=\"sigmoid\",\n attention_variant=\"moa\", q_expert: bool = True, k_expert: bool = True, v_expert: bool = True,\n o_expert: bool = True, moe_k: int = 2,\n norm_qk_score: bool = False, v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n cvloss: float = 0.0, switchloss: float = 0.0, zloss: float = 0.0,\n moa_mode: str = \"my\", rotate_fraction: float = 0.5, rope_base: float = 10000,\n moeatt_norm_init: bool = False):\n super().__init__()\n self.is_preln = preln\n if attention_variant not in {\"full\", \"full_rope\"} and (not q_expert):\n raise ValueError(\"q_expert can be disabled only when using qside attention\")\n\n if attention_variant == \"moa\":\n self.self_attn = MoA(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, mode=moa_mode, cvloss=cvloss, switchloss=switchloss, zloss=zloss\n )\n elif attention_variant == \"full\":\n self.self_attn = FullMoeRelativeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n norm_qk_score=norm_qk_score, v_projection_size=v_projection_size, same_sel=same_sel,\n o_expert=o_expert, moe_k=moe_k, qside_n_experts=qside_n_experts,\n shared_experts=shared_experts, kq_n_experts=kq_n_experts, separate_kq_sel=separate_kq_sel,\n normalize_init=moeatt_norm_init\n )\n elif attention_variant == \"full_rope\":\n self.self_attn = FullMoeRopeAttention(\n d_model, nhead, dropout=attention_dropout,\n projection_size=head_projection_size, init_std_scale=math.sqrt(2 / n_layers) if preln else 1.0,\n n_experts=moe_att_n_experts, perplexity_reg=att_perplexity_reg, expert_dropout=expert_dropout,\n selection_mode=att_selection_mode, q_expert=q_expert, k_expert=k_expert, v_expert=v_expert,\n norm_qk_score=norm_qk_score, v_projection_size=v_projection_size, same_sel=same_sel,\n o_expert=o_expert, moe_k=moe_k, qside_n_experts=qside_n_experts,\n shared_experts=shared_experts, kq_n_experts=kq_n_experts, separate_kq_sel=separate_kq_sel,\n rotate_fraction=rotate_fraction, rope_base=rope_base,\n normalize_init=moeatt_norm_init\n )\n else:\n raise ValueError(f\"Unknown attention variant: {attention_variant}\")\n\n self.linear1 = torch.nn.Linear(d_model, dim_feedforward)\n self.dropout = torch.nn.Dropout(dropout) if drop_expand else lambda x: x\n self.linear2 = torch.nn.Linear(dim_feedforward, d_model)\n\n self.norm1 = torch.nn.LayerNorm(d_model)\n self.norm2 = torch.nn.LayerNorm(d_model)\n self.dropout1 = torch.nn.Dropout(dropout)\n self.dropout2 = torch.nn.Dropout(dropout)\n\n self.activation = activation\n\n if preln:\n if n_layers is None:\n raise ValueError(\"n_layers must be specified when using preln\")\n reset_prenorm_params(self, n_layers)\n else:\n self.reset_parameters()\n\n def forward(self, src: torch.Tensor, mask: Optional[AttentionMask] = None, attend_to: Optional[torch.Tensor] = None,\n pos_offset: Optional[int] = None) -> torch.Tensor:\n src2 = self.norm1(src) if self.is_preln else src\n 
src2 = self.self_attn(src2, self.norm1(attend_to) if attend_to is not None else src2, mask, pos_offset=pos_offset)\n src = src + self.dropout1(src2)\n\n if self.is_preln:\n src2 = self.norm2(src)\n else:\n src2 = src = self.norm1(src)\n\n src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))\n src = src + self.dropout2(src2)\n\n if not self.is_preln:\n src = self.norm2(src)\n return src\n\n def reset_parameters(self):\n torch.nn.init.xavier_normal_(self.linear1.weight, gain=torch.nn.init.calculate_gain('relu')\n if self.activation is F.relu else 1.0)\n torch.nn.init.xavier_uniform_(self.linear2.weight)" }, { "identifier": "MoE", "path": "layers/moe_layer.py", "snippet": "class MoE(LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, dmodel: int, n_experts: int, expert_size: int, n_heads: int,\n dropout: float = 0, weight_scale: float = 1.0,\n dropout_mode: str = \"none\", selection_mode: str = \"sigmoid\", perplexity_reg: float = 0.0,\n norm_keys: bool = False,\n perplexity_reg_mode: str=\"step\", n_random: int = 0, reg_type: str = \"entropy\",\n topk_mode: str = \"full\", activation_after_topk: bool = False,\n activation = lambda x: F.relu(x, inplace=True),\n normalize_expert_sel_init: bool = False, norm_key_init: bool = False, norm_value_init: bool = False,\n identical_init: bool = False,\n rescale_normed: bool = False, sel_norm: str = \"none\",\n v_dim: Optional[int] = None,\n expert_dropout: float = 0.0,\n sync_distributed: bool = False,\n modulation_amplitude: float = 0.5,\n ppl_past_blocks: int = 0):\n\n super().__init__()\n self.k_dim = dmodel\n self.v_dim = v_dim if v_dim is not None else dmodel\n self.n_experts = n_experts\n self.expert_size = expert_size\n self.size = self.n_experts * self.expert_size\n self.dropout = dropout\n self.dropout_mode = dropout_mode\n self.selection_mode = selection_mode\n self.perplexity_reg = perplexity_reg\n self.k_vec_dim = self.k_dim\n self.n_heads = n_heads\n self.norm_keys = norm_keys\n self.perplexity_reg_mode = perplexity_reg_mode\n self.n_random = n_random\n self.reg_type = reg_type\n self.topk_mode = topk_mode\n self.activation_after_topk = activation_after_topk\n self.activation = activation\n self.weight_scale = weight_scale\n self.normalize_expert_sel_init = normalize_expert_sel_init\n self.norm_key_init = norm_key_init\n self.norm_value_init = norm_value_init\n self.identical_init = identical_init\n self.layer = 0\n self.initalized = False\n self.rescale_normed = rescale_normed\n self.sel_norm = sel_norm\n self.was_training = True\n self.expert_dropout = expert_dropout\n self.reg_counts = 0\n self.sync_distributed = sync_distributed and torch.distributed.is_initialized()\n self.modulation_amplitude = modulation_amplitude\n self.record_all_expert_sel_counts = False\n self.ppl_past_blocks = ppl_past_blocks\n self.blocks_for_ppl = []\n self.recorded_inputs = []\n\n self.coocurence = None\n\n assert self.selection_mode in {\"gate\", \"sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkhorn_local\", \"mul\", \"sinkmoid2\", \"sinkmax2\"}\n assert self.perplexity_reg_mode in {\"step\", \"global\", \"time\", \"global_time\"}\n assert self.dropout_mode in {\"none\", \"score\"}\n assert self.reg_type in {\"perplexity\", \"variance\", \"entropy\", \"l2\", \"switch\"}\n assert self.topk_mode in {\"full\", \"l1_approx\", \"approx\"}\n assert self.sel_norm in {\"none\", \"cos\", \"input\", \"weights\"}\n\n self.register_buffer(\"iter\", torch.tensor(0, dtype=torch.int64), 
persistent=False)\n\n if selection_mode in {\"mul\"} and activation_after_topk:\n raise ValueError(\"Activation after topk is not supported with mul selection\")\n\n self.keys = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim, self.expert_size))\n\n self.values = torch.nn.Parameter(torch.empty(self.n_experts, self.expert_size, self.v_dim))\n\n self.expert_sel = torch.nn.Parameter(torch.empty(self.n_experts, self.k_vec_dim))\n self.sel = lambda x: F.linear(x, self.expert_sel)\n\n torch.nn.init.normal_(self.expert_sel, std=self.k_vec_dim ** -0.5 * weight_scale)\n torch.nn.init.normal_(self.keys, std=dmodel ** -0.5 * weight_scale)\n torch.nn.init.normal_(self.values, std=self.size ** -0.5 * weight_scale)\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.sel_count_log = None\n\n self.all_expert_sel_counts = []\n self.all_expert_sel_soft = []\n\n self.register_buffer(\"kv_sel_counts\", torch.zeros(self.n_experts, self.expert_size), persistent=False)\n self.register_buffer(\"kv_sel_counts_100\", torch.zeros_like(self.kv_sel_counts))\n\n if self.rescale_normed and self.sel_norm != \"none\":\n self.sel_scale = torch.nn.Parameter(torch.ones([1]))\n else:\n self.sel_scale = 1.0\n\n self.register_buffer(\"seq\", torch.arange(max(self.n_heads, self.n_experts, self.k_dim, self.v_dim), dtype=torch.long), persistent=False)\n self.regroup_weights()\n\n if self.ppl_past_blocks > 0 and self.reg_type not in {\"perplexity\", \"entropy\"}:\n print(f\"Warning: ppl_past_blocks>0 (currently {self.ppl_past_blocks}) is only supported with perplexity and entropy regularization\")\n\n def keys_to_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n k = keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n return k.permute(0, 2, 1).contiguous().view(-1, self.k_vec_dim)\n\n def keys_from_logical_order(self, keys: torch.Tensor) -> torch.Tensor:\n return keys.view(self.n_experts, self.expert_size, self.k_vec_dim).permute(0, 2, 1).contiguous().view(self.n_experts * self.k_vec_dim, self.expert_size)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def regroup_weights(self) -> Optional[torch.Tensor]:\n with torch.no_grad():\n if self.norm_key_init:\n self.renorm_keep_std(self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size), dim=1)\n\n if self.norm_value_init:\n self.renorm_keep_std(self.values, dim=1)\n\n if self.identical_init:\n k = self.keys.view(self.n_experts, self.k_vec_dim, self.expert_size)\n self.keys.set_(k[:1].expand_as(k).reshape_as(self.keys))\n\n v = self.values.view(self.n_experts, self.expert_size, self.v_dim)\n self.values.set_(v[:1].expand_as(v).reshape_as(self.values))\n\n if self.normalize_expert_sel_init:\n self.renorm_keep_std(self.expert_sel, dim=1)\n\n def ani(self, x: torch.Tensor) -> torch.Tensor:\n assert x.ndim == 2\n chunk_size = 32\n\n xnorm = F.normalize(x, 2, dim=-1)\n\n accu = 0\n for i in range(0, x.shape[0], chunk_size):\n a = xnorm[i: i + chunk_size]\n sims = xnorm @ a.T\n sims[i : i + chunk_size].fill_diagonal_(0)\n accu += sims.sum()\n\n return accu / (x.shape[0] * (x.shape[0] - 1))\n\n def log_expert_sel_usage(self, prefix: str, channel_sel_counts: torch.Tensor):\n sel_nonzero = (channel_sel_counts != 0).type(torch.float).sum(axis=-1) / self.expert_size\n self.log(f\"{prefix}/mean\", 
sel_nonzero.mean())\n self.log(f\"{prefix}/min\", sel_nonzero.min())\n self.log(f\"{prefix}/max\", sel_nonzero.max())\n\n\n def pre_train_forward(self):\n if self.norm_keys:\n with torch.no_grad():\n self.keys.div_(self.keys.norm(dim=-1, keepdim=True))\n\n if self.training and not self.was_training:\n sorted_counts = self.index_sel_counts.sort(descending=True).values\n self.log(\"test_exert_channel_usage\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.layer = 0\n if self.sel_hist:\n self.sel_hist = []\n self.index_sel_counts = 0\n self.index_sel_norm = 0\n self.reg_counts = 0\n\n def before_loss(self):\n if self.sel_hist:\n # Concatenate against time dimension. Important for the within-batch regularization\n sel = torch.cat(self.sel_hist, -2)\n self.add_perplexity_reg(sel)\n\n self.sel_hist = []\n\n if self.index_sel_norm > 0:\n if self.training:\n with torch.no_grad():\n self.log(\"usag_rel_perplexity_all_layers\", utils.relative_perplexity(self.index_sel_counts / self.index_sel_norm))\n self.log(\"dead_expert_proportion_all_layers\", (self.index_sel_counts == 0).float().sum() / self.n_experts)\n\n self.log_expert_sel_usage(\"exert_channel_usage\", self.kv_sel_counts)\n\n self.kv_sel_counts_100.add_(self.kv_sel_counts)\n self.kv_sel_counts.zero_()\n\n self.index_sel_counts_100 = self.index_sel_counts_100 + self.index_sel_counts\n self.index_sel_norm_100 = self.index_sel_norm_100 + self.index_sel_norm\n\n if self.training and self.iter % 100 == 0:\n norm_cnt = self.index_sel_counts_100 / self.index_sel_norm_100\n self.log(\"usag_rel_perplexity_100\", utils.relative_perplexity(norm_cnt))\n self.log(\"dead_expert_proportion_100\", (self.index_sel_counts_100 == 0).float().sum() / self.n_experts)\n\n sorted_counts = self.index_sel_counts_100.sort(descending=True).values\n self.log(\"usage_counts_100\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n\n self.log_expert_sel_usage(\"exert_channel_usage_100\", self.kv_sel_counts_100)\n self.kv_sel_counts_100.zero_()\n\n self.index_sel_counts_100 = 0\n self.index_sel_norm_100 = 0\n\n self.log(\"ani/keys\", self.ani(self.keys_to_logical_order(self.keys)))\n self.log(\"ani/values\", self.ani(self.values.flatten(0, -2)))\n self.log(\"ani/expert_sel\", self.ani(self.expert_sel.T))\n\n if self.training:\n self.iter += 1\n\n def topk(self, x: torch.Tensor, k: int, approx: bool) -> Tuple[torch.Tensor, torch.Tensor]:\n if approx:\n x = x.view(*x.shape[:-1], k, -1)\n scores, ind = x.max(-1)\n return scores, self.seq[:k] * x.shape[-1] + ind\n else:\n return x.topk(k, dim=-1, sorted=False)\n\n def rolling_logsumexp(self, x: torch.Tensor) -> torch.Tensor:\n # Simulate calculating logsumexp over a bigger batch than the current one. 
Will have stale values, but that\n # should not matter much later in training.\n if self.ppl_past_blocks == 0 or not self.training:\n return F.log_softmax(x, dim=-1)\n else:\n if len(self.blocks_for_ppl) == self.ppl_past_blocks:\n self.blocks_for_ppl.pop(0)\n\n self.blocks_for_ppl.append(x)\n res = F.log_softmax(torch.cat(self.blocks_for_ppl, dim=0), dim=-1)\n self.blocks_for_ppl[-1] = self.blocks_for_ppl[-1].detach()\n return res\n\n def add_perplexity_reg(self, sel: torch.Tensor):\n sync_distributed = self.sync_distributed and (self.perplexity_reg_mode not in {\"time\", \"global_time\"})\n\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n sel = sel.flatten(0, -3)\n else:\n sel = sel.flatten(0, -2)\n\n # Note: sel are raw logits, no matter what activation is used\n if self.perplexity_reg > 0:\n if self.reg_type == \"perplexity\":\n sel_d = self.rolling_logsumexp(sel)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, self.sync_distributed)\n loss = lambda: self.perplexity_reg * ( - utils.relative_perplexity_l(sel_d).mean())\n elif self.reg_type == \"entropy\":\n sel_d = self.rolling_logsumexp(sel)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, self.sync_distributed)\n loss = lambda: self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n elif self.reg_type == \"variance\":\n if sync_distributed:\n raise NotImplementedError(\"Variance regularization is not supported in distributed mode\")\n avg_sel = sel.mean(-2)\n loss = lambda: self.perplexity_reg * avg_sel.var(-1).mean()\n elif self.reg_type == \"l2\":\n loss = lambda: self.perplexity_reg * sel.pow(2).mean()\n elif self.reg_type == \"switch\":\n if sync_distributed:\n torch.distributed.all_reduce(self.reg_counts, op=torch.distributed.ReduceOp.SUM)\n\n p_sel_real = self.reg_counts / self.reg_counts.sum(-1, keepdims=True)\n if self.perplexity_reg_mode in {\"time\", \"global_time\"}:\n p_sel_real = p_sel_real.unsqueeze(-2)\n\n loss = lambda: self.perplexity_reg * (F.softmax(sel, dim=-1) * p_sel_real).mean()\n self.reg_counts = 0\n else:\n assert False\n\n self.add_reg(loss, \"moe\")\n\n def compute_scores(self, input: torch.Tensor, index: CVMMSel, expert_scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n scores = cvmm(input, index, self.keys)\n\n if self.selection_mode in {\"mul\"}:\n scores = scores * expert_scores[..., None]\n elif self.selection_mode in {\"gate\", \"sigmoid\", \"sinkhorn\", \"sinkhorn2\", \"sinkmoid\", \"sinkmax\", \"sinkmoid2\"}:\n # Handle it later\n pass\n\n scores = self.activation(scores)\n\n plot_training = self.train and self.iter % 10 == 0\n if plot_training:\n with torch.no_grad():\n gt0 = (scores > 0).float()\n gt0_s = gt0.sum()\n\n if plot_training:\n self.log(\"relu_pass_rate\", gt0_s / scores.numel())\n\n self.kv_sel_counts.index_add_(0, index.raw_sel.flatten(), gt0.flatten(end_dim=-2))\n\n if self.dropout > 0 and self.dropout_mode != \"none\":\n scores = F.dropout(scores, self.dropout, training=self.training)\n\n return scores\n\n def sel_activation(self, sel: torch.Tensor, seq_len: int) -> Tuple[torch.Tensor, torch.Tensor]:\n reg_sel = sel\n if self.selection_mode in {\"sigmoid\"}:\n sel = torch.sigmoid(sel)\n elif self.selection_mode in {\"mul\"}:\n sel = sel.abs()\n reg_sel = sel\n elif self.selection_mode in {\"gate\"}:\n sel = F.softmax(sel, dim=-1)\n with torch.no_grad():\n self.log(\"expert_rel_perplexity_per_selection\", utils.relative_perplexity(sel).mean())\n else:\n assert False\n\n return sel, reg_sel\n\n def forward(self, input: 
torch.Tensor) -> torch.Tensor:\n out = 0\n\n in1 = in2 = input\n\n sel = self.sel(in1)\n # sel=sel.float()\n\n if self.sel_norm == \"cos\":\n sel = sel / (in1.norm(dim=-1, keepdim=True) * self.expert_sel.norm(dim=-1)[None]) * self.sel_scale\n elif self.sel_norm == \"weights\":\n sel = sel * (self.sel_scale / self.expert_sel.norm(dim=-1)[None])\n elif self.sel_norm == \"input\":\n sel = sel * (self.sel_scale / in1.norm(dim=-1, keepdim=True))\n\n sel_raw = reg_sel = sel\n\n inv_val = float(\"-inf\")\n\n if not self.activation_after_topk:\n # Sinkhorn should be always applied before top-k\n sel, reg_sel = self.sel_activation(sel, input.shape[-2])\n inv_val = 0\n\n if self.training and self.expert_dropout > 0:\n if self.selection_mode not in {\"sigmoid\", \"gate\"}:\n raise ValueError(\"Expert dropout not supported in this mode\")\n\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, inv_val)\n else:\n sel2 = sel\n\n sel_val, sel_index = self.topk(sel2, self.n_heads, self.topk_mode in {\"l1_approx\", \"approx\"})\n\n if self.activation_after_topk or (self.selection_mode in {\"mul\"}):\n sel_val = torch.gather(sel_raw, -1, sel_index)\n sel_val, reg_sel = self.sel_activation(sel_val, input.shape[-2])\n\n\n record_counts_now = (self.training and self.iter % 10 == 0) or (not self.training) or (self.record_all_expert_sel_counts)\n\n if not self.training:\n sel_index_flat = sel_index.flatten(end_dim=-2)\n if self.coocurence is None:\n self.coocurence = torch.zeros([self.n_experts, self.n_experts], device=sel_index_flat.device, dtype=torch.long)\n\n for h1 in range(self.n_heads):\n for h2 in range(self.n_heads):\n ind_flat = sel_index_flat[..., h1] * self.n_experts + sel_index_flat[..., h2]\n values = torch.tensor([1], device=self.coocurence.device, dtype=self.coocurence.dtype).expand_as(ind_flat)\n # values = sel_val[..., h2].flatten()\n self.coocurence.flatten().put_(ind_flat, values, accumulate=True)\n # self.coocurence[sel_index_flat[..., h1], sel_index_flat[..., h2]] += 1\n\n if record_counts_now or self.reg_type == \"switch\":\n reg_counts = F.one_hot(sel_index, self.n_experts).type_as(input)\n\n if self.reg_type == \"switch\":\n reg_counts2 = reg_counts.view(*input.shape[:-2], input.shape[-2] * self.n_heads, self.n_experts)\n if self.perplexity_reg_mode == \"time\":\n reg_counts2 = reg_counts2.sum(-2)\n else:\n reg_counts2 = reg_counts2.flatten(end_dim=-2).sum(0)\n\n self.reg_counts = self.reg_counts + reg_counts2\n\n if record_counts_now:\n with torch.no_grad():\n sel_counts = reg_counts.flatten(end_dim=-2).sum(0)\n cnt = sel_index.nelement()\n\n p_expert_sel = sel_counts / cnt\n\n self.index_sel_counts = self.index_sel_counts + sel_counts\n self.index_sel_norm = self.index_sel_norm + cnt\n\n if self.record_all_expert_sel_counts:\n softcnt = torch.zeros_like(sel_counts, dtype=sel_val.dtype)\n softcnt.index_add_(0, sel_index.flatten(), sel_val.flatten())\n\n self.all_expert_sel_soft.append(softcnt)\n self.all_expert_sel_counts.append(sel_counts)\n\n if self.training:\n self.log(\"min_sel_score\", sel_val.min(dim=-1).values.mean())\n self.log(\"max_sel_score\", sel_val.max(dim=-1).values.mean())\n\n sel_oh = F.one_hot(sel_index, self.n_experts).sum(-2).bool()\n if self.layer >= 1 and self.training:\n self.log(f\"layer_sel_overlap_{self.layer}\", ((self.prev_sel_oh & sel_oh).sum(-1).float() / self.n_heads).mean())\n\n self.prev_sel_oh = sel_oh\n\n ppl = utils.relative_perplexity(p_expert_sel)\n self.log(\"usage_rel_perplexity\", ppl)\n 
self.log(\"dead_expert_proportion\", (p_expert_sel == 0).float().sum() / self.n_experts)\n\n if self.perplexity_reg_mode in {\"step\", \"time\"}:\n self.add_perplexity_reg(reg_sel)\n elif self.perplexity_reg > 0 and self.training:\n self.sel_hist.append(reg_sel)\n\n sel_indices = cvmm_prepare_sel2(sel_index.int())\n\n scores = self.compute_scores(in2, sel_indices, sel_val)\n\n sel_indices = sel_indices.clone()\n sel_indices.reduction_weight = sel_val\n sel_indices.sel_index = sel_indices.out_index\n sel_indices.out_index = None\n\n if self.selection_mode not in {\"gate\", \"sigmoid\"}:\n sel_indices.reduction_weight = torch.ones_like(sel_indices.reduction_weight)\n\n out = cvmm(scores, sel_indices, self.values)\n\n self.layer += 1\n\n self.was_training = self.training\n res = out.view(*input.shape[:-1], self.v_dim)\n return res\n\n def dump_logs(self, save_dir: str):\n if self.coocurence is not None:\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.coocurence, os.path.join(save_dir, \"coocurence.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if self.coocurence is not None:\n coo = self.coocurence / self.coocurence.diagonal().clamp(min=1)[:, None]\n res[\"expert_coocurence\"] = framework.visualize.plot.Heatmap(coo, xlabel=\"expert\", ylabel=\"expert\", textval=False)\n self.coocurence = None\n return res" }, { "identifier": "Result", "path": "interfaces/result.py", "snippet": "class Result:\n outputs: torch.Tensor\n loss: torch.Tensor\n\n batch_dim = 0\n\n def plot(self) -> Dict[str, Any]:\n return {}\n\n @property\n def batch_size(self) -> int:\n return self.outputs.shape[self.batch_dim]\n\n @staticmethod\n def merge(l: List, batch_weights: Optional[List[float]] = None):\n if len(l) == 1:\n return l[0]\n batch_weights = batch_weights if batch_weights is not None else [1] * len(l)\n loss = sum([r.loss * w for r, w in zip(l, batch_weights)]) / sum(batch_weights)\n out = torch.cat([r.outputs for r in l], l[0].batch_dim)\n return l[0].__class__(out, loss)" }, { "identifier": "LayerVisualizer", "path": "layers/layer_with_visualization.py", "snippet": "class LayerVisualizer:\n def __init__(self, module: torch.nn.Module, options: Dict[str, Any] = {}):\n self.modules = []\n self.options = options\n self.curr_options = None\n for n, m in module.named_modules():\n if isinstance(m, LayerWithVisualization):\n self.modules.append((n, m))\n\n def plot(self) -> Dict[str, Any]:\n res = {}\n for n, m in self.modules:\n res.update({f\"{n}/{k}\": v for k, v in m.plot(self.curr_options).items()})\n m.visualization_enabled = False\n\n self.curr_options = None\n return res\n\n def prepare(self, options: Dict[str, Any] = {}):\n self.curr_options = self.options.copy()\n self.curr_options.update(options)\n\n for _, m in self.modules:\n m.prepare()\n m.visualization_enabled = True" }, { "identifier": "FullMoeRelativeAttentionCore", "path": "layers/transformer/full_moe_relative_attention.py", "snippet": "class FullMoeRelativeAttentionCore(LayerWithVisualization, LoggingLayer, RegularizedLayer, OncePerIterLayer, torch.nn.Module):\n def __init__(self, state_size: int, n_heads: int, n_experts: int, dropout: float = 0.0, input_size: Optional[int] = None,\n projection_size: Optional[int] = None, output_size: Optional[int] = None, init_std_scale: float = 1.0,\n perplexity_reg: float = 0, share_pk: bool = True, expert_dropout: float = 0.0,\n selection_mode: str = \"sigmoid\", moe_k: int = 2, q_expert: bool = True,\n k_expert: bool = True, v_expert: bool = True, o_expert: bool = 
True, norm_qk_score: bool = False,\n v_projection_size: Optional[int] = None, same_sel: bool = False,\n qside_n_experts: Optional[int] = None, shared_experts: bool = False,\n kq_n_experts: Optional[int] = None, separate_kq_sel: bool = False,\n normalize_init: bool = False, normalize_retrieval: bool = False):\n\n super().__init__()\n\n self.input_size = input_size or state_size\n self.output_size = output_size or state_size\n self.pe_size = self.input_size\n self.perplexity_reg = perplexity_reg\n self.share_pk = share_pk\n self.expert_dropout = expert_dropout\n self.selection_mode = selection_mode\n self.iter = 0\n self.moe_k = moe_k\n self.norm_qk_score = norm_qk_score\n self.same_sel = same_sel\n self.shared_experts = shared_experts\n self.init_std_scale = init_std_scale\n self.normalize_init = normalize_init\n self.attention_to_visualize = []\n self.selections_to_visualize = {}\n\n self.is_expert = {\n \"k\": k_expert,\n \"q\": q_expert,\n \"v\": v_expert,\n \"o\": o_expert\n }\n self.n_experts = {\n \"k\": kq_n_experts or n_experts,\n \"q\": kq_n_experts or qside_n_experts or n_experts,\n \"v\": n_experts,\n \"o\": qside_n_experts or n_experts\n }\n\n self.separate_k_sel = separate_kq_sel or (self.n_experts[\"k\"] != self.n_experts[\"v\"])\n self.separate_q_sel = separate_kq_sel or (self.n_experts[\"q\"] != self.n_experts[\"o\"])\n\n self.sel_hist = {}\n self.sel_counts_100 = {}\n\n self.n_heads = n_heads\n self.dropout = torch.nn.Dropout(dropout) if dropout > 0 else lambda x: x\n self.projection_size = projection_size or (state_size // n_heads)\n self.v_projection_size = v_projection_size or self.projection_size\n\n self.std_in = init_std_scale * math.sqrt(1 / self.input_size)\n std_out = init_std_scale * math.sqrt(1 / (n_heads * self.v_projection_size))\n\n self.create_selection_logic()\n\n self.src_side_maps = {\"k\", \"v\"}\n\n self.projections = torch.nn.ParameterDict({\n \"q\": self.create_param_block(\"q\", self.input_size, self.projection_size, self.std_in),\n \"k\": self.create_param_block(\"k\", self.input_size, self.projection_size, self.std_in),\n \"v\": self.create_param_block(\"v\", self.input_size, self.v_projection_size, self.std_in),\n \"o\": self.create_param_block(\"o\", self.v_projection_size, self.output_size, std_out),\n })\n\n if normalize_retrieval:\n self.norm_ret = torch.nn.LayerNorm(self.projection_size)\n else:\n self.norm_ret = lambda x: x\n\n self.sel_correlation = 0\n\n self.register_buffer(\"scale\", torch.full([1], 1.0 / math.sqrt(self.projection_size)), persistent=False)\n\n def renorm_keep_std(self, weight: torch.Tensor, dim: int = 0):\n with torch.no_grad():\n std = weight.std()\n weight.div_(weight.norm(dim=dim, keepdim=True))\n weight.mul_(std / weight.std())\n\n def get_n_copies(self, name: str):\n return self.n_heads\n\n def create_param_block(self, name: str, in_size: int, out_size: int, std: float):\n n_copies = self.get_n_copies(name)\n\n if self.is_expert[name]:\n exp_mul = 1 if self.shared_experts else n_copies\n p = torch.nn.Parameter(torch.randn(exp_mul * self.n_experts[name], in_size, out_size) * std)\n if self.normalize_init:\n self.renorm_keep_std(p, dim=0)\n return p\n else:\n if name == \"o\":\n in_size = n_copies * in_size\n else:\n out_size = n_copies * out_size\n return torch.nn.Parameter(torch.randn(out_size, in_size) * std)\n\n def create_selection_logic(self):\n sels_params = {}\n self.sel_map = {}\n\n def register_remap(dest: str, src: str) -> bool:\n if not (src in sels_params or src in self.sel_map):\n # src is not defined\n 
return False\n\n assert self.n_experts[src] == self.n_experts[dest]\n self.sel_map[dest] = self.sel_map.get(src, src)\n return True\n\n if self.is_expert[\"o\"]:\n sels_params[\"o\"] = self.init_sel(\"o\", self.std_in)\n\n if self.is_expert[\"q\"] and (self.separate_q_sel or not register_remap(\"q\", \"o\")):\n sels_params[\"q\"] = self.init_sel(\"q\", self.std_in)\n\n if self.is_expert[\"v\"] and ((not self.same_sel) or not register_remap(\"v\", \"o\")):\n sels_params[\"v\"] = self.init_sel(\"v\", self.std_in)\n\n if self.is_expert[\"k\"]:\n if (not (self.same_sel and self.separate_k_sel and register_remap(\"k\", \"q\"))) and (self.separate_k_sel or not register_remap(\"k\", \"v\")):\n sels_params[\"k\"] = self.init_sel(\"k\", self.std_in)\n\n self.selections = torch.nn.ParameterDict(sels_params)\n\n def init_sel(self, name: str, std: float) -> torch.nn.Parameter:\n n_copies = self.get_n_copies(name)\n n_experts = self.n_experts[name]\n sel = torch.nn.Parameter(torch.randn(n_experts*n_copies, self.input_size) * std)\n self.renorm_rows(sel)\n return sel\n\n def renorm_rows(self, x: torch.Tensor):\n with torch.no_grad():\n std_t = x.std(dim=-1, keepdim=True)\n x.div_(x.norm(dim=-1, keepdim=True))\n x.mul_(std_t / x.std())\n\n\n def project_to_torch_order(self, x: torch.Tensor):\n return x.view(*x.shape[:-1], self.get_n_copies(\"k\"), -1).transpose(-2, -3)\n\n def get_mask_tensor(self, src_len: int, mask: Optional[AttentionMask]) -> Optional[torch.Tensor]:\n if mask is None or (mask.position_mask is None and mask.src_length_mask is None):\n return None\n\n # mask.position_mask: [..., N_out, N_in]\n # mask.src_length_mask: [B, ...., N_in]\n # True where it has to be masked\n\n if mask.position_mask is not None:\n n_pad = src_len - mask.position_mask.shape[-1]\n if n_pad > 0:\n pm = F.pad(mask.position_mask, (n_pad, 0), 'constant', value=False)\n else:\n pm = mask.position_mask\n\n if mask.position_mask is None:\n m = mask.src_length_mask.unsqueeze(-2).unsqueeze(-2)\n elif mask.src_length_mask is None:\n m = pm\n else:\n m = mask.src_length_mask.unsqueeze(-2).unsqueeze(-2) | pm\n\n return m\n\n def train(self, mode: bool = True):\n self.sel_hist = {}\n return super().train(mode)\n\n def get_lost_on_hist(self, l: List[torch.Tensor]) -> torch.Tensor:\n assert l[0].ndim == 4\n l = [t.flatten(1,2) for t in l]\n sel = torch.cat(l, -2)\n sel_d = F.log_softmax(sel, dim=-1)\n sel_d = framework.utils.distributed_ops.log_mean(sel_d, -2, sync_distributed=False)\n return self.perplexity_reg * ( - utils.entropy_l(sel_d).mean())\n\n def get_reg_loss(self) -> Dict[str, torch.Tensor]:\n l = super().get_reg_loss()\n for k, v in self.sel_hist.items():\n l[f\"moe_att_entropy/{k}\"] = self.get_lost_on_hist(v)\n\n self.sel_hist = {}\n return l\n\n def get_sel(self, t: torch.Tensor, w: torch.Tensor, name: str) -> Selection:\n n_experts = self.n_experts[name]\n n_copies = self.get_n_copies(name)\n\n sel = F.linear(t, w).float()\n sel = sel.view(*sel.shape[:-1], n_copies, -1)\n with torch.no_grad():\n if self.expert_dropout > 0 and self.training:\n mask = torch.rand_like(sel) < self.expert_dropout\n sel2 = sel.masked_fill(mask, float('-inf'))\n else:\n sel2 = sel\n _, sel_index = sel2.topk(self.moe_k, dim=-1, sorted=False)\n sel_val = torch.gather(sel, -1, sel_index)\n\n if self.selection_mode == \"softmax\":\n sel_val = sel_val.softmax(-1)\n elif self.selection_mode == \"sigmoid\":\n sel_val = sel_val.sigmoid()\n else:\n raise ValueError(\"Unknown selection mode: \" + self.selection_mode)\n\n exp_shift = 0 if 
self.shared_experts else n_experts\n\n sel_index_shifted = (torch.arange(n_copies, device=sel_index.device, dtype=sel_index.dtype) * exp_shift).unsqueeze(-1) + sel_index\n sel_index_pp = cvmm_prepare_sel2(sel_index_shifted.flatten(-2,-1), sel_val)\n\n return Selection(sel, sel_val, sel_index, sel_index_pp)\n\n def before_loss(self):\n self.iter += 1\n if self.iter % 100 == 0:\n for k, v in self.sel_counts_100.items():\n sorted_counts = v.sort(descending=True).values\n self.log(f\"sel_counts/{k}\", framework.visualize.plot.Barplot(sorted_counts, xlabel=\"expert\", ylabel=\"usage count\"), drop_old=True)\n\n self.sel_counts_100 = {}\n\n def exp_proj(self, x: torch.Tensor, w: torch.Tensor, sel: Selection) -> torch.Tensor:\n return cvmm(x, sel.sel_index, w)\n\n def compute_sel(self, curr_state: torch.Tensor, attend_to: torch.Tensor) -> Dict[str, Selection]:\n outs = {}\n done = {}\n cross_atten = curr_state is not attend_to\n\n for name in (set(self.selections.keys()) | set(self.sel_map.keys())):\n name_actual = self.sel_map.get(name, name)\n\n # There could be 2 versions of everything: source side and destination side. Check if they are different,\n # and if not, use the cached version; my_id is the unique identifier for this transformation\n is_src_side = (name in self.src_side_maps) or not cross_atten\n my_id = (name_actual, is_src_side)\n\n cached = done.get(my_id)\n if cached is not None:\n outs[name] = cached\n continue\n\n # No cache, actually compute\n inp = attend_to if is_src_side else curr_state\n v = self.selections[name_actual]\n outs[name] = self.get_sel(inp, v, name)\n\n # Save history for regularization\n if self.perplexity_reg > 0 and self.training:\n if name not in self.sel_hist:\n self.sel_hist[name] = []\n self.sel_hist[name].append(outs[name].raw_sel)\n\n # Visualize statistics\n if self.training and self.iter % 10 == 0:\n self.sel_counts_100[name] = self.sel_counts_100.get(name, 0) + \\\n F.one_hot(outs[name].raw_sel_index.flatten(), self.n_experts[name]).sum(0)\n\n done[my_id] = outs[name]\n\n return outs\n\n def project(self, name: str, src: torch.Tensor, sel: Dict[str, Selection]) -> torch.Tensor:\n if name in sel:\n sv = sel[name]\n if self.norm_qk_score and name in {\"q\", \"k\"}:\n sv.sel_index.reduction_weight = F.normalize(sv.sel_index.reduction_weight, p=1, dim=-1)\n return self.exp_proj(src, self.projections[name], sv)\n else:\n return F.linear(src, self.projections[name])\n\n def attend(self, curr_state: torch.Tensor, attend_to: torch.Tensor, pos_offset: int, v: torch.Tensor,\n k: torch.Tensor, q: torch.Tensor, mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n raise NotImplementedError()\n\n def attention_proj(self, att: torch.Tensor, v: torch.Tensor,\n mask: Optional[torch.Tensor]) -> Tuple[torch.Tensor, torch.Tensor]:\n if mask is not None:\n att.masked_fill_(mask, float('-inf'))\n\n att = F.softmax(att, dim=-1)\n\n res = att @ v\n return res, att\n\n def forward(self, curr_state: torch.Tensor, attend_to: torch.Tensor, mask: Optional[AttentionMask],\n pos_offset: Optional[int] = None, need_weights: bool = False):\n # curr_state: [batch_size, out_len, c]\n # attend_to: [batch_size, in_len, c]\n\n if pos_offset is None:\n assert curr_state.shape[1] == attend_to.shape[1], \"If attend_to has different shape than curr_state, pos_offset should be provided\"\n pos_offset = 0\n\n sel = self.compute_sel(curr_state, attend_to)\n\n # scale q and k with sqrt(scale) before the attention. 
This should save memory, be faster, and\n # keep the range of k and v better. It should make attention NaNs better with float16.\n scale = self.scale.sqrt()\n\n q = self.project(\"q\", curr_state, sel)\n q = q * scale.type_as(q)\n k = self.project(\"k\", attend_to, sel)\n k = k * scale.type_as(k)\n v = self.project(\"v\", attend_to, sel)\n\n q = self.project_to_torch_order(q) if \"q\" not in sel else q.transpose(-2,-3)\n k = self.project_to_torch_order(k) if \"k\" not in sel else k.transpose(-2,-3)\n v = self.project_to_torch_order(v) if \"v\" not in sel else v.transpose(-2,-3)\n\n k = self.dropout(k)\n\n res, att = self.attend(curr_state, attend_to, pos_offset, v, k, q, self.get_mask_tensor(attend_to.shape[-2], mask))\n res = self.norm_ret(res)\n\n if self.visualization_enabled:\n self.attention_to_visualize.append(att[0].detach())\n for k, s in sel.items():\n if k not in self.selections_to_visualize:\n self.selections_to_visualize[k] = []\n\n with torch.no_grad():\n m = torch.zeros([*s.sel_val[0].shape[:-1], self.n_experts[k]], device=s.sel_val.device, dtype=s.sel_val.dtype)\n m.scatter_(-1, s.raw_sel_index[0], s.sel_val[0])\n\n self.selections_to_visualize[k].append(m)\n\n if self.get_n_copies(\"k\") != self.get_n_copies(\"v\"):\n res = res.view(\n *res.shape[:-1], self.get_n_copies(\"v\") // self.get_n_copies(\"k\"), -1).transpose(2,3).flatten(1,2).contiguous()\n\n if self.is_expert[\"o\"]:\n res = res.transpose(-2, -3)\n # The output selection indices are calculated from the current state and are also used for projecting \"q\".\n # But that projection needs to create multiple copies for the different heads. Here we already have the\n # heads, but we have to create copies for the top-k elements. We can calculate that from the reduction\n # weight. We also want to compute not only the weighted average between the top-k elements, but also\n # of the different heads. 
So reshape the reduction weight accordingly.\n o_sel = sel[\"o\"].sel_index.clone()\n o_sel.sel_index = o_sel.out_index // o_sel.reduction_weight.shape[-1]\n o_sel.reduction_weight = o_sel.reduction_weight.flatten(-2)\n out = cvmm(res, o_sel, self.projections[\"o\"])\n else:\n res = res.transpose(-2, -3)\n out = F.linear(res.contiguous().view(*curr_state.shape[:-1], -1), self.projections[\"o\"])\n\n return out\n\n def plot(self, options: Dict[str, Any]) -> Dict[str, Any]:\n r = {}\n marks = options.get(\"steplabel\")\n n_steps = options.get(\"n_steps\") or 9999999\n y_marks = options.get(\"target_labels\", marks)\n\n ns1 = (self.attention_to_visualize[0].shape[-2] + n_steps) if n_steps < 0 else 0\n ns1_e = self.attention_to_visualize[0].shape[-2] if n_steps < 0 else n_steps\n ns2 = (self.attention_to_visualize[0].shape[-1] + n_steps) if n_steps < 0 else 0\n ns2_e = self.attention_to_visualize[0].shape[-1] if n_steps < 0 else n_steps\n\n if marks is not None:\n assert len(marks) == self.attention_to_visualize[0].shape[-1]\n marks = marks[ns2:ns2_e]\n\n if y_marks is not None:\n assert len(y_marks) == self.attention_to_visualize[0].shape[-2]\n y_marks = y_marks[ns1:ns1_e]\n\n if options.get(\"mha.plot_head_details\") and self.attention_to_visualize[0].shape[0] > 1:\n for head in range(self.attention_to_visualize[0].shape[0]):\n sel_map = {k: [e[:, head][ns1:ns1_e] if k in {'q', 'o'} else e[:, head][ns2:ns2_e] for e in v] for k, v in self.selections_to_visualize.items()}\n selections = {k: torch.stack(v, 0).cpu() for k, v in sel_map.items()}\n\n x_selections = {k: v for k, v in selections.items() if k in {'k', 'v'}}\n y_selections = {k: v for k, v in selections.items() if k in {'q', 'o'}}\n\n r[f\"head_{head}\"] = MoEAttentionPlot(\n torch.stack([layer[head][ns1:ns1_e, ns2:ns2_e] for _, layer in enumerate(self.attention_to_visualize)], 0),\n x_selections, y_selections,\n ylabel=\"dest\", xlabel=\"src\", x_marks=marks, y_marks=y_marks)\n\n r[\"attention_max\"] = framework.visualize.plot.AnimatedHeatmap(\n torch.stack([layer.max(0)[0][ns1:ns1_e, ns2:ns2_e] for _, layer in enumerate(self.attention_to_visualize)], 0),\n ylabel=\"dest\", xlabel=\"src\", textval=False, x_marks=marks, y_marks=y_marks, ignore_wrong_marks=True)\n\n self.attention_to_visualize = []\n self.selections_to_visualize = {}\n return r\n\n def dump_logs(self, save_dir: str):\n if torch.is_tensor(self.sel_correlation):\n os.makedirs(save_dir, exist_ok=True)\n torch.save(self.sel_correlation, os.path.join(save_dir, \"sel_correlation.pt\"))\n\n def get_logs(self) -> Dict[str, Any]:\n res = super().get_logs()\n\n if torch.is_tensor(self.sel_correlation):\n coo = self.sel_correlation / self.sel_correlation.flatten(1).sum(-1).clamp(min=1)[:, None, None]\n for h in range(self.n_heads):\n res[f\"expert_coocurence_{h}\"] = framework.visualize.plot.Heatmap(coo[h], xlabel=\"o expert\", ylabel=\"v expert\", textval=False)\n self.sel_correlation = 0\n return res" } ]
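The context snippets above implement sigma-MoE-style routing: a linear selection layer scores the experts, a sigmoid (or softmax) gate is applied, only the top-k experts per token are evaluated, and an entropy regularizer on the batch-averaged selection distribution discourages dead experts. Below is a minimal dense sketch of that pattern, using plain einsum in place of the repo's cvmm kernel; the names moe_forward and entropy_reg and all shapes are illustrative assumptions, not the repo's API.

import math

import torch
import torch.nn.functional as F

def moe_forward(x, expert_sel, keys, values, k=2):
    # x: [batch, d_model]; expert_sel: [n_experts, d_model]
    # keys: [n_experts, d_model, d_ff]; values: [n_experts, d_ff, d_model]
    logits = F.linear(x, expert_sel)                  # raw selection scores
    gate = torch.sigmoid(logits)                      # "sigmoid" selection mode
    sel_val, sel_idx = gate.topk(k, dim=-1, sorted=False)
    out = torch.zeros_like(x)
    for j in range(k):                                # dense stand-in for cvmm
        e = sel_idx[:, j]                             # expert chosen per token
        h = F.relu(torch.einsum("bd,bdf->bf", x, keys[e]))
        out = out + sel_val[:, j, None] * torch.einsum("bf,bfd->bd", h, values[e])
    return out, logits

def entropy_reg(logits, coeff):
    # entropy of the batch-averaged selection distribution; the loss is the
    # negated entropy, so minimizing it pushes expert usage toward uniform
    logp = F.log_softmax(logits, dim=-1)
    logp = torch.logsumexp(logp, dim=0) - math.log(logits.shape[0])  # log of mean prob
    entropy = -(logp.exp() * logp).sum()
    return -coeff * entropy

# toy shapes: 4 tokens, d_model=16, d_ff=32, 8 experts, top-2 routing
x = torch.randn(4, 16)
out, logits = moe_forward(x, torch.randn(8, 16) / 4, torch.randn(8, 16, 32) / 4,
                          torch.randn(8, 32, 16) / 32 ** 0.5, k=2)
reg_loss = entropy_reg(logits, coeff=0.01)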
import framework import torch import torch.nn import torch.nn.functional as F import torch.utils.data import math from typing import List, Tuple, Dict, Any from models import TransformerLanguageModel from ... import task, args from layers.transformer import RelativeTransformerEncoderLayer, PrelnRelativeTransformerEncoderLayer from layers.transformer.relative_moe_transformer import RelativeMoeTransformerEncoderLayer from layers.transformer.fast_rope_transformer import FastRopeTransformerEncoderLayer from layers.transformer.moe_attention_relative_transformer import MoeAttentionRelativeTransformerEncoderLayer from layers.moe_layer import MoE from interfaces import Result from layers import LayerVisualizer from layers.transformer.full_moe_relative_attention import FullMoeRelativeAttentionCore
20,164
parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { 
"activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}: mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer( **base_args, **extra_args, moe_att_n_experts=self.helper.args.moe.att.n_experts, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, att_perplexity_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, att_selection_mode=self.helper.args.moe.att.selection_mode, preln=self.is_preln(), attention_variant=self.helper.args.moe.att.variant, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, norm_qk_score=self.helper.args.moe.att.norm_qk, v_projection_size=self.helper.args.moe.att.v_size, same_sel=self.helper.args.moe.att.same_sel, moe_k=self.helper.args.moe.att.k, qside_n_experts=self.helper.args.moe.att.qside_n_experts, shared_experts=self.helper.args.moe.att.shared_experts, kq_n_experts=self.helper.args.moe.att.kq_n_experts, separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, moa_mode=self.helper.args.moa.mode, cvloss=self.helper.args.moa.cvloss, switchloss=self.helper.args.moa.switchloss, zloss=self.helper.args.moa.zloss, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moeatt_norm_init=self.helper.args.moe.att.norm_init) elif self.helper.args.transformer.variant in {"preln_rope", "rope"}: mklayer = lambda: FastRopeTransformerEncoderLayer( **base_args, **extra_args, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base) elif self.helper.args.transformer.variant in {"preln_moe", "moe"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,
@args def a(parser: framework.helpers.ArgumentParser): parser.add_argument("-lm.trafo.context_blocks", default=1) parser.add_argument("-lm.trafo.test_context_blocks", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.test_pos_clamp", default="none", parser=parser.int_or_none_parser) parser.add_argument("-lm.trafo.same_length_eval", default=False) parser.add_argument("-lm.trafo.same_length", default=False) parser.add_argument("-lm.trafo.last_layer_context", default=False) parser.add_argument("-lm.trafo.xl_init", default=False) parser.add_argument("-lm.trafo.embedding_mode_init", default="default", choice=["default", "scale_to_sqrt_dmodel", "init_to_sqrt_dmodel", "one_and_scale_to_sqrt_dmodel", "like_preln"]) parser.add_argument("-pkm.n_heads", default=1) parser.add_argument("-moe.n_experts", default=128) parser.add_argument("-moe.expert_size", default=128) parser.add_argument("-moe.selection_mode", default="sigmoid", choice=["gate", "sigmoid", "mul"]) parser.add_argument("-moe.perplexity_reg", default=0.0) parser.add_argument("-moe.perplexity_reg_mode", default="step", choice=["step", "global", "time", "global_time"]) parser.add_argument("-moe.reg_type", default="entropy", choice=["perplexity", "variance", "entropy", "l2", "switch", "normal"]) parser.add_argument("-moe.norm_keys", default=False) parser.add_argument("-moe.n_random", default=0) parser.add_argument("-moe.topk_mode", default="full", choice=["full", "l1_approx", "approx"]) parser.add_argument("-moe.activation_after_topk", default=False) parser.add_argument("-moe.drop_parallel", default=True) parser.add_argument("-moe.norm_key_init", default=False) parser.add_argument("-moe.norm_value_init", default=False) parser.add_argument("-moe.identical_init", default=False) parser.add_argument("-moe.sel_lr_multipler", default=1.0) parser.add_argument("-moe.expert_lr_multipler", default=1.0) parser.add_argument("-moe.sel_norm", default="none", choice=["none", "cos", "input", "weights"]) parser.add_argument("-moe.dropout_factor", default=1.0) parser.add_argument("-moe.drop_expert", default=0.0) parser.add_argument("-moe.sync_distributed", default=True) parser.add_argument("-moe.modulation_amplitude", default=0.5) parser.add_argument("-moe.init_scale", default=1.0) parser.add_argument("-moe.norm_expert_sel_init", default=False) parser.add_argument("-kvmem.dropout", default="none", choice=["none", "early", "late", "weight", "score"]) parser.add_argument("-kvmem.norm_values", default=False) parser.add_argument("-transformer.topk_value", default=32) parser.add_argument("-transformer.activation", default="relu", choice=["relu", "topk", "gelu", "identity", "sigmoid", "softmax"]) parser.add_argument("-transformer.p_drop_layer", default=0.0) parser.add_argument("-transformer.head_projection_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.ln_affine", default=True) parser.add_argument("-transformer.ln_after_attention", default=True) parser.add_argument("-moe.att.n_experts", default=4) parser.add_argument("-moe.att.variant", default="moa", choice=["moa", "simple", "qside", "full", "full_rope", "seq", "target"]) parser.add_argument("-moe.att.enable", default=False) parser.add_argument("-moe.att.q_expert", default=True) parser.add_argument("-moe.att.k_expert", default=True) parser.add_argument("-moe.att.v_expert", default=True) parser.add_argument("-moe.att.o_expert", default=True) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_qk", default=False) 
parser.add_argument("-moe.att.v_size", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.same_sel", default=False) parser.add_argument("-moe.att.expert_dropout", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.selection_mode", default="sigmoid", choice=["sigmoid", "softmax"]) parser.add_argument("-moe.att.perplexity_reg", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.qside_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.k", default=2) parser.add_argument("-moe.att.norm_ret", default=False) parser.add_argument("-moe.att.shared_experts", default=False) parser.add_argument("-moe.att.drop_expert", default="none", parser=parser.float_or_none_parser) parser.add_argument("-moe.att.kq_n_experts", default="none", parser=parser.int_or_none_parser) parser.add_argument("-moe.att.separate_kq_sel", default=False) parser.add_argument("-moe.att.norm_init", default=False) parser.add_argument("-rope.rotate_fraction", default=0.5) parser.add_argument("-rope.base", default=10000.0) parser.add_argument("-moa.mode", default="my", choice=["my", "moa"]) parser.add_argument("-moa.cvloss", default=0.0) parser.add_argument("-moa.switchloss", default=0.0) parser.add_argument("-moa.zloss", default=0.0) parser.add_argument("-debug_plot_interval", default="none", parser=parser.int_or_none_parser) parser.add_argument("-transformer.plot_head_details", default=False) parser.add_argument("-plot.n_steps", default=-128) @task() class TransformerLMMixin: helper: framework.helpers.TrainingHelper def is_preln(self) -> bool: return "preln" in self.helper.args.transformer.variant def topk_activation(self, x: torch.Tensor) -> torch.Tensor: nx = -x return torch.masked_fill(x, nx <= nx.kthvalue(self.helper.args.transformer.topk_value, keepdim=True)[0], 0) def get_layers(self) -> List[torch.nn.Module]: # pyright: reportOptionalMemberAccess=false if self.helper.args.transformer.activation == "relu": activation = F.relu elif self.helper.args.transformer.activation == "topk": activation = self.topk_activation elif self.helper.args.transformer.activation == "identity": activation = lambda x: x elif self.helper.args.transformer.activation == "sigmoid": activation = torch.sigmoid elif self.helper.args.transformer.activation == "gelu": activation = F.gelu elif self.helper.args.transformer.activation == "softmax": activation = lambda x: F.softmax(x, dim=-1) else: raise ValueError(f"Invalid activation: {self.helper.args.transformer.activation}") base_args = dict( d_model=self.helper.args.state_size, nhead=self.helper.args.transformer.n_heads, dropout=self.helper.args.dropout, activation=activation ) if self.helper.args.transformer.variant not in {"preln_moe", "moe"}: base_args["dim_feedforward"]=int(self.helper.args.state_size * self.helper.args.transformer.ff_multiplier) extra_args = {} if not self.helper.args.transformer.variant.endswith("_gelu") else { "activation": F.gelu, "drop_expand": False } if self.helper.args.transformer.variant in {"preln_relative"}: mklayer = lambda: PrelnRelativeTransformerEncoderLayer( **base_args, **extra_args, test_pos_clamp=self.helper.args.lm.trafo.test_pos_clamp, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size,) elif self.helper.args.transformer.variant in {"preln_moeatt"}: mklayer = lambda: MoeAttentionRelativeTransformerEncoderLayer( **base_args, **extra_args, 
moe_att_n_experts=self.helper.args.moe.att.n_experts, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, att_perplexity_reg=self.helper.args.moe.perplexity_reg if self.helper.args.moe.att.perplexity_reg is None else self.helper.args.moe.att.perplexity_reg, expert_dropout=self.helper.args.moe.drop_expert if self.helper.args.moe.att.drop_expert is None else self.helper.args.moe.att.drop_expert, att_selection_mode=self.helper.args.moe.att.selection_mode, preln=self.is_preln(), attention_variant=self.helper.args.moe.att.variant, q_expert=self.helper.args.moe.att.q_expert, k_expert=self.helper.args.moe.att.k_expert, v_expert=self.helper.args.moe.att.v_expert, o_expert=self.helper.args.moe.att.o_expert, norm_qk_score=self.helper.args.moe.att.norm_qk, v_projection_size=self.helper.args.moe.att.v_size, same_sel=self.helper.args.moe.att.same_sel, moe_k=self.helper.args.moe.att.k, qside_n_experts=self.helper.args.moe.att.qside_n_experts, shared_experts=self.helper.args.moe.att.shared_experts, kq_n_experts=self.helper.args.moe.att.kq_n_experts, separate_kq_sel=self.helper.args.moe.att.separate_kq_sel, moa_mode=self.helper.args.moa.mode, cvloss=self.helper.args.moa.cvloss, switchloss=self.helper.args.moa.switchloss, zloss=self.helper.args.moa.zloss, rotate_fraction=self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base, moeatt_norm_init=self.helper.args.moe.att.norm_init) elif self.helper.args.transformer.variant in {"preln_rope", "rope"}: mklayer = lambda: FastRopeTransformerEncoderLayer( **base_args, **extra_args, n_layers=self.helper.args.transformer.encoder_n_layers, head_projection_size=self.helper.args.transformer.head_projection_size, preln=self.is_preln(), rotate_fraction = self.helper.args.rope.rotate_fraction, rope_base=self.helper.args.rope.base) elif self.helper.args.transformer.variant in {"preln_moe", "moe"}: # def __init__(self, d_model, nhead, n_bins: int, bin_size: int, n_layers: int, dim_feedforward=2048,
mklayer = lambda: RelativeMoeTransformerEncoderLayer(
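The line above is the record's next_line field: the gold completion the model must emit immediately after cropped_code ends, and gold_snippet_index (the 5 below) marks which context snippet holds the definition it needs. Benchmarks of this shape are commonly scored by whitespace-normalized exact match against next_line; the helper below is a sketch of that convention, not code shipped with the dataset.

def exact_match(pred: str, gold: str) -> bool:
    # compare a predicted line to the gold next_line, collapsing whitespace
    norm = lambda s: " ".join(s.split())
    return norm(pred) == norm(gold)

gold = "mklayer = lambda: RelativeMoeTransformerEncoderLayer("
print(exact_match("mklayer =  lambda: RelativeMoeTransformerEncoderLayer(", gold))  # True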
5
2023-12-13 08:45:02+00:00
24k
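That closes one record: repo_name, file_path, context (a list of {identifier, path, snippet} entries), import_statement, token_num, cropped_code, all_code, next_line, gold_snippet_index, created_at, and level. Assuming the dump is stored as JSON Lines (records.jsonl is a hypothetical file name), one record turns into a prompt/target pair like this:

import json

def iter_examples(path: str = "records.jsonl"):  # hypothetical file name
    with open(path) as f:
        for line in f:
            rec = json.loads(line)
            gold_ctx = rec["context"][rec["gold_snippet_index"]]  # e.g. index 5 above
            # prepend the gold snippet and the file's imports to the cropped body
            prompt = "\n".join([gold_ctx["snippet"], rec["import_statement"], rec["cropped_code"]])
            yield prompt, rec["next_line"]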
AIFSH/NativeDancer
nativedancer/third_part/detectron2/utils/visualizer.py
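The context list that follows quotes the detectron2 structures this visualizer draws: Boxes, BoxMode, Keypoints, BitMasks, and PolygonMasks. A short usage sketch of the quoted Boxes/BoxMode API, assuming detectron2 is installed:

import torch
from detectron2.structures import Boxes, BoxMode

# convert one (x0, y0, w, h) box to (x0, y0, x1, y1), per BoxMode.convert's docstring
xyxy = BoxMode.convert([10.0, 20.0, 30.0, 40.0], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
boxes = Boxes(torch.tensor([xyxy]))    # Nx4 float tensor, one (x1, y1, x2, y2) row per box
boxes.clip((50, 50))                   # clip in place to a 50x50 (height, width) image
print(boxes.area(), boxes.nonempty())  # per-box area; True where non-degenerate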
[ { "identifier": "MetadataCatalog", "path": "nativedancer/third_part/detectron2/data/catalog.py", "snippet": "class _DatasetCatalog(UserDict):\nclass Metadata(types.SimpleNamespace):\nclass _MetadataCatalog(UserDict):\n def register(self, name, func):\n def get(self, name):\n def list(self) -> List[str]:\n def remove(self, name):\n def __str__(self):\n def __getattr__(self, key):\n def __setattr__(self, key, val):\n def as_dict(self):\n def set(self, **kwargs):\n def get(self, key, default=None):\n def get(self, name):\n def list(self):\n def remove(self, name):\n def __str__(self):\n _RENAMED = {\n \"class_names\": \"thing_classes\",\n \"dataset_id_to_contiguous_id\": \"thing_dataset_id_to_contiguous_id\",\n \"stuff_class_names\": \"stuff_classes\",\n }" }, { "identifier": "Boxes", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "class Boxes:\n \"\"\"\n This structure stores a list of boxes as a Nx4 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n\n Attributes:\n tensor (torch.Tensor): float matrix of Nx4. Each row is (x1, y1, x2, y2).\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx4 matrix. Each row is (x1, y1, x2, y2).\n \"\"\"\n if not isinstance(tensor, torch.Tensor):\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=torch.device(\"cpu\"))\n else:\n tensor = tensor.to(torch.float32)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((-1, 4)).to(dtype=torch.float32)\n assert tensor.dim() == 2 and tensor.size(-1) == 4, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"Boxes\":\n \"\"\"\n Clone the Boxes.\n\n Returns:\n Boxes\n \"\"\"\n return Boxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return Boxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = (box[:, 2] - box[:, 0]) * (box[:, 3] - box[:, 1])\n return area\n\n def clip(self, box_size: Tuple[int, int]) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n Args:\n box_size (height, width): The clipping box's size.\n \"\"\"\n assert torch.isfinite(self.tensor).all(), \"Box tensor contains infinite or NaN!\"\n h, w = box_size\n x1 = self.tensor[:, 0].clamp(min=0, max=w)\n y1 = self.tensor[:, 1].clamp(min=0, max=h)\n x2 = self.tensor[:, 2].clamp(min=0, max=w)\n y2 = self.tensor[:, 3].clamp(min=0, max=h)\n self.tensor = torch.stack((x1, y1, x2, y2), dim=-1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor:\n a binary vector which represents whether each box is empty\n (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2] - box[:, 0]\n heights = box[:, 3] - box[:, 1]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"Boxes\":\n \"\"\"\n Args:\n item: int, slice, or a 
BoolTensor\n\n Returns:\n Boxes: Create a new :class:`Boxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `Boxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.BoolTensor\n with `length = len(boxes)`. Nonzero elements in the vector will be selected.\n\n Note that the returned Boxes might share storage with this Boxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Boxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on Boxes with {} failed to return a matrix!\".format(item)\n return Boxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"Boxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box.\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n inds_inside = (\n (self.tensor[..., 0] >= -boundary_threshold)\n & (self.tensor[..., 1] >= -boundary_threshold)\n & (self.tensor[..., 2] < width + boundary_threshold)\n & (self.tensor[..., 3] < height + boundary_threshold)\n )\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return (self.tensor[:, :2] + self.tensor[:, 2:]) / 2\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the box with horizontal and vertical scaling factors\n \"\"\"\n self.tensor[:, 0::2] *= scale_x\n self.tensor[:, 1::2] *= scale_y\n\n @classmethod\n def cat(cls, boxes_list: List[\"Boxes\"]) -> \"Boxes\":\n \"\"\"\n Concatenates a list of Boxes into a single Boxes\n\n Arguments:\n boxes_list (list[Boxes])\n\n Returns:\n Boxes: the concatenated Boxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, Boxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> device:\n return self.tensor.device\n\n # type \"Iterator[torch.Tensor]\", yield, and iter() not supported by torchscript\n # https://github.com/pytorch/pytorch/issues/18627\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (4,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "BoxMode", "path": "nativedancer/third_part/detectron2/structures/boxes.py", "snippet": "class BoxMode(IntEnum):\n \"\"\"\n Enum of different ways to represent a box.\n \"\"\"\n\n XYXY_ABS = 0\n \"\"\"\n (x0, y0, x1, y1) in absolute floating points coordinates.\n The coordinates in range [0, width or height].\n \"\"\"\n XYWH_ABS = 1\n \"\"\"\n (x0, y0, w, h) in absolute floating points coordinates.\n \"\"\"\n XYXY_REL = 2\n \"\"\"\n Not yet supported!\n (x0, y0, x1, y1) in range [0, 1]. They are relative to the size of the image.\n \"\"\"\n XYWH_REL = 3\n \"\"\"\n Not yet supported!\n (x0, y0, w, h) in range [0, 1]. 
They are relative to the size of the image.\n \"\"\"\n XYWHA_ABS = 4\n \"\"\"\n (xc, yc, w, h, a) in absolute floating points coordinates.\n (xc, yc) is the center of the rotated box, and the angle a is in degrees ccw.\n \"\"\"\n\n @staticmethod\n def convert(box: _RawBoxType, from_mode: \"BoxMode\", to_mode: \"BoxMode\") -> _RawBoxType:\n \"\"\"\n Args:\n box: can be a k-tuple, k-list or an Nxk array/tensor, where k = 4 or 5\n from_mode, to_mode (BoxMode)\n\n Returns:\n The converted box of the same type.\n \"\"\"\n if from_mode == to_mode:\n return box\n\n original_type = type(box)\n is_numpy = isinstance(box, np.ndarray)\n single_box = isinstance(box, (list, tuple))\n if single_box:\n assert len(box) == 4 or len(box) == 5, (\n \"BoxMode.convert takes either a k-tuple/list or an Nxk array/tensor,\"\n \" where k == 4 or 5\"\n )\n arr = torch.tensor(box)[None, :]\n else:\n # avoid modifying the input box\n if is_numpy:\n arr = torch.from_numpy(np.asarray(box)).clone()\n else:\n arr = box.clone()\n\n assert to_mode not in [BoxMode.XYXY_REL, BoxMode.XYWH_REL] and from_mode not in [\n BoxMode.XYXY_REL,\n BoxMode.XYWH_REL,\n ], \"Relative mode not yet supported!\"\n\n if from_mode == BoxMode.XYWHA_ABS and to_mode == BoxMode.XYXY_ABS:\n assert (\n arr.shape[-1] == 5\n ), \"The last dimension of input shape must be 5 for XYWHA format\"\n original_dtype = arr.dtype\n arr = arr.double()\n\n w = arr[:, 2]\n h = arr[:, 3]\n a = arr[:, 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n new_w = c * w + s * h\n new_h = c * h + s * w\n\n # convert center to top-left corner\n arr[:, 0] -= new_w / 2.0\n arr[:, 1] -= new_h / 2.0\n # bottom-right corner\n arr[:, 2] = arr[:, 0] + new_w\n arr[:, 3] = arr[:, 1] + new_h\n\n arr = arr[:, :4].to(dtype=original_dtype)\n elif from_mode == BoxMode.XYWH_ABS and to_mode == BoxMode.XYWHA_ABS:\n original_dtype = arr.dtype\n arr = arr.double()\n arr[:, 0] += arr[:, 2] / 2.0\n arr[:, 1] += arr[:, 3] / 2.0\n angles = torch.zeros((arr.shape[0], 1), dtype=arr.dtype)\n arr = torch.cat((arr, angles), axis=1).to(dtype=original_dtype)\n else:\n if to_mode == BoxMode.XYXY_ABS and from_mode == BoxMode.XYWH_ABS:\n arr[:, 2] += arr[:, 0]\n arr[:, 3] += arr[:, 1]\n elif from_mode == BoxMode.XYXY_ABS and to_mode == BoxMode.XYWH_ABS:\n arr[:, 2] -= arr[:, 0]\n arr[:, 3] -= arr[:, 1]\n else:\n raise NotImplementedError(\n \"Conversion from BoxMode {} to {} is not supported yet\".format(\n from_mode, to_mode\n )\n )\n\n if single_box:\n return original_type(arr.flatten().tolist())\n if is_numpy:\n return arr.numpy()\n else:\n return arr" }, { "identifier": "Keypoints", "path": "nativedancer/third_part/detectron2/structures/keypoints.py", "snippet": "class Keypoints:\n \"\"\"\n Stores keypoint **annotation** data. GT Instances have a `gt_keypoints` property\n containing the x,y location and visibility flag of each keypoint. 
This tensor has shape\n (N, K, 3) where N is the number of instances and K is the number of keypoints per instance.\n\n The visibility flag follows the COCO format and must be one of three integers:\n\n * v=0: not labeled (in which case x=y=0)\n * v=1: labeled but not visible\n * v=2: labeled and visible\n \"\"\"\n\n def __init__(self, keypoints: Union[torch.Tensor, np.ndarray, List[List[float]]]):\n \"\"\"\n Arguments:\n keypoints: A Tensor, numpy array, or list of the x, y, and visibility of each keypoint.\n The shape should be (N, K, 3) where N is the number of\n instances, and K is the number of keypoints per instance.\n \"\"\"\n device = keypoints.device if isinstance(keypoints, torch.Tensor) else torch.device(\"cpu\")\n keypoints = torch.as_tensor(keypoints, dtype=torch.float32, device=device)\n assert keypoints.dim() == 3 and keypoints.shape[2] == 3, keypoints.shape\n self.tensor = keypoints\n\n def __len__(self) -> int:\n return self.tensor.size(0)\n\n def to(self, *args: Any, **kwargs: Any) -> \"Keypoints\":\n return type(self)(self.tensor.to(*args, **kwargs))\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n def to_heatmap(self, boxes: torch.Tensor, heatmap_size: int) -> torch.Tensor:\n \"\"\"\n Convert keypoint annotations to a heatmap of one-hot labels for training,\n as described in :paper:`Mask R-CNN`.\n\n Arguments:\n boxes: Nx4 tensor, the boxes to draw the keypoints to\n\n Returns:\n heatmaps:\n A tensor of shape (N, K), each element is integer spatial label\n in the range [0, heatmap_size**2 - 1] for each keypoint in the input.\n valid:\n A tensor of shape (N, K) containing whether each keypoint is in the roi or not.\n \"\"\"\n return _keypoints_to_heatmap(self.tensor, boxes, heatmap_size)\n\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"Keypoints\":\n \"\"\"\n Create a new `Keypoints` by indexing on this `Keypoints`.\n\n The following usage are allowed:\n\n 1. `new_kpts = kpts[3]`: return a `Keypoints` which contains only one instance.\n 2. `new_kpts = kpts[2:10]`: return a slice of key points.\n 3. `new_kpts = kpts[vector]`, where vector is a torch.ByteTensor\n with `length = len(kpts)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned Keypoints might share storage with this Keypoints,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return Keypoints([self.tensor[item]])\n return Keypoints(self.tensor[item])\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.tensor))\n return s\n\n @staticmethod\n def cat(keypoints_list: List[\"Keypoints\"]) -> \"Keypoints\":\n \"\"\"\n Concatenates a list of Keypoints into a single Keypoints\n\n Arguments:\n keypoints_list (list[Keypoints])\n\n Returns:\n Keypoints: the concatenated Keypoints\n \"\"\"\n assert isinstance(keypoints_list, (list, tuple))\n assert len(keypoints_list) > 0\n assert all(isinstance(keypoints, Keypoints) for keypoints in keypoints_list)\n\n cat_kpts = type(keypoints_list[0])(\n torch.cat([kpts.tensor for kpts in keypoints_list], dim=0)\n )\n return cat_kpts" }, { "identifier": "BitMasks", "path": "nativedancer/third_part/detectron2/structures/masks.py", "snippet": "class BitMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in\n the form of bitmaps.\n\n Attributes:\n tensor: bool Tensor of N,H,W, representing N instances in the image.\n \"\"\"\n\n def __init__(self, tensor: Union[torch.Tensor, np.ndarray]):\n \"\"\"\n Args:\n tensor: bool Tensor of N,H,W, representing N instances in the image.\n \"\"\"\n if isinstance(tensor, torch.Tensor):\n tensor = tensor.to(torch.bool)\n else:\n tensor = torch.as_tensor(tensor, dtype=torch.bool, device=torch.device(\"cpu\"))\n assert tensor.dim() == 3, tensor.size()\n self.image_size = tensor.shape[1:]\n self.tensor = tensor\n\n @torch.jit.unused\n def to(self, *args: Any, **kwargs: Any) -> \"BitMasks\":\n return BitMasks(self.tensor.to(*args, **kwargs))\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __getitem__(self, item: Union[int, slice, torch.BoolTensor]) -> \"BitMasks\":\n \"\"\"\n Returns:\n BitMasks: Create a new :class:`BitMasks` by indexing.\n\n The following usage are allowed:\n\n 1. `new_masks = masks[3]`: return a `BitMasks` which contains only one mask.\n 2. `new_masks = masks[2:10]`: return a slice of masks.\n 3. `new_masks = masks[vector]`, where vector is a torch.BoolTensor\n with `length = len(masks)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned object might share storage with this object,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return BitMasks(self.tensor[item].unsqueeze(0))\n m = self.tensor[item]\n assert m.dim() == 3, \"Indexing on BitMasks with {} returns a tensor with shape {}!\".format(\n item, m.shape\n )\n return BitMasks(m)\n\n @torch.jit.unused\n def __iter__(self) -> torch.Tensor:\n yield from self.tensor\n\n @torch.jit.unused\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.tensor))\n return s\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor: a BoolTensor which represents\n whether each mask is empty (False) or non-empty (True).\n \"\"\"\n return self.tensor.flatten(1).any(dim=1)\n\n @staticmethod\n def from_polygon_masks(\n polygon_masks: Union[\"PolygonMasks\", List[List[np.ndarray]]], height: int, width: int\n ) -> \"BitMasks\":\n \"\"\"\n Args:\n polygon_masks (list[list[ndarray]] or PolygonMasks)\n height, width (int)\n \"\"\"\n if isinstance(polygon_masks, PolygonMasks):\n polygon_masks = polygon_masks.polygons\n masks = [polygons_to_bitmask(p, height, width) for p in polygon_masks]\n if len(masks):\n return BitMasks(torch.stack([torch.from_numpy(x) for x in masks]))\n else:\n return BitMasks(torch.empty(0, height, width, dtype=torch.bool))\n\n @staticmethod\n def from_roi_masks(roi_masks: \"ROIMasks\", height: int, width: int) -> \"BitMasks\":\n \"\"\"\n Args:\n roi_masks:\n height, width (int):\n \"\"\"\n return roi_masks.to_bitmasks(height, width)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each bitmask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n It has less reconstruction error compared to rasterization with polygons.\n However we observe no difference in accuracy,\n but BitMasks requires more memory to store all the masks.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor:\n A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n device = self.tensor.device\n\n batch_inds = torch.arange(len(boxes), device=device).to(dtype=boxes.dtype)[:, None]\n rois = torch.cat([batch_inds, boxes], dim=1) # Nx5\n\n bit_masks = self.tensor.to(dtype=torch.float32)\n rois = rois.to(device=device)\n output = (\n ROIAlign((mask_size, mask_size), 1.0, 0, aligned=True)\n .forward(bit_masks[:, None, :, :], rois)\n .squeeze(1)\n )\n output = output >= 0.5\n return output\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around bitmasks.\n If a mask is empty, it's bounding box will be all zero.\n \"\"\"\n boxes = torch.zeros(self.tensor.shape[0], 4, dtype=torch.float32)\n x_any = torch.any(self.tensor, dim=1)\n y_any = torch.any(self.tensor, dim=2)\n for idx in range(self.tensor.shape[0]):\n x = torch.where(x_any[idx, :])[0]\n y = torch.where(y_any[idx, :])[0]\n if len(x) > 0 and len(y) > 0:\n boxes[idx, :] = torch.as_tensor(\n [x[0], y[0], x[-1] + 1, y[-1] + 1], dtype=torch.float32\n )\n return Boxes(boxes)\n\n @staticmethod\n 
def cat(bitmasks_list: List[\"BitMasks\"]) -> \"BitMasks\":\n \"\"\"\n Concatenates a list of BitMasks into a single BitMasks\n\n Arguments:\n bitmasks_list (list[BitMasks])\n\n Returns:\n BitMasks: the concatenated BitMasks\n \"\"\"\n assert isinstance(bitmasks_list, (list, tuple))\n assert len(bitmasks_list) > 0\n assert all(isinstance(bitmask, BitMasks) for bitmask in bitmasks_list)\n\n cat_bitmasks = type(bitmasks_list[0])(torch.cat([bm.tensor for bm in bitmasks_list], dim=0))\n return cat_bitmasks" }, { "identifier": "PolygonMasks", "path": "nativedancer/third_part/detectron2/structures/masks.py", "snippet": "class PolygonMasks:\n \"\"\"\n This class stores the segmentation masks for all objects in one image, in the form of polygons.\n\n Attributes:\n polygons: list[list[ndarray]]. Each ndarray is a float64 vector representing a polygon.\n \"\"\"\n\n def __init__(self, polygons: List[List[Union[torch.Tensor, np.ndarray]]]):\n \"\"\"\n Arguments:\n polygons (list[list[np.ndarray]]): The first\n level of the list correspond to individual instances,\n the second level to all the polygons that compose the\n instance, and the third level to the polygon coordinates.\n The third level array should have the format of\n [x0, y0, x1, y1, ..., xn, yn] (n >= 3).\n \"\"\"\n if not isinstance(polygons, list):\n raise ValueError(\n \"Cannot create PolygonMasks: Expect a list of list of polygons per image. \"\n \"Got '{}' instead.\".format(type(polygons))\n )\n\n def _make_array(t: Union[torch.Tensor, np.ndarray]) -> np.ndarray:\n # Use float64 for higher precision, because why not?\n # Always put polygons on CPU (self.to is a no-op) since they\n # are supposed to be small tensors.\n # May need to change this assumption if GPU placement becomes useful\n if isinstance(t, torch.Tensor):\n t = t.cpu().numpy()\n return np.asarray(t).astype(\"float64\")\n\n def process_polygons(\n polygons_per_instance: List[Union[torch.Tensor, np.ndarray]]\n ) -> List[np.ndarray]:\n if not isinstance(polygons_per_instance, list):\n raise ValueError(\n \"Cannot create polygons: Expect a list of polygons per instance. 
\"\n \"Got '{}' instead.\".format(type(polygons_per_instance))\n )\n # transform each polygon to a numpy array\n polygons_per_instance = [_make_array(p) for p in polygons_per_instance]\n for polygon in polygons_per_instance:\n if len(polygon) % 2 != 0 or len(polygon) < 6:\n raise ValueError(f\"Cannot create a polygon from {len(polygon)} coordinates.\")\n return polygons_per_instance\n\n self.polygons: List[List[np.ndarray]] = [\n process_polygons(polygons_per_instance) for polygons_per_instance in polygons\n ]\n\n def to(self, *args: Any, **kwargs: Any) -> \"PolygonMasks\":\n return self\n\n @property\n def device(self) -> torch.device:\n return torch.device(\"cpu\")\n\n def get_bounding_boxes(self) -> Boxes:\n \"\"\"\n Returns:\n Boxes: tight bounding boxes around polygon masks.\n \"\"\"\n boxes = torch.zeros(len(self.polygons), 4, dtype=torch.float32)\n for idx, polygons_per_instance in enumerate(self.polygons):\n minxy = torch.as_tensor([float(\"inf\"), float(\"inf\")], dtype=torch.float32)\n maxxy = torch.zeros(2, dtype=torch.float32)\n for polygon in polygons_per_instance:\n coords = torch.from_numpy(polygon).view(-1, 2).to(dtype=torch.float32)\n minxy = torch.min(minxy, torch.min(coords, dim=0).values)\n maxxy = torch.max(maxxy, torch.max(coords, dim=0).values)\n boxes[idx, :2] = minxy\n boxes[idx, 2:] = maxxy\n return Boxes(boxes)\n\n def nonempty(self) -> torch.Tensor:\n \"\"\"\n Find masks that are non-empty.\n\n Returns:\n Tensor:\n a BoolTensor which represents whether each mask is empty (False) or not (True).\n \"\"\"\n keep = [1 if len(polygon) > 0 else 0 for polygon in self.polygons]\n return torch.from_numpy(np.asarray(keep, dtype=bool))\n\n def __getitem__(self, item: Union[int, slice, List[int], torch.BoolTensor]) -> \"PolygonMasks\":\n \"\"\"\n Support indexing over the instances and return a `PolygonMasks` object.\n `item` can be:\n\n 1. An integer. It will return an object with only one instance.\n 2. A slice. It will return an object with the selected instances.\n 3. A list[int]. It will return an object with the selected instances,\n correpsonding to the indices in the list.\n 4. 
A vector mask of type BoolTensor, whose length is num_instances.\n It will return an object with the instances whose mask is nonzero.\n \"\"\"\n if isinstance(item, int):\n selected_polygons = [self.polygons[item]]\n elif isinstance(item, slice):\n selected_polygons = self.polygons[item]\n elif isinstance(item, list):\n selected_polygons = [self.polygons[i] for i in item]\n elif isinstance(item, torch.Tensor):\n # Polygons is a list, so we have to move the indices back to CPU.\n if item.dtype == torch.bool:\n assert item.dim() == 1, item.shape\n item = item.nonzero().squeeze(1).cpu().numpy().tolist()\n elif item.dtype in [torch.int32, torch.int64]:\n item = item.cpu().numpy().tolist()\n else:\n raise ValueError(\"Unsupported tensor dtype={} for indexing!\".format(item.dtype))\n selected_polygons = [self.polygons[i] for i in item]\n return PolygonMasks(selected_polygons)\n\n def __iter__(self) -> Iterator[List[np.ndarray]]:\n \"\"\"\n Yields:\n list[ndarray]: the polygons for one instance.\n Each Tensor is a float64 vector representing a polygon.\n \"\"\"\n return iter(self.polygons)\n\n def __repr__(self) -> str:\n s = self.__class__.__name__ + \"(\"\n s += \"num_instances={})\".format(len(self.polygons))\n return s\n\n def __len__(self) -> int:\n return len(self.polygons)\n\n def crop_and_resize(self, boxes: torch.Tensor, mask_size: int) -> torch.Tensor:\n \"\"\"\n Crop each mask by the given box, and resize results to (mask_size, mask_size).\n This can be used to prepare training targets for Mask R-CNN.\n\n Args:\n boxes (Tensor): Nx4 tensor storing the boxes for each mask\n mask_size (int): the size of the rasterized mask.\n\n Returns:\n Tensor: A bool tensor of shape (N, mask_size, mask_size), where\n N is the number of predicted boxes for this image.\n \"\"\"\n assert len(boxes) == len(self), \"{} != {}\".format(len(boxes), len(self))\n\n device = boxes.device\n # Put boxes on the CPU, as the polygon representation is not efficient GPU-wise\n # (several small tensors for representing a single instance mask)\n boxes = boxes.to(torch.device(\"cpu\"))\n\n results = [\n rasterize_polygons_within_box(poly, box.numpy(), mask_size)\n for poly, box in zip(self.polygons, boxes)\n ]\n \"\"\"\n poly: list[list[float]], the polygons for one instance\n box: a tensor of shape (4,)\n \"\"\"\n if len(results) == 0:\n return torch.empty(0, mask_size, mask_size, dtype=torch.bool, device=device)\n return torch.stack(results, dim=0).to(device=device)\n\n def area(self):\n \"\"\"\n Computes area of the mask.\n Only works with Polygons, using the shoelace formula:\n https://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates\n\n Returns:\n Tensor: a vector, area for each instance\n \"\"\"\n\n area = []\n for polygons_per_instance in self.polygons:\n area_per_instance = 0\n for p in polygons_per_instance:\n area_per_instance += polygon_area(p[0::2], p[1::2])\n area.append(area_per_instance)\n\n return torch.tensor(area)\n\n @staticmethod\n def cat(polymasks_list: List[\"PolygonMasks\"]) -> \"PolygonMasks\":\n \"\"\"\n Concatenates a list of PolygonMasks into a single PolygonMasks\n\n Arguments:\n polymasks_list (list[PolygonMasks])\n\n Returns:\n PolygonMasks: the concatenated PolygonMasks\n \"\"\"\n assert isinstance(polymasks_list, (list, tuple))\n assert len(polymasks_list) > 0\n assert all(isinstance(polymask, PolygonMasks) for polymask in polymasks_list)\n\n cat_polymasks = type(polymasks_list[0])(\n list(itertools.chain.from_iterable(pm.polygons for pm in 
polymasks_list))\n )\n return cat_polymasks" }, { "identifier": "RotatedBoxes", "path": "nativedancer/third_part/detectron2/structures/rotated_boxes.py", "snippet": "class RotatedBoxes(Boxes):\n \"\"\"\n This structure stores a list of rotated boxes as a Nx5 torch.Tensor.\n It supports some common methods about boxes\n (`area`, `clip`, `nonempty`, etc),\n and also behaves like a Tensor\n (support indexing, `to(device)`, `.device`, and iteration over all boxes)\n \"\"\"\n\n def __init__(self, tensor: torch.Tensor):\n \"\"\"\n Args:\n tensor (Tensor[float]): a Nx5 matrix. Each row is\n (x_center, y_center, width, height, angle),\n in which angle is represented in degrees.\n While there's no strict range restriction for it,\n the recommended principal range is between [-180, 180) degrees.\n\n Assume we have a horizontal box B = (x_center, y_center, width, height),\n where width is along the x-axis and height is along the y-axis.\n The rotated box B_rot (x_center, y_center, width, height, angle)\n can be seen as:\n\n 1. When angle == 0:\n B_rot == B\n 2. When angle > 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CCW;\n 3. When angle < 0:\n B_rot is obtained by rotating B w.r.t its center by :math:`|angle|` degrees CW.\n\n Mathematically, since the right-handed coordinate system for image space\n is (y, x), where y is top->down and x is left->right, the 4 vertices of the\n rotated rectangle :math:`(yr_i, xr_i)` (i = 1, 2, 3, 4) can be obtained from\n the vertices of the horizontal rectangle :math:`(y_i, x_i)` (i = 1, 2, 3, 4)\n in the following way (:math:`\\\\theta = angle*\\\\pi/180` is the angle in radians,\n :math:`(y_c, x_c)` is the center of the rectangle):\n\n .. math::\n\n yr_i = \\\\cos(\\\\theta) (y_i - y_c) - \\\\sin(\\\\theta) (x_i - x_c) + y_c,\n\n xr_i = \\\\sin(\\\\theta) (y_i - y_c) + \\\\cos(\\\\theta) (x_i - x_c) + x_c,\n\n which is the standard rigid-body rotation transformation.\n\n Intuitively, the angle is\n (1) the rotation angle from y-axis in image space\n to the height vector (top->down in the box's local coordinate system)\n of the box in CCW, and\n (2) the rotation angle from x-axis in image space\n to the width vector (left->right in the box's local coordinate system)\n of the box in CCW.\n\n More intuitively, consider the following horizontal box ABCD represented\n in (x1, y1, x2, y2): (3, 2, 7, 4),\n covering the [3, 7] x [2, 4] region of the continuous coordinate system\n which looks like this:\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | |\n | D---C\n |\n v y\n\n Note that each capital letter represents one 0-dimensional geometric point\n instead of a 'square pixel' here.\n\n In the example above, using (x, y) to represent a point we have:\n\n .. math::\n\n O = (0, 0), A = (3, 2), B = (7, 2), C = (7, 4), D = (3, 4)\n\n We name vector AB = vector DC as the width vector in box's local coordinate system, and\n vector AD = vector BC as the height vector in box's local coordinate system. Initially,\n when angle = 0 degree, they're aligned with the positive directions of x-axis and y-axis\n in the image space, respectively.\n\n For better illustration, we denote the center of the box as E,\n\n .. code:: none\n\n O--------> x\n |\n | A---B\n | | E |\n | D---C\n |\n v y\n\n where the center E = ((3+7)/2, (2+4)/2) = (5, 3).\n\n Also,\n\n .. 
math::\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Therefore, the corresponding representation for the same shape in rotated box in\n (x_center, y_center, width, height, angle) format is:\n\n (5, 3, 4, 2, 0),\n\n Now, let's consider (5, 3, 4, 2, 90), which is rotated by 90 degrees\n CCW (counter-clockwise) by definition. It looks like this:\n\n .. code:: none\n\n O--------> x\n | B-C\n | | |\n | |E|\n | | |\n | A-D\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CCW with regard to E:\n A = (4, 5), B = (4, 1), C = (6, 1), D = (6, 5)\n\n Here, 90 degrees can be seen as the CCW angle to rotate from y-axis to\n vector AD or vector BC (the top->down height vector in box's local coordinate system),\n or the CCW angle to rotate from x-axis to vector AB or vector DC (the left->right\n width vector in box's local coordinate system).\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n Next, how about (5, 3, 4, 2, -90), which is rotated by 90 degrees CW (clockwise)\n by definition? It looks like this:\n\n .. code:: none\n\n O--------> x\n | D-A\n | | |\n | |E|\n | | |\n | C-B\n v y\n\n The center E is still located at the same point (5, 3), while the vertices\n ABCD are rotated by 90 degrees CW with regard to E:\n A = (6, 1), B = (6, 5), C = (4, 5), D = (4, 1)\n\n .. math::\n\n width = |AB| = |CD| = 5 - 1 = 4,\n height = |AD| = |BC| = 6 - 4 = 2.\n\n This covers exactly the same region as (5, 3, 4, 2, 90) does, and their IoU\n will be 1. However, these two will generate different RoI Pooling results and\n should not be treated as an identical box.\n\n On the other hand, it's easy to see that (X, Y, W, H, A) is identical to\n (X, Y, W, H, A+360N), for any integer N. For example (5, 3, 4, 2, 270) would be\n identical to (5, 3, 4, 2, -90), because rotating the shape 270 degrees CCW is\n equivalent to rotating the same shape 90 degrees CW.\n\n We could rotate further to get (5, 3, 4, 2, 180), or (5, 3, 4, 2, -180):\n\n .. code:: none\n\n O--------> x\n |\n | C---D\n | | E |\n | B---A\n |\n v y\n\n .. math::\n\n A = (7, 4), B = (3, 4), C = (3, 2), D = (7, 2),\n\n width = |AB| = |CD| = 7 - 3 = 4,\n height = |AD| = |BC| = 4 - 2 = 2.\n\n Finally, this is a very inaccurate (heavily quantized) illustration of\n how (5, 3, 4, 2, 60) looks like in case anyone wonders:\n\n .. 
code:: none\n\n O--------> x\n | B\\\n | / C\n | /E /\n | A /\n | `D\n v y\n\n It's still a rectangle with center of (5, 3), width of 4 and height of 2,\n but its angle (and thus orientation) is somewhere between\n (5, 3, 4, 2, 0) and (5, 3, 4, 2, 90).\n \"\"\"\n device = tensor.device if isinstance(tensor, torch.Tensor) else torch.device(\"cpu\")\n tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)\n if tensor.numel() == 0:\n # Use reshape, so we don't end up creating a new tensor that does not depend on\n # the inputs (and consequently confuses jit)\n tensor = tensor.reshape((0, 5)).to(dtype=torch.float32, device=device)\n assert tensor.dim() == 2 and tensor.size(-1) == 5, tensor.size()\n\n self.tensor = tensor\n\n def clone(self) -> \"RotatedBoxes\":\n \"\"\"\n Clone the RotatedBoxes.\n\n Returns:\n RotatedBoxes\n \"\"\"\n return RotatedBoxes(self.tensor.clone())\n\n def to(self, device: torch.device):\n # Boxes are assumed float32 and does not support to(dtype)\n return RotatedBoxes(self.tensor.to(device=device))\n\n def area(self) -> torch.Tensor:\n \"\"\"\n Computes the area of all the boxes.\n\n Returns:\n torch.Tensor: a vector with areas of each box.\n \"\"\"\n box = self.tensor\n area = box[:, 2] * box[:, 3]\n return area\n\n # Avoid in-place operations so that we can torchscript; NOTE: this creates a new tensor\n def normalize_angles(self) -> None:\n \"\"\"\n Restrict angles to the range of [-180, 180) degrees\n \"\"\"\n angle_tensor = (self.tensor[:, 4] + 180.0) % 360.0 - 180.0\n self.tensor = torch.cat((self.tensor[:, :4], angle_tensor[:, None]), dim=1)\n\n def clip(self, box_size: Tuple[int, int], clip_angle_threshold: float = 1.0) -> None:\n \"\"\"\n Clip (in place) the boxes by limiting x coordinates to the range [0, width]\n and y coordinates to the range [0, height].\n\n For RRPN:\n Only clip boxes that are almost horizontal with a tolerance of\n clip_angle_threshold to maintain backward compatibility.\n\n Rotated boxes beyond this threshold are not clipped for two reasons:\n\n 1. There are potentially multiple ways to clip a rotated box to make it\n fit within the image.\n 2. It's tricky to make the entire rectangular box fit within the image\n and still be able to not leave out pixels of interest.\n\n Therefore we rely on ops like RoIAlignRotated to safely handle this.\n\n Args:\n box_size (height, width): The clipping box's size.\n clip_angle_threshold:\n Iff. 
abs(normalized(angle)) <= clip_angle_threshold (in degrees),\n we do the clipping as horizontal boxes.\n \"\"\"\n h, w = box_size\n\n # normalize angles to be within (-180, 180] degrees\n self.normalize_angles()\n\n idx = torch.where(torch.abs(self.tensor[:, 4]) <= clip_angle_threshold)[0]\n\n # convert to (x1, y1, x2, y2)\n x1 = self.tensor[idx, 0] - self.tensor[idx, 2] / 2.0\n y1 = self.tensor[idx, 1] - self.tensor[idx, 3] / 2.0\n x2 = self.tensor[idx, 0] + self.tensor[idx, 2] / 2.0\n y2 = self.tensor[idx, 1] + self.tensor[idx, 3] / 2.0\n\n # clip\n x1.clamp_(min=0, max=w)\n y1.clamp_(min=0, max=h)\n x2.clamp_(min=0, max=w)\n y2.clamp_(min=0, max=h)\n\n # convert back to (xc, yc, w, h)\n self.tensor[idx, 0] = (x1 + x2) / 2.0\n self.tensor[idx, 1] = (y1 + y2) / 2.0\n # make sure widths and heights do not increase due to numerical errors\n self.tensor[idx, 2] = torch.min(self.tensor[idx, 2], x2 - x1)\n self.tensor[idx, 3] = torch.min(self.tensor[idx, 3], y2 - y1)\n\n def nonempty(self, threshold: float = 0.0) -> torch.Tensor:\n \"\"\"\n Find boxes that are non-empty.\n A box is considered empty, if either of its side is no larger than threshold.\n\n Returns:\n Tensor: a binary vector which represents\n whether each box is empty (False) or non-empty (True).\n \"\"\"\n box = self.tensor\n widths = box[:, 2]\n heights = box[:, 3]\n keep = (widths > threshold) & (heights > threshold)\n return keep\n\n def __getitem__(self, item) -> \"RotatedBoxes\":\n \"\"\"\n Returns:\n RotatedBoxes: Create a new :class:`RotatedBoxes` by indexing.\n\n The following usage are allowed:\n\n 1. `new_boxes = boxes[3]`: return a `RotatedBoxes` which contains only one box.\n 2. `new_boxes = boxes[2:10]`: return a slice of boxes.\n 3. `new_boxes = boxes[vector]`, where vector is a torch.ByteTensor\n with `length = len(boxes)`. 
Nonzero elements in the vector will be selected.\n\n Note that the returned RotatedBoxes might share storage with this RotatedBoxes,\n subject to Pytorch's indexing semantics.\n \"\"\"\n if isinstance(item, int):\n return RotatedBoxes(self.tensor[item].view(1, -1))\n b = self.tensor[item]\n assert b.dim() == 2, \"Indexing on RotatedBoxes with {} failed to return a matrix!\".format(\n item\n )\n return RotatedBoxes(b)\n\n def __len__(self) -> int:\n return self.tensor.shape[0]\n\n def __repr__(self) -> str:\n return \"RotatedBoxes(\" + str(self.tensor) + \")\"\n\n def inside_box(self, box_size: Tuple[int, int], boundary_threshold: int = 0) -> torch.Tensor:\n \"\"\"\n Args:\n box_size (height, width): Size of the reference box covering\n [0, width] x [0, height]\n boundary_threshold (int): Boxes that extend beyond the reference box\n boundary by more than boundary_threshold are considered \"outside\".\n\n For RRPN, it might not be necessary to call this function since it's common\n for rotated box to extend to outside of the image boundaries\n (the clip function only clips the near-horizontal boxes)\n\n Returns:\n a binary vector, indicating whether each box is inside the reference box.\n \"\"\"\n height, width = box_size\n\n cnt_x = self.tensor[..., 0]\n cnt_y = self.tensor[..., 1]\n half_w = self.tensor[..., 2] / 2.0\n half_h = self.tensor[..., 3] / 2.0\n a = self.tensor[..., 4]\n c = torch.abs(torch.cos(a * math.pi / 180.0))\n s = torch.abs(torch.sin(a * math.pi / 180.0))\n # This basically computes the horizontal bounding rectangle of the rotated box\n max_rect_dx = c * half_w + s * half_h\n max_rect_dy = c * half_h + s * half_w\n\n inds_inside = (\n (cnt_x - max_rect_dx >= -boundary_threshold)\n & (cnt_y - max_rect_dy >= -boundary_threshold)\n & (cnt_x + max_rect_dx < width + boundary_threshold)\n & (cnt_y + max_rect_dy < height + boundary_threshold)\n )\n\n return inds_inside\n\n def get_centers(self) -> torch.Tensor:\n \"\"\"\n Returns:\n The box centers in a Nx2 array of (x, y).\n \"\"\"\n return self.tensor[:, :2]\n\n def scale(self, scale_x: float, scale_y: float) -> None:\n \"\"\"\n Scale the rotated box with horizontal and vertical scaling factors\n Note: when scale_factor_x != scale_factor_y,\n the rotated box does not preserve the rectangular shape when the angle\n is not a multiple of 90 degrees under resize transformation.\n Instead, the shape is a parallelogram (that has skew)\n Here we make an approximation by fitting a rotated rectangle to the parallelogram.\n \"\"\"\n self.tensor[:, 0] *= scale_x\n self.tensor[:, 1] *= scale_y\n theta = self.tensor[:, 4] * math.pi / 180.0\n c = torch.cos(theta)\n s = torch.sin(theta)\n\n # In image space, y is top->down and x is left->right\n # Consider the local coordintate system for the rotated box,\n # where the box center is located at (0, 0), and the four vertices ABCD are\n # A(-w / 2, -h / 2), B(w / 2, -h / 2), C(w / 2, h / 2), D(-w / 2, h / 2)\n # the midpoint of the left edge AD of the rotated box E is:\n # E = (A+D)/2 = (-w / 2, 0)\n # the midpoint of the top edge AB of the rotated box F is:\n # F(0, -h / 2)\n # To get the old coordinates in the global system, apply the rotation transformation\n # (Note: the right-handed coordinate system for image space is yOx):\n # (old_x, old_y) = (s * y + c * x, c * y - s * x)\n # E(old) = (s * 0 + c * (-w/2), c * 0 - s * (-w/2)) = (-c * w / 2, s * w / 2)\n # F(old) = (s * (-h / 2) + c * 0, c * (-h / 2) - s * 0) = (-s * h / 2, -c * h / 2)\n # After applying the scaling factor (sfx, sfy):\n 
# E(new) = (-sfx * c * w / 2, sfy * s * w / 2)\n # F(new) = (-sfx * s * h / 2, -sfy * c * h / 2)\n # The new width after scaling tranformation becomes:\n\n # w(new) = |E(new) - O| * 2\n # = sqrt[(sfx * c * w / 2)^2 + (sfy * s * w / 2)^2] * 2\n # = sqrt[(sfx * c)^2 + (sfy * s)^2] * w\n # i.e., scale_factor_w = sqrt[(sfx * c)^2 + (sfy * s)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_w == scale_factor_x;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_w == scale_factor_y\n self.tensor[:, 2] *= torch.sqrt((scale_x * c) ** 2 + (scale_y * s) ** 2)\n\n # h(new) = |F(new) - O| * 2\n # = sqrt[(sfx * s * h / 2)^2 + (sfy * c * h / 2)^2] * 2\n # = sqrt[(sfx * s)^2 + (sfy * c)^2] * h\n # i.e., scale_factor_h = sqrt[(sfx * s)^2 + (sfy * c)^2]\n #\n # For example,\n # when angle = 0 or 180, |c| = 1, s = 0, scale_factor_h == scale_factor_y;\n # when |angle| = 90, c = 0, |s| = 1, scale_factor_h == scale_factor_x\n self.tensor[:, 3] *= torch.sqrt((scale_x * s) ** 2 + (scale_y * c) ** 2)\n\n # The angle is the rotation angle from y-axis in image space to the height\n # vector (top->down in the box's local coordinate system) of the box in CCW.\n #\n # angle(new) = angle_yOx(O - F(new))\n # = angle_yOx( (sfx * s * h / 2, sfy * c * h / 2) )\n # = atan2(sfx * s * h / 2, sfy * c * h / 2)\n # = atan2(sfx * s, sfy * c)\n #\n # For example,\n # when sfx == sfy, angle(new) == atan2(s, c) == angle(old)\n self.tensor[:, 4] = torch.atan2(scale_x * s, scale_y * c) * 180 / math.pi\n\n @classmethod\n def cat(cls, boxes_list: List[\"RotatedBoxes\"]) -> \"RotatedBoxes\":\n \"\"\"\n Concatenates a list of RotatedBoxes into a single RotatedBoxes\n\n Arguments:\n boxes_list (list[RotatedBoxes])\n\n Returns:\n RotatedBoxes: the concatenated RotatedBoxes\n \"\"\"\n assert isinstance(boxes_list, (list, tuple))\n if len(boxes_list) == 0:\n return cls(torch.empty(0))\n assert all([isinstance(box, RotatedBoxes) for box in boxes_list])\n\n # use torch.cat (v.s. layers.cat) so the returned boxes never share storage with input\n cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0))\n return cat_boxes\n\n @property\n def device(self) -> torch.device:\n return self.tensor.device\n\n @torch.jit.unused\n def __iter__(self):\n \"\"\"\n Yield a box as a Tensor of shape (5,) at a time.\n \"\"\"\n yield from self.tensor" }, { "identifier": "PathManager", "path": "nativedancer/third_part/detectron2/utils/file_io.py", "snippet": "class Detectron2Handler(PathHandler):\n PREFIX = \"detectron2://\"\n S3_DETECTRON2_PREFIX = \"https://dl.fbaipublicfiles.com/detectron2/\"\n def _get_supported_prefixes(self):\n def _get_local_path(self, path, **kwargs):\n def _open(self, path, mode=\"r\", **kwargs):" }, { "identifier": "random_color", "path": "nativedancer/third_part/detectron2/utils/colormap.py", "snippet": "def random_color(rgb=False, maximum=255):\n \"\"\"\n Args:\n rgb (bool): whether to return RGB colors or BGR colors.\n maximum (int): either 255 or 1\n\n Returns:\n ndarray: a vector of 3 numbers\n \"\"\"\n idx = np.random.randint(0, len(_COLORS))\n ret = _COLORS[idx] * maximum\n if not rgb:\n ret = ret[::-1]\n return ret" } ]
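The context snippets above all follow the same structure conventions: a wrapped tensor (or list of polygons), a `__getitem__` that accepts an int, a slice, or a boolean vector, a `nonempty()` filter, and a static/class `cat` for concatenation. A minimal sketch of those shared semantics — assuming the upstream `detectron2` package layout (vendored in this repo under `nativedancer.third_part.detectron2`):

import torch
from detectron2.structures import BitMasks, PolygonMasks, RotatedBoxes

# Concatenate two BitMasks and filter out empty instances.
masks_a = BitMasks(torch.zeros(2, 8, 8, dtype=torch.bool))
masks_b = BitMasks(torch.ones(1, 8, 8, dtype=torch.bool))
all_masks = BitMasks.cat([masks_a, masks_b])   # 3 instances total
keep = all_masks.nonempty()                    # tensor([False, False, True])
non_empty = all_masks[keep]                    # boolean-vector indexing -> 1 instance

# One instance made of one triangle polygon: [x0, y0, x1, y1, x2, y2].
polys = PolygonMasks([[torch.tensor([0.0, 0.0, 4.0, 0.0, 4.0, 4.0])]])
rasterized = BitMasks.from_polygon_masks(polys, height=8, width=8)

# (x_center, y_center, width, height, angle); a positive angle rotates CCW,
# so (5, 3, 4, 2, 90) covers the same region as (5, 3, 4, 2, -90) but is
# intentionally not treated as an identical box (see the RotatedBoxes
# docstring above).
rboxes = RotatedBoxes(torch.tensor([[5.0, 3.0, 4.0, 2.0, 90.0]]))
print(rboxes.area())                           # tensor([8.])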
import colorsys import logging import math import numpy as np import cv2 import matplotlib as mpl import matplotlib.colors as mplc import matplotlib.figure as mplfigure import pycocotools.mask as mask_util import torch from enum import Enum, unique from matplotlib.backends.backend_agg import FigureCanvasAgg from PIL import Image from ..data import MetadataCatalog from ..structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes from ..utils.file_io import PathManager from .colormap import random_color from panopticapi.utils import rgb2id
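The imports above set up `visualizer.py`, whose full source appears in `all_code` below. A minimal usage sketch of its public entry point, with a dummy image and a hand-built `Instances` object as placeholders (no dataset metadata is passed, so labels fall back to bare class indices):

import numpy as np
import torch
from detectron2.structures import Boxes, Instances
from detectron2.utils.visualizer import ColorMode, Visualizer

img = np.zeros((480, 640, 3), dtype=np.uint8)             # dummy RGB image
preds = Instances((480, 640))
preds.pred_boxes = Boxes(torch.tensor([[50.0, 60.0, 200.0, 220.0]]))
preds.scores = torch.tensor([0.9])
preds.pred_classes = torch.tensor([0])

viz = Visualizer(img, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE)
out = viz.draw_instance_predictions(preds)                # -> VisImage
rgb = out.get_image()                                     # (H, W, 3) uint8 result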
17119
polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return tuple(np.clip(modified_color, 0.0, 1.0)) def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """ if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): return boxes.tensor.detach().numpy() else: return np.asarray(boxes) def _convert_masks(self, masks_or_polygons): """ Convert different format of masks or polygons to a tuple of masks and polygons. Returns: list[GenericMask]: """ m = masks_or_polygons if isinstance(m, PolygonMasks): m = m.polygons if isinstance(m, BitMasks): m = m.tensor.numpy() if isinstance(m, torch.Tensor): m = m.numpy() ret = [] for x in m: if isinstance(x, GenericMask): ret.append(x) else: ret.append(GenericMask(x, self.output.height, self.output.width)) return ret def _draw_text_in_mask(self, binary_mask, text, color): """ Find proper places to draw text given a binary mask. """ # TODO sometimes drawn on wrong objects. the heuristics here can improve. 
_num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) if stats[1:, -1].size == 0: return largest_component_id = np.argmax(stats[1:, -1]) + 1 # draw text on the largest component, as well as other very large components. for cid in range(1, _num_cc): if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: # median is more stable than centroid # center = centroids[largest_component_id] center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] self.draw_text(text, center, color=color) def _convert_keypoints(self, keypoints):
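The `next_line` field above (`def _convert_keypoints(self, keypoints):`) marks where the completion target begins. For illustration only, a hypothetical body that mirrors the `_convert_boxes`/`_convert_masks` helpers earlier in `cropped_code` — a plausible sketch, not necessarily this record's gold continuation:

    # Hypothetical continuation, for illustration; the gold completion may differ.
    def _convert_keypoints(self, keypoints):
        # Accept either a structures.Keypoints object or an array-like (N, K, 3).
        if isinstance(keypoints, Keypoints):
            keypoints = keypoints.tensor
        keypoints = np.asarray(keypoints)
        return keypoints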
# Copyright (c) Facebook, Inc. and its affiliates. logger = logging.getLogger(__name__) __all__ = ["ColorMode", "VisImage", "Visualizer"] _SMALL_OBJECT_AREA_THRESH = 1000 _LARGE_MASK_AREA_THRESH = 120000 _OFF_WHITE = (1.0, 1.0, 240.0 / 255) _BLACK = (0, 0, 0) _RED = (1.0, 0, 0) _KEYPOINT_THRESHOLD = 0.05 @unique class ColorMode(Enum): """ Enum of different color modes to use for instance visualizations. """ IMAGE = 0 """ Picks a random color for every instance and overlay segmentations with low opacity. """ SEGMENTATION = 1 """ Let instances of the same category have similar colors (from metadata.thing_colors), and overlay them with high opacity. This provides more attention on the quality of segmentation. """ IMAGE_BW = 2 """ Same as IMAGE, but convert all areas without masks to gray-scale. Only available for drawing per-instance mask predictions. """ class GenericMask: """ Attribute: polygons (list[ndarray]): list[ndarray]: polygons for this mask. Each ndarray has format [x, y, x, y, ...] mask (ndarray): a binary mask """ def __init__(self, mask_or_polygons, height, width): self._mask = self._polygons = self._has_holes = None self.height = height self.width = width m = mask_or_polygons if isinstance(m, dict): # RLEs assert "counts" in m and "size" in m if isinstance(m["counts"], list): # uncompressed RLEs h, w = m["size"] assert h == height and w == width m = mask_util.frPyObjects(m, h, w) self._mask = mask_util.decode(m)[:, :] return if isinstance(m, list): # list[ndarray] self._polygons = [np.asarray(x).reshape(-1) for x in m] return if isinstance(m, np.ndarray): # assumed to be a binary mask assert m.shape[1] != 2, m.shape assert m.shape == ( height, width, ), f"mask shape: {m.shape}, target dims: {height}, {width}" self._mask = m.astype("uint8") return raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) @property def mask(self): if self._mask is None: self._mask = self.polygons_to_mask(self._polygons) return self._mask @property def polygons(self): if self._polygons is None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) return self._polygons @property def has_holes(self): if self._has_holes is None: if self._mask is not None: self._polygons, self._has_holes = self.mask_to_polygons(self._mask) else: self._has_holes = False # if original format is polygon, does not have holes return self._has_holes def mask_to_polygons(self, mask): # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. # Internal contours (holes) are placed in hierarchy-2. # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) hierarchy = res[-1] if hierarchy is None: # empty mask return [], False has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 res = res[-2] res = [x.flatten() for x in res] # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. # We add 0.5 to turn them into real-value coordinate space. A better solution # would be to first +0.5 and then dilate the returned polygon by 0.5. 
res = [x + 0.5 for x in res if len(x) >= 6] return res, has_holes def polygons_to_mask(self, polygons): rle = mask_util.frPyObjects(polygons, self.height, self.width) rle = mask_util.merge(rle) return mask_util.decode(rle)[:, :] def area(self): return self.mask.sum() def bbox(self): p = mask_util.frPyObjects(self.polygons, self.height, self.width) p = mask_util.merge(p) bbox = mask_util.toBbox(p) bbox[2] += bbox[0] bbox[3] += bbox[1] return bbox class _PanopticPrediction: """ Unify different panoptic annotation/prediction formats """ def __init__(self, panoptic_seg, segments_info, metadata=None): if segments_info is None: assert metadata is not None # If "segments_info" is None, we assume "panoptic_img" is a # H*W int32 image storing the panoptic_id in the format of # category_id * label_divisor + instance_id. We reserve -1 for # VOID label. label_divisor = metadata.label_divisor segments_info = [] for panoptic_label in np.unique(panoptic_seg.numpy()): if panoptic_label == -1: # VOID region. continue pred_class = panoptic_label // label_divisor isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() segments_info.append( { "id": int(panoptic_label), "category_id": int(pred_class), "isthing": bool(isthing), } ) del metadata self._seg = panoptic_seg self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) areas = areas.numpy() sorted_idxs = np.argsort(-areas) self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] self._seg_ids = self._seg_ids.tolist() for sid, area in zip(self._seg_ids, self._seg_areas): if sid in self._sinfo: self._sinfo[sid]["area"] = float(area) def non_empty_mask(self): """ Returns: (H, W) array, a mask for all pixels that have a prediction """ empty_ids = [] for id in self._seg_ids: if id not in self._sinfo: empty_ids.append(id) if len(empty_ids) == 0: return np.zeros(self._seg.shape, dtype=np.uint8) assert ( len(empty_ids) == 1 ), ">1 ids corresponds to no labels. This is currently not supported" return (self._seg != empty_ids[0]).numpy().astype(bool) def semantic_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or sinfo["isthing"]: # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. continue yield (self._seg == sid).numpy().astype(bool), sinfo def instance_masks(self): for sid in self._seg_ids: sinfo = self._sinfo.get(sid) if sinfo is None or not sinfo["isthing"]: continue mask = (self._seg == sid).numpy().astype(bool) if mask.sum() > 0: yield mask, sinfo def _create_text_labels(classes, scores, class_names, is_crowd=None): """ Args: classes (list[int] or None): scores (list[float] or None): class_names (list[str] or None): is_crowd (list[bool] or None): Returns: list[str] or None """ labels = None if classes is not None: if class_names is not None and len(class_names) > 0: labels = [class_names[i] for i in classes] else: labels = [str(i) for i in classes] if scores is not None: if labels is None: labels = ["{:.0f}%".format(s * 100) for s in scores] else: labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] if labels is not None and is_crowd is not None: labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] return labels class VisImage: def __init__(self, img, scale=1.0): """ Args: img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. 
scale (float): scale the input image """ self.img = img self.scale = scale self.width, self.height = img.shape[1], img.shape[0] self._setup_figure(img) def _setup_figure(self, img): """ Args: Same as in :meth:`__init__()`. Returns: fig (matplotlib.pyplot.figure): top level container for all the image plot elements. ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. """ fig = mplfigure.Figure(frameon=False) self.dpi = fig.get_dpi() # add a small 1e-2 to avoid precision lost due to matplotlib's truncation # (https://github.com/matplotlib/matplotlib/issues/15363) fig.set_size_inches( (self.width * self.scale + 1e-2) / self.dpi, (self.height * self.scale + 1e-2) / self.dpi, ) self.canvas = FigureCanvasAgg(fig) # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) ax.axis("off") self.fig = fig self.ax = ax self.reset_image(img) def reset_image(self, img): """ Args: img: same as in __init__ """ img = img.astype("uint8") self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") def save(self, filepath): """ Args: filepath (str): a string that contains the absolute path, including the file name, where the visualized image will be saved. """ self.fig.savefig(filepath) def get_image(self): """ Returns: ndarray: the visualized image of shape (H, W, 3) (RGB) in uint8 type. The shape is scaled w.r.t the input image using the given `scale` argument. """ canvas = self.canvas s, (width, height) = canvas.print_to_buffer() # buf = io.BytesIO() # works for cairo backend # canvas.print_rgba(buf) # width, height = self.width, self.height # s = buf.getvalue() buffer = np.frombuffer(s, dtype="uint8") img_rgba = buffer.reshape(height, width, 4) rgb, alpha = np.split(img_rgba, [3], axis=2) return rgb.astype("uint8") class Visualizer: """ Visualizer that draws data about detection/segmentation on images. It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` that draw primitive objects to images, as well as high-level wrappers like `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` that draw composite data in some pre-defined style. Note that the exact visualization style for the high-level wrappers are subject to change. Style such as color, opacity, label contents, visibility of labels, or even the visibility of objects themselves (e.g. when the object is too small) may change according to different heuristics, as long as the results still look visually reasonable. To obtain a consistent style, you can implement custom drawing functions with the abovementioned primitive methods instead. If you need more customized visualization styles, you can process the data yourself following their format documented in tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not intend to satisfy everyone's preference on drawing styles. This visualizer focuses on high rendering quality rather than performance. It is not designed to be used for real-time applications. """ # TODO implement a fast, rasterized version using OpenCV def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): """ Args: img_rgb: a numpy array of shape (H, W, C), where H and W correspond to the height and width of the image respectively. C is the number of color channels. The image is required to be in RGB format since that is a requirement of the Matplotlib library. The image is also expected to be in the range [0, 255]. 
metadata (Metadata): dataset metadata (e.g. class names and colors) instance_mode (ColorMode): defines one of the pre-defined style for drawing instances on an image. """ self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) if metadata is None: metadata = MetadataCatalog.get("__nonexist__") self.metadata = metadata self.output = VisImage(self.img, scale=scale) self.cpu_device = torch.device("cpu") # too small texts are useless, therefore clamp to 9 self._default_font_size = max( np.sqrt(self.output.height * self.output.width) // 90, 10 // scale ) self._instance_mode = instance_mode self.keypoint_threshold = _KEYPOINT_THRESHOLD def draw_instance_predictions(self, predictions): """ Draw instance-level prediction results on an image. Args: predictions (Instances): the output of an instance detection/segmentation model. Following fields will be used to draw: "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). Returns: output (VisImage): image object with visualizations. """ boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None scores = predictions.scores if predictions.has("scores") else None classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None if predictions.has("pred_masks"): masks = np.asarray(predictions.pred_masks) masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] else: masks = None if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes ] alpha = 0.8 else: colors = None alpha = 0.5 if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image( self._create_grayscale_image( (predictions.pred_masks.any(dim=0) > 0).numpy() if predictions.has("pred_masks") else None ) ) alpha = 0.3 self.overlay_instances( masks=masks, boxes=boxes, labels=labels, keypoints=keypoints, assigned_colors=colors, alpha=alpha, ) return self.output def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.8): """ Draw semantic segmentation predictions/labels. Args: sem_seg (Tensor or ndarray): the segmentation of shape (H, W). Each value is the integer label of the pixel. area_threshold (int): segments with less than `area_threshold` are not drawn. alpha (float): the larger it is, the more opaque the segmentations are. Returns: output (VisImage): image object with visualizations. """ if isinstance(sem_seg, torch.Tensor): sem_seg = sem_seg.numpy() labels, areas = np.unique(sem_seg, return_counts=True) sorted_idxs = np.argsort(-areas).tolist() labels = labels[sorted_idxs] for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): try: mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] except (AttributeError, IndexError): mask_color = None binary_mask = (sem_seg == label).astype(np.uint8) text = self.metadata.stuff_classes[label] self.draw_binary_mask( binary_mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) return self.output def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): """ Draw panoptic prediction annotations or results. Args: panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment. 
segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. If it is a ``list[dict]``, each dict contains keys "id", "category_id". If None, category id of each pixel is computed by ``pixel // metadata.label_divisor``. area_threshold (int): stuff segments with less than `area_threshold` are not drawn. Returns: output (VisImage): image object with visualizations. """ pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) if self._instance_mode == ColorMode.IMAGE_BW: self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) # draw mask for all semantic segments first i.e. "stuff" for mask, sinfo in pred.semantic_masks(): category_idx = sinfo["category_id"] try: mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] except AttributeError: mask_color = None text = self.metadata.stuff_classes[category_idx] self.draw_binary_mask( mask, color=mask_color, edge_color=_OFF_WHITE, text=text, alpha=alpha, area_threshold=area_threshold, ) # draw mask for all instances second all_instances = list(pred.instance_masks()) if len(all_instances) == 0: return self.output masks, sinfo = list(zip(*all_instances)) category_ids = [x["category_id"] for x in sinfo] try: scores = [x["score"] for x in sinfo] except KeyError: scores = None labels = _create_text_labels( category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] ) try: colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] except AttributeError: colors = None self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) return self.output draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility def draw_dataset_dict(self, dic): """ Draw annotations/segmentations in Detectron2 Dataset format. Args: dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. Returns: output (VisImage): image object with visualizations. 
""" annos = dic.get("annotations", None) if annos: if "segmentation" in annos[0]: masks = [x["segmentation"] for x in annos] else: masks = None if "keypoints" in annos[0]: keypts = [x["keypoints"] for x in annos] keypts = np.array(keypts).reshape(len(annos), -1, 3) else: keypts = None boxes = [ BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) if len(x["bbox"]) == 4 else x["bbox"] for x in annos ] colors = None category_ids = [x["category_id"] for x in annos] if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): colors = [ self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids ] names = self.metadata.get("thing_classes", None) labels = _create_text_labels( category_ids, scores=None, class_names=names, is_crowd=[x.get("iscrowd", 0) for x in annos], ) self.overlay_instances( labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors ) sem_seg = dic.get("sem_seg", None) if sem_seg is None and "sem_seg_file_name" in dic: with PathManager.open(dic["sem_seg_file_name"], "rb") as f: sem_seg = Image.open(f) sem_seg = np.asarray(sem_seg, dtype="uint8") if sem_seg is not None: self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.5) pan_seg = dic.get("pan_seg", None) if pan_seg is None and "pan_seg_file_name" in dic: with PathManager.open(dic["pan_seg_file_name"], "rb") as f: pan_seg = Image.open(f) pan_seg = np.asarray(pan_seg) pan_seg = rgb2id(pan_seg) if pan_seg is not None: segments_info = dic["segments_info"] pan_seg = torch.tensor(pan_seg) self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.5) return self.output def overlay_instances( self, *, boxes=None, labels=None, masks=None, keypoints=None, assigned_colors=None, alpha=0.5, ): """ Args: boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, or a :class:`RotatedBoxes`, or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image, labels (list[str]): the text to be displayed for each instance. masks (masks-like object): Supported types are: * :class:`detectron2.structures.PolygonMasks`, :class:`detectron2.structures.BitMasks`. * list[list[ndarray]]: contains the segmentation masks for all objects in one image. The first level of the list corresponds to individual instances. The second level to all the polygon that compose the instance, and the third level to the polygon coordinates. The third level should have the format of [x0, y0, x1, y1, ..., xn, yn] (n >= 3). * list[ndarray]: each ndarray is a binary mask of shape (H, W). * list[dict]: each dict is a COCO-style RLE. keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), where the N is the number of instances and K is the number of keypoints. The last dimension corresponds to (x, y, visibility or score). assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. 
""" num_instances = 0 if boxes is not None: boxes = self._convert_boxes(boxes) num_instances = len(boxes) if masks is not None: masks = self._convert_masks(masks) if num_instances: assert len(masks) == num_instances else: num_instances = len(masks) if keypoints is not None: if num_instances: assert len(keypoints) == num_instances else: num_instances = len(keypoints) keypoints = self._convert_keypoints(keypoints) if labels is not None: assert len(labels) == num_instances if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output if boxes is not None and boxes.shape[1] == 5: return self.overlay_rotated_instances( boxes=boxes, labels=labels, assigned_colors=assigned_colors ) # Display in largest to smallest order to reduce occlusion. areas = None if boxes is not None: areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) elif masks is not None: areas = np.asarray([x.area() for x in masks]) if areas is not None: sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] if boxes is not None else None labels = [labels[k] for k in sorted_idxs] if labels is not None else None masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] keypoints = keypoints[sorted_idxs] if keypoints is not None else None for i in range(num_instances): color = assigned_colors[i] if boxes is not None: self.draw_box(boxes[i], edge_color=color) if masks is not None: for segment in masks[i].polygons: self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) if labels is not None: # first get a box if boxes is not None: x0, y0, x1, y1 = boxes[i] text_pos = (x0, y0) # if drawing boxes, put text on the box corner. horiz_align = "left" elif masks is not None: # skip small mask without polygon if len(masks[i].polygons) == 0: continue x0, y0, x1, y1 = masks[i].bbox() # draw text in the center (defined by median) when box is not drawn # median is less sensitive to outliers. text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] horiz_align = "center" else: continue # drawing the box confidence for keypoints isn't very useful. # for small objects, draw text at the side to avoid occlusion instance_area = (y1 - y0) * (x1 - x0) if ( instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale or y1 - y0 < 40 * self.output.scale ): if y1 >= self.output.height - 5: text_pos = (x1, y0) else: text_pos = (x0, y1) height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) lighter_color = self._change_color_brightness(color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text( labels[i], text_pos, color=lighter_color, horizontal_alignment=horiz_align, font_size=font_size, ) # draw keypoints if keypoints is not None: for keypoints_per_instance in keypoints: self.draw_and_connect_keypoints(keypoints_per_instance) return self.output def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): """ Args: boxes (ndarray): an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format for the N objects in a single image. labels (list[str]): the text to be displayed for each instance. assigned_colors (list[matplotlib.colors]): a list of colors, where each color corresponds to each mask or box in the image. 
Refer to 'matplotlib.colors' for full list of formats that the colors are accepted in. Returns: output (VisImage): image object with visualizations. """ num_instances = len(boxes) if assigned_colors is None: assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] if num_instances == 0: return self.output # Display in largest to smallest order to reduce occlusion. if boxes is not None: areas = boxes[:, 2] * boxes[:, 3] sorted_idxs = np.argsort(-areas).tolist() # Re-order overlapped instances in descending order. boxes = boxes[sorted_idxs] labels = [labels[k] for k in sorted_idxs] if labels is not None else None colors = [assigned_colors[idx] for idx in sorted_idxs] for i in range(num_instances): self.draw_rotated_box_with_label( boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None ) return self.output def draw_and_connect_keypoints(self, keypoints): """ Draws keypoints of an instance and follows the rules for keypoint connections to draw lines between appropriate keypoints. This follows color heuristics for line color. Args: keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints and the last dimension corresponds to (x, y, probability). Returns: output (VisImage): image object with visualizations. """ visible = {} keypoint_names = self.metadata.get("keypoint_names") for idx, keypoint in enumerate(keypoints): # draw keypoint x, y, prob = keypoint if prob > self.keypoint_threshold: self.draw_circle((x, y), color=_RED) if keypoint_names: keypoint_name = keypoint_names[idx] visible[keypoint_name] = (x, y) if self.metadata.get("keypoint_connection_rules"): for kp0, kp1, color in self.metadata.keypoint_connection_rules: if kp0 in visible and kp1 in visible: x0, y0 = visible[kp0] x1, y1 = visible[kp1] color = tuple(x / 255.0 for x in color) self.draw_line([x0, x1], [y0, y1], color=color) # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip # Note that this strategy is specific to person keypoints. # For other keypoints, it should just do nothing try: ls_x, ls_y = visible["left_shoulder"] rs_x, rs_y = visible["right_shoulder"] mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 except KeyError: pass else: # draw line from nose to mid-shoulder nose_x, nose_y = visible.get("nose", (None, None)) if nose_x is not None: self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) try: # draw line from mid-shoulder to mid-hip lh_x, lh_y = visible["left_hip"] rh_x, rh_y = visible["right_hip"] except KeyError: pass else: mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) return self.output """ Primitive drawing functions: """ def draw_text( self, text, position, *, font_size=None, color="g", horizontal_alignment="center", rotation=0, ): """ Args: text (str): class label position (tuple): a tuple of the x and y coordinates to place text on image. font_size (int, optional): font of the text. If not provided, a font size proportional to the image width is calculated and used. color: color of the text. Refer to `matplotlib.colors` for full list of formats that are accepted. horizontal_alignment (str): see `matplotlib.text.Text` rotation: rotation angle in degrees CCW Returns: output (VisImage): image object with text drawn. 
""" if not font_size: font_size = self._default_font_size # since the text background is dark, we don't want the text to be dark color = np.maximum(list(mplc.to_rgb(color)), 0.2) color[np.argmax(color)] = max(0.8, np.max(color)) x, y = position self.output.ax.text( x, y, text, size=font_size * self.output.scale, family="sans-serif", bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, verticalalignment="top", horizontalalignment=horizontal_alignment, color=color, zorder=10, rotation=rotation, ) return self.output def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): """ Args: box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 are the coordinates of the image's top left corner. x1 and y1 are the coordinates of the image's bottom right corner. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. Returns: output (VisImage): image object with box drawn. """ x0, y0, x1, y1 = box_coord width = x1 - x0 height = y1 - y0 linewidth = max(self._default_font_size / 4, 1) self.output.ax.add_patch( mpl.patches.Rectangle( (x0, y0), width, height, fill=False, edgecolor=edge_color, linewidth=linewidth * self.output.scale, alpha=alpha, linestyle=line_style, ) ) return self.output def draw_rotated_box_with_label( self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None ): """ Draw a rotated box with label on its top-left corner. Args: rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), where cnt_x and cnt_y are the center coordinates of the box. w and h are the width and height of the box. angle represents how many degrees the box is rotated CCW with regard to the 0-degree box. alpha (float): blending efficient. Smaller values lead to more transparent masks. edge_color: color of the outline of the box. Refer to `matplotlib.colors` for full list of formats that are accepted. line_style (string): the string to use to create the outline of the boxes. label (string): label for rotated box. It will not be rendered when set to None. Returns: output (VisImage): image object with box drawn. 
""" cnt_x, cnt_y, w, h, angle = rotated_box area = w * h # use thinner lines when the box is small linewidth = self._default_font_size / ( 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 ) theta = angle * math.pi / 180.0 c = math.cos(theta) s = math.sin(theta) rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] # x: left->right ; y: top->down rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] for k in range(4): j = (k + 1) % 4 self.draw_line( [rotated_rect[k][0], rotated_rect[j][0]], [rotated_rect[k][1], rotated_rect[j][1]], color=edge_color, linestyle="--" if k == 1 else line_style, linewidth=linewidth, ) if label is not None: text_pos = rotated_rect[1] # topleft corner height_ratio = h / np.sqrt(self.output.height * self.output.width) label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) font_size = ( np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size ) self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) return self.output def draw_circle(self, circle_coord, color, radius=3): """ Args: circle_coord (list(int) or tuple(int)): contains the x and y coordinates of the center of the circle. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. radius (int): radius of the circle. Returns: output (VisImage): image object with box drawn. """ x, y = circle_coord self.output.ax.add_patch( mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) ) return self.output def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): """ Args: x_data (list[int]): a list containing x values of all the points being drawn. Length of list should match the length of y_data. y_data (list[int]): a list containing y values of all the points being drawn. Length of list should match the length of x_data. color: color of the line. Refer to `matplotlib.colors` for a full list of formats that are accepted. linestyle: style of the line. Refer to `matplotlib.lines.Line2D` for a full list of formats that are accepted. linewidth (float or None): width of the line. When it's None, a default value will be computed and used. Returns: output (VisImage): image object with line drawn. """ if linewidth is None: linewidth = self._default_font_size / 3 linewidth = max(linewidth, 1) self.output.ax.add_line( mpl.lines.Line2D( x_data, y_data, linewidth=linewidth * self.output.scale, color=color, linestyle=linestyle, ) ) return self.output def draw_binary_mask( self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.5, area_threshold=10 ): """ Args: binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and W is the image width. Each value in the array is either a 0 or 1 value of uint8 type. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. area_threshold (float): a connected component smaller than this area will not be shown. Returns: output (VisImage): image object with mask drawn. 
""" if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) has_valid_segment = False binary_mask = binary_mask.astype("uint8") # opencv needs uint8 mask = GenericMask(binary_mask, self.output.height, self.output.width) shape2d = (binary_mask.shape[0], binary_mask.shape[1]) if not mask.has_holes: # draw polygons for regular masks for segment in mask.polygons: area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) if area < (area_threshold or 0): continue has_valid_segment = True segment = segment.reshape(-1, 2) self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) else: # TODO: Use Path/PathPatch to draw vector graphics: # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha has_valid_segment = True self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None and has_valid_segment: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): """ Args: soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. color: color of the mask. Refer to `matplotlib.colors` for a full list of formats that are accepted. If None, will pick a random color. text (str): if None, will be drawn on the object alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with mask drawn. """ if color is None: color = random_color(rgb=True, maximum=1) color = mplc.to_rgb(color) shape2d = (soft_mask.shape[0], soft_mask.shape[1]) rgba = np.zeros(shape2d + (4,), dtype="float32") rgba[:, :, :3] = color rgba[:, :, 3] = soft_mask * alpha self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) if text is not None: lighter_color = self._change_color_brightness(color, brightness_factor=0.7) binary_mask = (soft_mask > 0.5).astype("uint8") self._draw_text_in_mask(binary_mask, text, lighter_color) return self.output def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): """ Args: segment: numpy array of shape Nx2, containing all the points in the polygon. color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a full list of formats that are accepted. If not provided, a darker shade of the polygon color will be used instead. alpha (float): blending efficient. Smaller values lead to more transparent masks. Returns: output (VisImage): image object with polygon drawn. """ if edge_color is None: # make edge color darker than the polygon color if alpha > 0.8: edge_color = self._change_color_brightness(color, brightness_factor=-0.7) else: edge_color = color edge_color = mplc.to_rgb(edge_color) + (1,) polygon = mpl.patches.Polygon( segment, fill=True, facecolor=mplc.to_rgb(color) + (alpha,), edgecolor=edge_color, linewidth=max(self._default_font_size // 15 * self.output.scale, 1), ) self.output.ax.add_patch(polygon) return self.output """ Internal methods: """ def _jitter(self, color): """ Randomly modifies given color to produce a slightly different color than the color given. 
Args: color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color picked. The values in the list are in the [0.0, 1.0] range. Returns: jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color after being jittered. The values in the list are in the [0.0, 1.0] range. """ color = mplc.to_rgb(color) vec = np.random.rand(3) # better to do it in another color space vec = vec / np.linalg.norm(vec) * 0.5 res = np.clip(vec + color, 0, 1) return tuple(res) def _create_grayscale_image(self, mask=None): """ Create a grayscale version of the original image. The colors in masked area, if given, will be kept. """ img_bw = self.img.astype("f4").mean(axis=2) img_bw = np.stack([img_bw] * 3, axis=2) if mask is not None: img_bw[mask] = self.img[mask] return img_bw def _change_color_brightness(self, color, brightness_factor): """ Depending on the brightness_factor, gives a lighter or darker color i.e. a color with less or more saturation than the original color. Args: color: color of the polygon. Refer to `matplotlib.colors` for a full list of formats that are accepted. brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of 0 will correspond to no change, a factor in [-1.0, 0) range will result in a darker color and a factor in (0, 1.0] range will result in a lighter color. Returns: modified_color (tuple[double]): a tuple containing the RGB values of the modified color. Each value in the tuple is in the [0.0, 1.0] range. """ assert brightness_factor >= -1.0 and brightness_factor <= 1.0 color = mplc.to_rgb(color) polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) return tuple(np.clip(modified_color, 0.0, 1.0)) def _convert_boxes(self, boxes): """ Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. """ if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): return boxes.tensor.detach().numpy() else: return np.asarray(boxes) def _convert_masks(self, masks_or_polygons): """ Convert different format of masks or polygons to a tuple of masks and polygons. Returns: list[GenericMask]: """ m = masks_or_polygons if isinstance(m, PolygonMasks): m = m.polygons if isinstance(m, BitMasks): m = m.tensor.numpy() if isinstance(m, torch.Tensor): m = m.numpy() ret = [] for x in m: if isinstance(x, GenericMask): ret.append(x) else: ret.append(GenericMask(x, self.output.height, self.output.width)) return ret def _draw_text_in_mask(self, binary_mask, text, color): """ Find proper places to draw text given a binary mask. """ # TODO sometimes drawn on wrong objects. the heuristics here can improve. _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) if stats[1:, -1].size == 0: return largest_component_id = np.argmax(stats[1:, -1]) + 1 # draw text on the largest component, as well as other very large components. for cid in range(1, _num_cc): if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: # median is more stable than centroid # center = centroids[largest_component_id] center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] self.draw_text(text, center, color=color) def _convert_keypoints(self, keypoints):
if isinstance(keypoints, Keypoints):
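The record's cropped code ends at the visualizer's primitive helpers. One step worth a standalone illustration is how draw_rotated_box_with_label maps a (cnt_x, cnt_y, w, h, angle) box to four pixel corners before drawing the edges. Below is a minimal sketch of that transform, assuming the same degrees-CCW convention and a y-down image frame; rotated_box_corners is a hypothetical helper name, not part of the record:

import math

def rotated_box_corners(rotated_box):
    # Same convention as draw_rotated_box_with_label above:
    # (cnt_x, cnt_y, w, h, angle), angle in degrees CCW.
    cnt_x, cnt_y, w, h, angle = rotated_box
    theta = math.radians(angle)
    c, s = math.cos(theta), math.sin(theta)
    # Corner offsets around the center; x grows right, y grows down.
    rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)]
    # Rotate each offset CCW (the sign pattern differs from the textbook
    # rotation matrix because image y grows downward) and translate to
    # the box center.
    return [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for xx, yy in rect]

# A 4x2 box centered at (10, 10) rotated 90 degrees CCW becomes the
# upright 2x4 box: corners ~(11, 12), (9, 12), (9, 8), (11, 8),
# up to floating-point rounding.
print(rotated_box_corners((10, 10, 4, 2, 90)))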
3
2023-12-10 20:14:00+00:00
24k
mkang315/ASF-YOLO
segment/val.py
[ { "identifier": "DetectMultiBackend", "path": "models/common.py", "snippet": "class DetectMultiBackend(nn.Module):\n # YOLOv5 MultiBackend class for python inference on various backends\n def __init__(self, weights='yolov5s.pt', device=torch.device('cpu'), dnn=False, data=None, fp16=False, fuse=True):\n # Usage:\n # PyTorch: weights = *.pt\n # TorchScript: *.torchscript\n # ONNX Runtime: *.onnx\n # ONNX OpenCV DNN: *.onnx --dnn\n # OpenVINO: *_openvino_model\n # CoreML: *.mlmodel\n # TensorRT: *.engine\n # TensorFlow SavedModel: *_saved_model\n # TensorFlow GraphDef: *.pb\n # TensorFlow Lite: *.tflite\n # TensorFlow Edge TPU: *_edgetpu.tflite\n # PaddlePaddle: *_paddle_model\n from models.experimental import attempt_download, attempt_load # scoped to avoid circular import\n\n super().__init__()\n w = str(weights[0] if isinstance(weights, list) else weights)\n pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle, triton = self._model_type(w)\n fp16 &= pt or jit or onnx or engine # FP16\n nhwc = coreml or saved_model or pb or tflite or edgetpu # BHWC formats (vs torch BCWH)\n stride = 32 # default stride\n cuda = torch.cuda.is_available() and device.type != 'cpu' # use CUDA\n if not (pt or triton):\n w = attempt_download(w) # download if not local\n\n if pt: # PyTorch\n model = attempt_load(weights if isinstance(weights, list) else w, device=device, inplace=True, fuse=fuse)\n stride = max(int(model.stride.max()), 32) # model stride\n names = model.module.names if hasattr(model, 'module') else model.names # get class names\n model.half() if fp16 else model.float()\n self.model = model # explicitly assign for to(), cpu(), cuda(), half()\n elif jit: # TorchScript\n LOGGER.info(f'Loading {w} for TorchScript inference...')\n extra_files = {'config.txt': ''} # model metadata\n model = torch.jit.load(w, _extra_files=extra_files, map_location=device)\n model.half() if fp16 else model.float()\n if extra_files['config.txt']: # load metadata dict\n d = json.loads(extra_files['config.txt'],\n object_hook=lambda d: {int(k) if k.isdigit() else k: v\n for k, v in d.items()})\n stride, names = int(d['stride']), d['names']\n elif dnn: # ONNX OpenCV DNN\n LOGGER.info(f'Loading {w} for ONNX OpenCV DNN inference...')\n check_requirements('opencv-python>=4.5.4')\n net = cv2.dnn.readNetFromONNX(w)\n elif onnx: # ONNX Runtime\n LOGGER.info(f'Loading {w} for ONNX Runtime inference...')\n check_requirements(('onnx', 'onnxruntime-gpu' if cuda else 'onnxruntime'))\n import onnxruntime\n providers = ['CUDAExecutionProvider', 'CPUExecutionProvider'] if cuda else ['CPUExecutionProvider']\n session = onnxruntime.InferenceSession(w, providers=providers)\n output_names = [x.name for x in session.get_outputs()]\n meta = session.get_modelmeta().custom_metadata_map # metadata\n if 'stride' in meta:\n stride, names = int(meta['stride']), eval(meta['names'])\n elif xml: # OpenVINO\n LOGGER.info(f'Loading {w} for OpenVINO inference...')\n check_requirements('openvino') # requires openvino-dev: https://pypi.org/project/openvino-dev/\n from openvino.runtime import Core, Layout, get_batch\n ie = Core()\n if not Path(w).is_file(): # if not *.xml\n w = next(Path(w).glob('*.xml')) # get *.xml file from *_openvino_model dir\n network = ie.read_model(model=w, weights=Path(w).with_suffix('.bin'))\n if network.get_parameters()[0].get_layout().empty:\n network.get_parameters()[0].set_layout(Layout(\"NCHW\"))\n batch_dim = get_batch(network)\n if batch_dim.is_static:\n batch_size = batch_dim.get_length()\n 
executable_network = ie.compile_model(network, device_name=\"CPU\") # device_name=\"MYRIAD\" for Intel NCS2\n stride, names = self._load_metadata(Path(w).with_suffix('.yaml')) # load metadata\n elif engine: # TensorRT\n LOGGER.info(f'Loading {w} for TensorRT inference...')\n import tensorrt as trt # https://developer.nvidia.com/nvidia-tensorrt-download\n check_version(trt.__version__, '7.0.0', hard=True) # require tensorrt>=7.0.0\n if device.type == 'cpu':\n device = torch.device('cuda:0')\n Binding = namedtuple('Binding', ('name', 'dtype', 'shape', 'data', 'ptr'))\n logger = trt.Logger(trt.Logger.INFO)\n with open(w, 'rb') as f, trt.Runtime(logger) as runtime:\n model = runtime.deserialize_cuda_engine(f.read())\n context = model.create_execution_context()\n bindings = OrderedDict()\n output_names = []\n fp16 = False # default updated below\n dynamic = False\n for i in range(model.num_bindings):\n name = model.get_binding_name(i)\n dtype = trt.nptype(model.get_binding_dtype(i))\n if model.binding_is_input(i):\n if -1 in tuple(model.get_binding_shape(i)): # dynamic\n dynamic = True\n context.set_binding_shape(i, tuple(model.get_profile_shape(0, i)[2]))\n if dtype == np.float16:\n fp16 = True\n else: # output\n output_names.append(name)\n shape = tuple(context.get_binding_shape(i))\n im = torch.from_numpy(np.empty(shape, dtype=dtype)).to(device)\n bindings[name] = Binding(name, dtype, shape, im, int(im.data_ptr()))\n binding_addrs = OrderedDict((n, d.ptr) for n, d in bindings.items())\n batch_size = bindings['images'].shape[0] # if dynamic, this is instead max batch size\n elif coreml: # CoreML\n LOGGER.info(f'Loading {w} for CoreML inference...')\n import coremltools as ct\n model = ct.models.MLModel(w)\n elif saved_model: # TF SavedModel\n LOGGER.info(f'Loading {w} for TensorFlow SavedModel inference...')\n import tensorflow as tf\n keras = False # assume TF1 saved_model\n model = tf.keras.models.load_model(w) if keras else tf.saved_model.load(w)\n elif pb: # GraphDef https://www.tensorflow.org/guide/migrate#a_graphpb_or_graphpbtxt\n LOGGER.info(f'Loading {w} for TensorFlow GraphDef inference...')\n import tensorflow as tf\n\n def wrap_frozen_graph(gd, inputs, outputs):\n x = tf.compat.v1.wrap_function(lambda: tf.compat.v1.import_graph_def(gd, name=\"\"), []) # wrapped\n ge = x.graph.as_graph_element\n return x.prune(tf.nest.map_structure(ge, inputs), tf.nest.map_structure(ge, outputs))\n\n def gd_outputs(gd):\n name_list, input_list = [], []\n for node in gd.node: # tensorflow.core.framework.node_def_pb2.NodeDef\n name_list.append(node.name)\n input_list.extend(node.input)\n return sorted(f'{x}:0' for x in list(set(name_list) - set(input_list)) if not x.startswith('NoOp'))\n\n gd = tf.Graph().as_graph_def() # TF GraphDef\n with open(w, 'rb') as f:\n gd.ParseFromString(f.read())\n frozen_func = wrap_frozen_graph(gd, inputs=\"x:0\", outputs=gd_outputs(gd))\n elif tflite or edgetpu: # https://www.tensorflow.org/lite/guide/python#install_tensorflow_lite_for_python\n try: # https://coral.ai/docs/edgetpu/tflite-python/#update-existing-tf-lite-code-for-the-edge-tpu\n from tflite_runtime.interpreter import Interpreter, load_delegate\n except ImportError:\n import tensorflow as tf\n Interpreter, load_delegate = tf.lite.Interpreter, tf.lite.experimental.load_delegate,\n if edgetpu: # TF Edge TPU https://coral.ai/software/#edgetpu-runtime\n LOGGER.info(f'Loading {w} for TensorFlow Lite Edge TPU inference...')\n delegate = {\n 'Linux': 'libedgetpu.so.1',\n 'Darwin': 'libedgetpu.1.dylib',\n 'Windows': 
'edgetpu.dll'}[platform.system()]\n interpreter = Interpreter(model_path=w, experimental_delegates=[load_delegate(delegate)])\n else: # TFLite\n LOGGER.info(f'Loading {w} for TensorFlow Lite inference...')\n interpreter = Interpreter(model_path=w) # load TFLite model\n interpreter.allocate_tensors() # allocate\n input_details = interpreter.get_input_details() # inputs\n output_details = interpreter.get_output_details() # outputs\n # load metadata\n with contextlib.suppress(zipfile.BadZipFile):\n with zipfile.ZipFile(w, \"r\") as model:\n meta_file = model.namelist()[0]\n meta = ast.literal_eval(model.read(meta_file).decode(\"utf-8\"))\n stride, names = int(meta['stride']), meta['names']\n elif tfjs: # TF.js\n raise NotImplementedError('ERROR: YOLOv5 TF.js inference is not supported')\n elif paddle: # PaddlePaddle\n LOGGER.info(f'Loading {w} for PaddlePaddle inference...')\n check_requirements('paddlepaddle-gpu' if cuda else 'paddlepaddle')\n import paddle.inference as pdi\n if not Path(w).is_file(): # if not *.pdmodel\n w = next(Path(w).rglob('*.pdmodel')) # get *.pdmodel file from *_paddle_model dir\n weights = Path(w).with_suffix('.pdiparams')\n config = pdi.Config(str(w), str(weights))\n if cuda:\n config.enable_use_gpu(memory_pool_init_size_mb=2048, device_id=0)\n predictor = pdi.create_predictor(config)\n input_handle = predictor.get_input_handle(predictor.get_input_names()[0])\n output_names = predictor.get_output_names()\n elif triton: # NVIDIA Triton Inference Server\n LOGGER.info(f'Using {w} as Triton Inference Server...')\n check_requirements('tritonclient[all]')\n from utils.triton import TritonRemoteModel\n model = TritonRemoteModel(url=w)\n nhwc = model.runtime.startswith(\"tensorflow\")\n else:\n raise NotImplementedError(f'ERROR: {w} is not a supported format')\n\n # class names\n if 'names' not in locals():\n names = yaml_load(data)['names'] if data else {i: f'class{i}' for i in range(999)}\n if names[0] == 'n01440764' and len(names) == 1000: # ImageNet\n names = yaml_load(ROOT / 'data/ImageNet.yaml')['names'] # human-readable names\n\n self.__dict__.update(locals()) # assign all variables to self\n\n def forward(self, im, augment=False, visualize=False):\n # YOLOv5 MultiBackend inference\n b, ch, h, w = im.shape # batch, channel, height, width\n if self.fp16 and im.dtype != torch.float16:\n im = im.half() # to FP16\n if self.nhwc:\n im = im.permute(0, 2, 3, 1) # torch BCHW to numpy BHWC shape(1,320,192,3)\n\n if self.pt: # PyTorch\n y = self.model(im, augment=augment, visualize=visualize) if augment or visualize else self.model(im)\n elif self.jit: # TorchScript\n y = self.model(im)\n elif self.dnn: # ONNX OpenCV DNN\n im = im.cpu().numpy() # torch to numpy\n self.net.setInput(im)\n y = self.net.forward()\n elif self.onnx: # ONNX Runtime\n im = im.cpu().numpy() # torch to numpy\n y = self.session.run(self.output_names, {self.session.get_inputs()[0].name: im})\n elif self.xml: # OpenVINO\n im = im.cpu().numpy() # FP32\n y = list(self.executable_network([im]).values())\n elif self.engine: # TensorRT\n if self.dynamic and im.shape != self.bindings['images'].shape:\n i = self.model.get_binding_index('images')\n self.context.set_binding_shape(i, im.shape) # reshape if dynamic\n self.bindings['images'] = self.bindings['images']._replace(shape=im.shape)\n for name in self.output_names:\n i = self.model.get_binding_index(name)\n self.bindings[name].data.resize_(tuple(self.context.get_binding_shape(i)))\n s = self.bindings['images'].shape\n assert im.shape == s, f\"input size 
{im.shape} {'>' if self.dynamic else 'not equal to'} max model size {s}\"\n self.binding_addrs['images'] = int(im.data_ptr())\n self.context.execute_v2(list(self.binding_addrs.values()))\n y = [self.bindings[x].data for x in sorted(self.output_names)]\n elif self.coreml: # CoreML\n im = im.cpu().numpy()\n im = Image.fromarray((im[0] * 255).astype('uint8'))\n # im = im.resize((192, 320), Image.ANTIALIAS)\n y = self.model.predict({'image': im}) # coordinates are xywh normalized\n if 'confidence' in y:\n box = xywh2xyxy(y['coordinates'] * [[w, h, w, h]]) # xyxy pixels\n conf, cls = y['confidence'].max(1), y['confidence'].argmax(1).astype(np.float)\n y = np.concatenate((box, conf.reshape(-1, 1), cls.reshape(-1, 1)), 1)\n else:\n y = list(reversed(y.values())) # reversed for segmentation models (pred, proto)\n elif self.paddle: # PaddlePaddle\n im = im.cpu().numpy().astype(np.float32)\n self.input_handle.copy_from_cpu(im)\n self.predictor.run()\n y = [self.predictor.get_output_handle(x).copy_to_cpu() for x in self.output_names]\n elif self.triton: # NVIDIA Triton Inference Server\n y = self.model(im)\n else: # TensorFlow (SavedModel, GraphDef, Lite, Edge TPU)\n im = im.cpu().numpy()\n if self.saved_model: # SavedModel\n y = self.model(im, training=False) if self.keras else self.model(im)\n elif self.pb: # GraphDef\n y = self.frozen_func(x=self.tf.constant(im))\n else: # Lite or Edge TPU\n input = self.input_details[0]\n int8 = input['dtype'] == np.uint8 # is TFLite quantized uint8 model\n if int8:\n scale, zero_point = input['quantization']\n im = (im / scale + zero_point).astype(np.uint8) # de-scale\n self.interpreter.set_tensor(input['index'], im)\n self.interpreter.invoke()\n y = []\n for output in self.output_details:\n x = self.interpreter.get_tensor(output['index'])\n if int8:\n scale, zero_point = output['quantization']\n x = (x.astype(np.float32) - zero_point) * scale # re-scale\n y.append(x)\n y = [x if isinstance(x, np.ndarray) else x.numpy() for x in y]\n y[0][..., :4] *= [w, h, w, h] # xywh normalized to pixels\n\n if isinstance(y, (list, tuple)):\n return self.from_numpy(y[0]) if len(y) == 1 else [self.from_numpy(x) for x in y]\n else:\n return self.from_numpy(y)\n\n def from_numpy(self, x):\n return torch.from_numpy(x).to(self.device) if isinstance(x, np.ndarray) else x\n\n def warmup(self, imgsz=(1, 3, 640, 640)):\n # Warmup model by running inference once\n warmup_types = self.pt, self.jit, self.onnx, self.engine, self.saved_model, self.pb, self.triton\n if any(warmup_types) and (self.device.type != 'cpu' or self.triton):\n im = torch.empty(*imgsz, dtype=torch.half if self.fp16 else torch.float, device=self.device) # input\n for _ in range(2 if self.jit else 1): #\n self.forward(im) # warmup\n\n @staticmethod\n def _model_type(p='path/to/model.pt'):\n # Return model type from model path, i.e. 
path='path/to/model.onnx' -> type=onnx\n # types = [pt, jit, onnx, xml, engine, coreml, saved_model, pb, tflite, edgetpu, tfjs, paddle]\n from export import export_formats\n from utils.downloads import is_url\n sf = list(export_formats().Suffix) # export suffixes\n if not is_url(p, check=False):\n check_suffix(p, sf) # checks\n url = urlparse(p) # if url may be Triton inference server\n types = [s in Path(p).name for s in sf]\n types[8] &= not types[9] # tflite &= not edgetpu\n triton = not any(types) and all([any(s in url.scheme for s in [\"http\", \"grpc\"]), url.netloc])\n return types + [triton]\n\n @staticmethod\n def _load_metadata(f=Path('path/to/meta.yaml')):\n # Load metadata from meta.yaml if it exists\n if f.exists():\n d = yaml_load(f)\n return d['stride'], d['names'] # assign stride, names\n return None, None" }, { "identifier": "SegmentationModel", "path": "models/yolo.py", "snippet": "class SegmentationModel(DetectionModel):\n # YOLOv5 segmentation model\n def __init__(self, cfg='yolov5s-seg.yaml', ch=3, nc=None, anchors=None):\n super().__init__(cfg, ch, nc, anchors)" }, { "identifier": "Callbacks", "path": "utils/callbacks.py", "snippet": "class Callbacks:\n \"\"\"\"\n Handles all registered callbacks for YOLOv5 Hooks\n \"\"\"\n\n def __init__(self):\n # Define the available callbacks\n self._callbacks = {\n 'on_pretrain_routine_start': [],\n 'on_pretrain_routine_end': [],\n 'on_train_start': [],\n 'on_train_epoch_start': [],\n 'on_train_batch_start': [],\n 'optimizer_step': [],\n 'on_before_zero_grad': [],\n 'on_train_batch_end': [],\n 'on_train_epoch_end': [],\n 'on_val_start': [],\n 'on_val_batch_start': [],\n 'on_val_image_end': [],\n 'on_val_batch_end': [],\n 'on_val_end': [],\n 'on_fit_epoch_end': [], # fit = train + val\n 'on_model_save': [],\n 'on_train_end': [],\n 'on_params_update': [],\n 'teardown': [],}\n self.stop_training = False # set True to interrupt training\n\n def register_action(self, hook, name='', callback=None):\n \"\"\"\n Register a new action to a callback hook\n\n Args:\n hook: The callback hook name to register the action to\n name: The name of the action for later reference\n callback: The callback to fire\n \"\"\"\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n assert callable(callback), f\"callback '{callback}' is not callable\"\n self._callbacks[hook].append({'name': name, 'callback': callback})\n\n def get_registered_actions(self, hook=None):\n \"\"\"\"\n Returns all the registered actions by callback hook\n\n Args:\n hook: The name of the hook to check, defaults to all\n \"\"\"\n return self._callbacks[hook] if hook else self._callbacks\n\n def run(self, hook, *args, thread=False, **kwargs):\n \"\"\"\n Loop through the registered actions and fire all callbacks on main thread\n\n Args:\n hook: The name of the hook to check, defaults to all\n args: Arguments to receive from YOLOv5\n thread: (boolean) Run callbacks in daemon thread\n kwargs: Keyword Arguments to receive from YOLOv5\n \"\"\"\n\n assert hook in self._callbacks, f\"hook '{hook}' not found in callbacks {self._callbacks}\"\n for logger in self._callbacks[hook]:\n if thread:\n threading.Thread(target=logger['callback'], args=args, kwargs=kwargs, daemon=True).start()\n else:\n logger['callback'](*args, **kwargs)" }, { "identifier": "LOGGER", "path": "utils/general.py", "snippet": "LOGGER = logging.getLogger(LOGGING_NAME) # define globally (used in train.py, val.py, detect.py, etc.)" }, { "identifier": "NUM_THREADS", "path": 
"utils/general.py", "snippet": "NUM_THREADS = min(8, max(1, os.cpu_count() - 1)) # number of YOLOv5 multiprocessing threads" }, { "identifier": "TQDM_BAR_FORMAT", "path": "utils/general.py", "snippet": "TQDM_BAR_FORMAT = '{l_bar}{bar:10}| {n_fmt}/{total_fmt} {elapsed}' # tqdm bar format" }, { "identifier": "Profile", "path": "utils/general.py", "snippet": "class Profile(contextlib.ContextDecorator):\n # YOLOv5 Profile class. Usage: @Profile() decorator or 'with Profile():' context manager\n def __init__(self, t=0.0):\n self.t = t\n self.cuda = torch.cuda.is_available()\n\n def __enter__(self):\n self.start = self.time()\n return self\n\n def __exit__(self, type, value, traceback):\n self.dt = self.time() - self.start # delta-time\n self.t += self.dt # accumulate dt\n\n def time(self):\n if self.cuda:\n torch.cuda.synchronize()\n return time.time()" }, { "identifier": "check_dataset", "path": "utils/general.py", "snippet": "def check_dataset(data, autodownload=True):\n # Download, check and/or unzip dataset if not found locally\n\n # Download (optional)\n extract_dir = ''\n if isinstance(data, (str, Path)) and (is_zipfile(data) or is_tarfile(data)):\n download(data, dir=f'{DATASETS_DIR}/{Path(data).stem}', unzip=True, delete=False, curl=False, threads=1)\n data = next((DATASETS_DIR / Path(data).stem).rglob('*.yaml'))\n extract_dir, autodownload = data.parent, False\n\n # Read yaml (optional)\n if isinstance(data, (str, Path)):\n data = yaml_load(data) # dictionary\n\n # Checks\n for k in 'train', 'val', 'names':\n assert k in data, emojis(f\"data.yaml '{k}:' field missing ❌\")\n if isinstance(data['names'], (list, tuple)): # old array format\n data['names'] = dict(enumerate(data['names'])) # convert to dict\n assert all(isinstance(k, int) for k in data['names'].keys()), 'data.yaml names keys must be integers, i.e. 
2: car'\n data['nc'] = len(data['names'])\n\n # Resolve paths\n path = Path(extract_dir or data.get('path') or '') # optional 'path' default to '.'\n if not path.is_absolute():\n path = (ROOT / path).resolve()\n data['path'] = path # download scripts\n for k in 'train', 'val', 'test':\n if data.get(k): # prepend path\n if isinstance(data[k], str):\n x = (path / data[k]).resolve()\n if not x.exists() and data[k].startswith('../'):\n x = (path / data[k][3:]).resolve()\n data[k] = str(x)\n else:\n data[k] = [str((path / x).resolve()) for x in data[k]]\n\n # Parse yaml\n train, val, test, s = (data.get(x) for x in ('train', 'val', 'test', 'download'))\n if val:\n val = [Path(x).resolve() for x in (val if isinstance(val, list) else [val])] # val path\n if not all(x.exists() for x in val):\n LOGGER.info('\\nDataset not found ⚠️, missing paths %s' % [str(x) for x in val if not x.exists()])\n if not s or not autodownload:\n raise Exception('Dataset not found ❌')\n t = time.time()\n if s.startswith('http') and s.endswith('.zip'): # URL\n f = Path(s).name # filename\n LOGGER.info(f'Downloading {s} to {f}...')\n torch.hub.download_url_to_file(s, f)\n Path(DATASETS_DIR).mkdir(parents=True, exist_ok=True) # create root\n unzip_file(f, path=DATASETS_DIR) # unzip\n Path(f).unlink() # remove zip\n r = None # success\n elif s.startswith('bash '): # bash script\n LOGGER.info(f'Running {s} ...')\n r = os.system(s)\n else: # python script\n r = exec(s, {'yaml': data}) # return None\n dt = f'({round(time.time() - t, 1)}s)'\n s = f\"success ✅ {dt}, saved to {colorstr('bold', DATASETS_DIR)}\" if r in (0, None) else f\"failure {dt} ❌\"\n LOGGER.info(f\"Dataset download {s}\")\n check_font('Arial.ttf' if is_ascii(data['names']) else 'Arial.Unicode.ttf', progress=True) # download fonts\n return data # dictionary" }, { "identifier": "check_img_size", "path": "utils/general.py", "snippet": "def check_img_size(imgsz, s=32, floor=0):\n # Verify image size is a multiple of stride s in each dimension\n if isinstance(imgsz, int): # integer i.e. img_size=640\n new_size = max(make_divisible(imgsz, int(s)), floor)\n else: # list i.e. 
img_size=[640, 480]\n imgsz = list(imgsz) # convert to list if tuple\n new_size = [max(make_divisible(x, int(s)), floor) for x in imgsz]\n if new_size != imgsz:\n LOGGER.warning(f'WARNING ⚠️ --img-size {imgsz} must be multiple of max stride {s}, updating to {new_size}')\n return new_size" }, { "identifier": "check_requirements", "path": "utils/general.py", "snippet": "@TryExcept()\ndef check_requirements(requirements=ROOT / 'requirements.txt', exclude=(), install=True, cmds=''):\n # Check installed dependencies meet YOLOv5 requirements (pass *.txt file or list of packages or single package str)\n prefix = colorstr('red', 'bold', 'requirements:')\n check_python() # check python version\n if isinstance(requirements, Path): # requirements.txt file\n file = requirements.resolve()\n assert file.exists(), f\"{prefix} {file} not found, check failed.\"\n with file.open() as f:\n requirements = [f'{x.name}{x.specifier}' for x in pkg.parse_requirements(f) if x.name not in exclude]\n elif isinstance(requirements, str):\n requirements = [requirements]\n\n s = ''\n n = 0\n for r in requirements:\n try:\n pkg.require(r)\n except (pkg.VersionConflict, pkg.DistributionNotFound): # exception if requirements not met\n s += f'\"{r}\" '\n n += 1\n\n if s and install and AUTOINSTALL: # check environment variable\n LOGGER.info(f\"{prefix} YOLOv5 requirement{'s' * (n > 1)} {s}not found, attempting AutoUpdate...\")\n try:\n # assert check_online(), \"AutoUpdate skipped (offline)\"\n LOGGER.info(check_output(f'pip install {s} {cmds}', shell=True).decode())\n source = file if 'file' in locals() else requirements\n s = f\"{prefix} {n} package{'s' * (n > 1)} updated per {source}\\n\" \\\n f\"{prefix} ⚠️ {colorstr('bold', 'Restart runtime or rerun command for updates to take effect')}\\n\"\n LOGGER.info(s)\n except Exception as e:\n LOGGER.warning(f'{prefix} ❌ {e}')" }, { "identifier": "check_yaml", "path": "utils/general.py", "snippet": "def check_yaml(file, suffix=('.yaml', '.yml')):\n # Search/download YAML file (if necessary) and return path, checking suffix\n return check_file(file, suffix)" }, { "identifier": "coco80_to_coco91_class", "path": "utils/general.py", "snippet": "def coco80_to_coco91_class(): # converts 80-index (val2014) to 91-index (paper)\n # https://tech.amikelive.com/node-718/what-object-categories-labels-are-in-coco-dataset/\n # a = np.loadtxt('data/coco.names', dtype='str', delimiter='\\n')\n # b = np.loadtxt('data/coco_paper.names', dtype='str', delimiter='\\n')\n # x1 = [list(a[i] == b).index(True) + 1 for i in range(80)] # darknet to coco\n # x2 = [list(b[i] == a).index(True) if any(b[i] == a) else None for i in range(91)] # coco to darknet\n return [\n 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34,\n 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63,\n 64, 65, 67, 70, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]" }, { "identifier": "colorstr", "path": "utils/general.py", "snippet": "def colorstr(*input):\n # Colors a string https://en.wikipedia.org/wiki/ANSI_escape_code, i.e. 
colorstr('blue', 'hello world')\n *args, string = input if len(input) > 1 else ('blue', 'bold', input[0]) # color arguments, string\n colors = {\n 'black': '\\033[30m', # basic colors\n 'red': '\\033[31m',\n 'green': '\\033[32m',\n 'yellow': '\\033[33m',\n 'blue': '\\033[34m',\n 'magenta': '\\033[35m',\n 'cyan': '\\033[36m',\n 'white': '\\033[37m',\n 'bright_black': '\\033[90m', # bright colors\n 'bright_red': '\\033[91m',\n 'bright_green': '\\033[92m',\n 'bright_yellow': '\\033[93m',\n 'bright_blue': '\\033[94m',\n 'bright_magenta': '\\033[95m',\n 'bright_cyan': '\\033[96m',\n 'bright_white': '\\033[97m',\n 'end': '\\033[0m', # misc\n 'bold': '\\033[1m',\n 'underline': '\\033[4m'}\n return ''.join(colors[x] for x in args) + f'{string}' + colors['end']" }, { "identifier": "increment_path", "path": "utils/general.py", "snippet": "def increment_path(path, exist_ok=False, sep='', mkdir=False):\n # Increment file or directory path, i.e. runs/exp --> runs/exp{sep}2, runs/exp{sep}3, ... etc.\n path = Path(path) # os-agnostic\n if path.exists() and not exist_ok:\n path, suffix = (path.with_suffix(''), path.suffix) if path.is_file() else (path, '')\n\n # Method 1\n for n in range(2, 9999):\n p = f'{path}{sep}{n}{suffix}' # increment path\n if not os.path.exists(p): #\n break\n path = Path(p)\n\n # Method 2 (deprecated)\n # dirs = glob.glob(f\"{path}{sep}*\") # similar paths\n # matches = [re.search(rf\"{path.stem}{sep}(\\d+)\", d) for d in dirs]\n # i = [int(m.groups()[0]) for m in matches if m] # indices\n # n = max(i) + 1 if i else 2 # increment number\n # path = Path(f\"{path}{sep}{n}{suffix}\") # increment path\n\n if mkdir:\n path.mkdir(parents=True, exist_ok=True) # make directory\n\n return path" }, { "identifier": "non_max_suppression", "path": "utils/general.py", "snippet": "def non_max_suppression(\n prediction,\n conf_thres=0.25,\n iou_thres=0.45,\n classes=None,\n agnostic=False,\n multi_label=False,\n labels=(),\n max_det=300,\n nm=0, # number of masks\n):\n \"\"\"Non-Maximum Suppression (NMS) on inference results to reject overlapping detections\n\n Returns:\n list of detections, on (n,6) tensor per image [xyxy, conf, cls]\n \"\"\"\n\n if isinstance(prediction, (list, tuple)): # YOLOv5 model in validation model, output = (inference_out, loss_out)\n prediction = prediction[0] # select only inference output\n\n device = prediction.device\n mps = 'mps' in device.type # Apple MPS\n if mps: # MPS not fully supported yet, convert tensors to CPU before NMS\n prediction = prediction.cpu()\n bs = prediction.shape[0] # batch size\n nc = prediction.shape[2] - nm - 5 # number of classes\n xc = prediction[..., 4] > conf_thres # candidates\n\n # Checks\n assert 0 <= conf_thres <= 1, f'Invalid Confidence threshold {conf_thres}, valid values are between 0.0 and 1.0'\n assert 0 <= iou_thres <= 1, f'Invalid IoU {iou_thres}, valid values are between 0.0 and 1.0'\n\n # Settings\n # min_wh = 2 # (pixels) minimum box width and height\n max_wh = 7680 # (pixels) maximum box width and height\n max_nms = 30000 # maximum number of boxes into torchvision.ops.nms()\n time_limit = 0.5 + 0.05 * bs # seconds to quit after\n redundant = True # require redundant detections\n multi_label &= nc > 1 # multiple labels per box (adds 0.5ms/img)\n merge = False # use merge-NMS\n\n t = time.time()\n mi = 5 + nc # mask start index\n output = [torch.zeros((0, 6 + nm), device=prediction.device)] * bs\n for xi, x in enumerate(prediction): # image index, image inference\n # Apply constraints\n # x[((x[..., 2:4] < min_wh) | 
(x[..., 2:4] > max_wh)).any(1), 4] = 0 # width-height\n x = x[xc[xi]] # confidence\n\n # Cat apriori labels if autolabelling\n if labels and len(labels[xi]):\n lb = labels[xi]\n v = torch.zeros((len(lb), nc + nm + 5), device=x.device)\n v[:, :4] = lb[:, 1:5] # box\n v[:, 4] = 1.0 # conf\n v[range(len(lb)), lb[:, 0].long() + 5] = 1.0 # cls\n x = torch.cat((x, v), 0)\n\n # If none remain process next image\n if not x.shape[0]:\n continue\n\n # Compute conf\n x[:, 5:] *= x[:, 4:5] # conf = obj_conf * cls_conf\n\n # Box/Mask\n box = xywh2xyxy(x[:, :4]) # center_x, center_y, width, height) to (x1, y1, x2, y2)\n mask = x[:, mi:] # zero columns if no masks\n\n # Detections matrix nx6 (xyxy, conf, cls)\n if multi_label:\n i, j = (x[:, 5:mi] > conf_thres).nonzero(as_tuple=False).T\n x = torch.cat((box[i], x[i, 5 + j, None], j[:, None].float(), mask[i]), 1)\n else: # best class only\n conf, j = x[:, 5:mi].max(1, keepdim=True)\n x = torch.cat((box, conf, j.float(), mask), 1)[conf.view(-1) > conf_thres]\n\n # Filter by class\n if classes is not None:\n x = x[(x[:, 5:6] == torch.tensor(classes, device=x.device)).any(1)]\n\n # Apply finite constraint\n # if not torch.isfinite(x).all():\n # x = x[torch.isfinite(x).all(1)]\n\n # Check shape\n n = x.shape[0] # number of boxes\n if not n: # no boxes\n continue\n elif n > max_nms: # excess boxes\n x = x[x[:, 4].argsort(descending=True)[:max_nms]] # sort by confidence\n else:\n x = x[x[:, 4].argsort(descending=True)] # sort by confidence\n\n # Batched NMS\n c = x[:, 5:6] * (0 if agnostic else max_wh) # classes\n boxes, scores = x[:, :4] + c, x[:, 4] # boxes (offset by class), scores\n i = torchvision.ops.nms(boxes, scores, iou_thres) # NMS\n #i = my_soft_nms(boxes, scores, iou_thres) \n if i.shape[0] > max_det: # limit detections\n i = i[:max_det]\n if merge and (1 < n < 3E3): # Merge NMS (boxes merged using weighted mean)\n # update boxes as boxes(i,4) = weights(i,n) * boxes(n,4)\n iou = box_iou(boxes[i], boxes) > iou_thres # iou matrix\n weights = iou * scores[None] # box weights\n x[i, :4] = torch.mm(weights, x[:, :4]).float() / weights.sum(1, keepdim=True) # merged boxes\n if redundant:\n i = i[iou.sum(1) > 1] # require redundancy\n\n output[xi] = x[i]\n if mps:\n output[xi] = output[xi].to(device)\n if (time.time() - t) > time_limit:\n LOGGER.warning(f'WARNING ⚠️ NMS time limit {time_limit:.3f}s exceeded')\n break # time limit exceeded\n\n return output" }, { "identifier": "print_args", "path": "utils/general.py", "snippet": "def print_args(args: Optional[dict] = None, show_file=True, show_func=False):\n # Print function arguments (optional args dict)\n x = inspect.currentframe().f_back # previous frame\n file, _, func, _, _ = inspect.getframeinfo(x)\n if args is None: # get args automatically\n args, _, _, frm = inspect.getargvalues(x)\n args = {k: v for k, v in frm.items() if k in args}\n try:\n file = Path(file).resolve().relative_to(ROOT).with_suffix('')\n except ValueError:\n file = Path(file).stem\n s = (f'{file}: ' if show_file else '') + (f'{func}: ' if show_func else '')\n LOGGER.info(colorstr(s) + ', '.join(f'{k}={v}' for k, v in args.items()))" }, { "identifier": "scale_boxes", "path": "utils/general.py", "snippet": "def scale_boxes(img1_shape, boxes, img0_shape, ratio_pad=None):\n # Rescale boxes (xyxy) from img1_shape to img0_shape\n if ratio_pad is None: # calculate from img0_shape\n gain = min(img1_shape[0] / img0_shape[0], img1_shape[1] / img0_shape[1]) # gain = old / new\n pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] 
- img0_shape[0] * gain) / 2 # wh padding\n else:\n gain = ratio_pad[0][0]\n pad = ratio_pad[1]\n\n boxes[:, [0, 2]] -= pad[0] # x padding\n boxes[:, [1, 3]] -= pad[1] # y padding\n boxes[:, :4] /= gain\n clip_boxes(boxes, img0_shape)\n return boxes" }, { "identifier": "xywh2xyxy", "path": "utils/general.py", "snippet": "def xywh2xyxy(x):\n # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = x[:, 0] - x[:, 2] / 2 # top left x\n y[:, 1] = x[:, 1] - x[:, 3] / 2 # top left y\n y[:, 2] = x[:, 0] + x[:, 2] / 2 # bottom right x\n y[:, 3] = x[:, 1] + x[:, 3] / 2 # bottom right y\n return y" }, { "identifier": "xyxy2xywh", "path": "utils/general.py", "snippet": "def xyxy2xywh(x):\n # Convert nx4 boxes from [x1, y1, x2, y2] to [x, y, w, h] where xy1=top-left, xy2=bottom-right\n y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x)\n y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center\n y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center\n y[:, 2] = x[:, 2] - x[:, 0] # width\n y[:, 3] = x[:, 3] - x[:, 1] # height\n return y" }, { "identifier": "ConfusionMatrix", "path": "utils/metrics.py", "snippet": "class ConfusionMatrix:\n # Updated version of https://github.com/kaanakan/object_detection_confusion_matrix\n def __init__(self, nc, conf=0.25, iou_thres=0.45):\n self.matrix = np.zeros((nc + 1, nc + 1))\n self.nc = nc # number of classes\n self.conf = conf\n self.iou_thres = iou_thres\n\n def process_batch(self, detections, labels):\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n detections (Array[N, 6]), x1, y1, x2, y2, conf, class\n labels (Array[M, 5]), class, x1, y1, x2, y2\n Returns:\n None, updates confusion matrix accordingly\n \"\"\"\n if detections is None:\n gt_classes = labels.int()\n for gc in gt_classes:\n self.matrix[self.nc, gc] += 1 # background FN\n return\n\n detections = detections[detections[:, 4] > self.conf]\n gt_classes = labels[:, 0].int()\n detection_classes = detections[:, 5].int()\n iou = box_iou(labels[:, 1:], detections[:, :4])\n\n x = torch.where(iou > self.iou_thres)\n if x[0].shape[0]:\n matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy()\n if x[0].shape[0] > 1:\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 1], return_index=True)[1]]\n matches = matches[matches[:, 2].argsort()[::-1]]\n matches = matches[np.unique(matches[:, 0], return_index=True)[1]]\n else:\n matches = np.zeros((0, 3))\n\n n = matches.shape[0] > 0\n m0, m1, _ = matches.transpose().astype(int)\n for i, gc in enumerate(gt_classes):\n j = m0 == i\n if n and sum(j) == 1:\n self.matrix[detection_classes[m1[j]], gc] += 1 # correct\n else:\n self.matrix[self.nc, gc] += 1 # true background\n\n if n:\n for i, dc in enumerate(detection_classes):\n if not any(m1 == i):\n self.matrix[dc, self.nc] += 1 # predicted background\n\n def tp_fp(self):\n tp = self.matrix.diagonal() # true positives\n fp = self.matrix.sum(1) - tp # false positives\n # fn = self.matrix.sum(0) - tp # false negatives (missed detections)\n return tp[:-1], fp[:-1] # remove background class\n\n @TryExcept('WARNING ⚠️ ConfusionMatrix plot failure')\n def plot(self, normalize=True, save_dir='', names=()):\n import seaborn as sn\n\n array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-9) if normalize else 1) # normalize columns\n array[array < 0.005] = 
np.nan # don't annotate (would appear as 0.00)\n\n fig, ax = plt.subplots(1, 1, figsize=(12, 9), tight_layout=True)\n nc, nn = self.nc, len(names) # number of classes, names\n sn.set(font_scale=1.0 if nc < 50 else 0.8) # for label size\n labels = (0 < nn < 99) and (nn == nc) # apply names to ticklabels\n ticklabels = (names + ['background']) if labels else \"auto\"\n with warnings.catch_warnings():\n warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered\n sn.heatmap(array,\n ax=ax,\n annot=nc < 30,\n annot_kws={\n \"size\": 8},\n cmap='Blues',\n fmt='.2f',\n square=True,\n vmin=0.0,\n xticklabels=ticklabels,\n yticklabels=ticklabels).set_facecolor((1, 1, 1))\n ax.set_ylabel('True')\n ax.set_ylabel('Predicted')\n ax.set_title('Confusion Matrix')\n fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250)\n plt.close(fig)\n\n def print(self):\n for i in range(self.nc + 1):\n print(' '.join(map(str, self.matrix[i])))" }, { "identifier": "box_iou", "path": "utils/metrics.py", "snippet": "def box_iou(box1, box2, eps=1e-7):\n # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py\n \"\"\"\n Return intersection-over-union (Jaccard index) of boxes.\n Both sets of boxes are expected to be in (x1, y1, x2, y2) format.\n Arguments:\n box1 (Tensor[N, 4])\n box2 (Tensor[M, 4])\n Returns:\n iou (Tensor[N, M]): the NxM matrix containing the pairwise\n IoU values for every element in boxes1 and boxes2\n \"\"\"\n\n # inter(N,M) = (rb(N,M,2) - lt(N,M,2)).clamp(0).prod(2)\n (a1, a2), (b1, b2) = box1.unsqueeze(1).chunk(2, 2), box2.unsqueeze(0).chunk(2, 2)\n inter = (torch.min(a2, b2) - torch.max(a1, b1)).clamp(0).prod(2)\n\n # IoU = inter / (area1 + area2 - inter)\n return inter / ((a2 - a1).prod(2) + (b2 - b1).prod(2) - inter + eps)" }, { "identifier": "output_to_target", "path": "utils/plots.py", "snippet": "def output_to_target(output, max_det=300):\n # Convert model output to target format [batch_id, class_id, x, y, w, h, conf] for plotting\n targets = []\n for i, o in enumerate(output):\n box, conf, cls = o[:max_det, :6].cpu().split((4, 1, 1), 1)\n j = torch.full((conf.shape[0], 1), i)\n targets.append(torch.cat((j, cls, xyxy2xywh(box), conf), 1))\n return torch.cat(targets, 0).numpy()" }, { "identifier": "plot_val_study", "path": "utils/plots.py", "snippet": "def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study()\n # Plot file=study.txt generated by val.py (or plot all study*.txt in dir)\n save_dir = Path(file).parent if file else Path(dir)\n plot2 = False # plot additional results\n if plot2:\n ax = plt.subplots(2, 4, figsize=(10, 6), tight_layout=True)[1].ravel()\n\n fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True)\n # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]:\n for f in sorted(save_dir.glob('study*.txt')):\n y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T\n x = np.arange(y.shape[1]) if x is None else np.array(x)\n if plot2:\n s = ['P', 'R', '[email protected]', '[email protected]:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)']\n for i in range(7):\n ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8)\n ax[i].set_title(s[i])\n\n j = y[3].argmax() + 1\n ax2.plot(y[5, 1:j],\n y[3, 1:j] * 1E2,\n '.-',\n linewidth=2,\n markersize=8,\n label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO'))\n\n ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 
40.5, 43.0, 47.5, 49.7, 51.5],\n 'k.-',\n linewidth=2,\n markersize=8,\n alpha=.25,\n label='EfficientDet')\n\n ax2.grid(alpha=0.2)\n ax2.set_yticks(np.arange(20, 60, 5))\n ax2.set_xlim(0, 57)\n ax2.set_ylim(25, 55)\n ax2.set_xlabel('GPU Speed (ms/img)')\n ax2.set_ylabel('COCO AP val')\n ax2.legend(loc='lower right')\n f = save_dir / 'study.png'\n print(f'Saving {f}...')\n plt.savefig(f, dpi=300)" }, { "identifier": "create_dataloader", "path": "utils/segment/dataloaders.py", "snippet": "def create_dataloader(path,\n imgsz,\n batch_size,\n stride,\n single_cls=False,\n hyp=None,\n augment=False,\n cache=False,\n pad=0.0,\n rect=False,\n rank=-1,\n workers=8,\n image_weights=False,\n quad=False,\n prefix='',\n shuffle=False,\n mask_downsample_ratio=1,\n overlap_mask=False):\n if rect and shuffle:\n LOGGER.warning('WARNING ⚠️ --rect is incompatible with DataLoader shuffle, setting shuffle=False')\n shuffle = False\n with torch_distributed_zero_first(rank): # init dataset *.cache only once if DDP\n dataset = LoadImagesAndLabelsAndMasks(\n path,\n imgsz,\n batch_size,\n augment=augment, # augmentation\n hyp=hyp, # hyperparameters\n rect=rect, # rectangular batches\n cache_images=cache,\n single_cls=single_cls,\n stride=int(stride),\n pad=pad,\n image_weights=image_weights,\n prefix=prefix,\n downsample_ratio=mask_downsample_ratio,\n overlap=overlap_mask)\n\n batch_size = min(batch_size, len(dataset))\n nd = torch.cuda.device_count() # number of CUDA devices\n nw = min([os.cpu_count() // max(nd, 1), batch_size if batch_size > 1 else 0, workers]) # number of workers\n sampler = None if rank == -1 else distributed.DistributedSampler(dataset, shuffle=shuffle)\n loader = DataLoader if image_weights else InfiniteDataLoader # only DataLoader allows for attribute updates\n generator = torch.Generator()\n generator.manual_seed(6148914691236517205 + RANK)\n return loader(\n dataset,\n batch_size=batch_size,\n shuffle=shuffle and sampler is None,\n num_workers=nw,\n sampler=sampler,\n pin_memory=True,\n collate_fn=LoadImagesAndLabelsAndMasks.collate_fn4 if quad else LoadImagesAndLabelsAndMasks.collate_fn,\n worker_init_fn=seed_worker,\n generator=generator,\n ), dataset" }, { "identifier": "mask_iou", "path": "utils/segment/general.py", "snippet": "def mask_iou(mask1, mask2, eps=1e-7):\n \"\"\"\n mask1: [N, n] m1 means number of predicted objects\n mask2: [M, n] m2 means number of gt objects\n Note: n means image_w x image_h\n\n return: masks iou, [N, M]\n \"\"\"\n intersection = torch.matmul(mask1, mask2.t()).clamp(0)\n union = (mask1.sum(1)[:, None] + mask2.sum(1)[None]) - intersection # (area1 + area2) - intersection\n return intersection / (union + eps)" }, { "identifier": "process_mask", "path": "utils/segment/general.py", "snippet": "def process_mask(protos, masks_in, bboxes, shape, upsample=False):\n \"\"\"\n Crop before upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n ih, iw = shape\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw) # CHW\n\n downsampled_bboxes = bboxes.clone()\n downsampled_bboxes[:, 0] *= mw / iw\n downsampled_bboxes[:, 2] *= mw / iw\n downsampled_bboxes[:, 3] *= mh / ih\n downsampled_bboxes[:, 1] *= mh / ih\n\n masks = crop_mask(masks, downsampled_bboxes) # CHW\n if upsample:\n masks = F.interpolate(masks[None], shape, mode='bilinear', 
align_corners=False)[0] # CHW\n return masks.gt_(0.5)" }, { "identifier": "process_mask_upsample", "path": "utils/segment/general.py", "snippet": "def process_mask_upsample(protos, masks_in, bboxes, shape):\n \"\"\"\n Crop after upsample.\n proto_out: [mask_dim, mask_h, mask_w]\n out_masks: [n, mask_dim], n is number of masks after nms\n bboxes: [n, 4], n is number of masks after nms\n shape:input_image_size, (h, w)\n\n return: h, w, n\n \"\"\"\n\n c, mh, mw = protos.shape # CHW\n masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)\n masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0] # CHW\n masks = crop_mask(masks, bboxes) # CHW\n return masks.gt_(0.5)" }, { "identifier": "scale_image", "path": "utils/segment/general.py", "snippet": "def scale_image(im1_shape, masks, im0_shape, ratio_pad=None):\n \"\"\"\n img1_shape: model input shape, [h, w]\n img0_shape: origin pic shape, [h, w, 3]\n masks: [h, w, num]\n \"\"\"\n # Rescale coordinates (xyxy) from im1_shape to im0_shape\n if ratio_pad is None: # calculate from im0_shape\n gain = min(im1_shape[0] / im0_shape[0], im1_shape[1] / im0_shape[1]) # gain = old / new\n pad = (im1_shape[1] - im0_shape[1] * gain) / 2, (im1_shape[0] - im0_shape[0] * gain) / 2 # wh padding\n else:\n pad = ratio_pad[1]\n top, left = int(pad[1]), int(pad[0]) # y, x\n bottom, right = int(im1_shape[0] - pad[1]), int(im1_shape[1] - pad[0])\n\n if len(masks.shape) < 2:\n raise ValueError(f'\"len of masks shape\" should be 2 or 3, but got {len(masks.shape)}')\n masks = masks[top:bottom, left:right]\n # masks = masks.permute(2, 0, 1).contiguous()\n # masks = F.interpolate(masks[None], im0_shape[:2], mode='bilinear', align_corners=False)[0]\n # masks = masks.permute(1, 2, 0).contiguous()\n masks = cv2.resize(masks, (im0_shape[1], im0_shape[0]))\n\n if len(masks.shape) == 2:\n masks = masks[:, :, None]\n return masks" }, { "identifier": "Metrics", "path": "utils/segment/metrics.py", "snippet": "class Metrics:\n \"\"\"Metric for boxes and masks.\"\"\"\n\n def __init__(self) -> None:\n self.metric_box = Metric()\n self.metric_mask = Metric()\n\n def update(self, results):\n \"\"\"\n Args:\n results: Dict{'boxes': Dict{}, 'masks': Dict{}}\n \"\"\"\n self.metric_box.update(list(results[\"boxes\"].values()))\n self.metric_mask.update(list(results[\"masks\"].values()))\n\n def mean_results(self):\n return self.metric_box.mean_results() + self.metric_mask.mean_results()\n\n def class_result(self, i):\n return self.metric_box.class_result(i) + self.metric_mask.class_result(i)\n\n def get_maps(self, nc):\n return self.metric_box.get_maps(nc) + self.metric_mask.get_maps(nc)\n\n @property\n def ap_class_index(self):\n # boxes and masks have the same ap_class_index\n return self.metric_box.ap_class_index" }, { "identifier": "ap_per_class_box_and_mask", "path": "utils/segment/metrics.py", "snippet": "def ap_per_class_box_and_mask(\n tp_m,\n tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=False,\n save_dir=\".\",\n names=(),\n):\n \"\"\"\n Args:\n tp_b: tp of boxes.\n tp_m: tp of masks.\n other arguments see `func: ap_per_class`.\n \"\"\"\n results_boxes = ap_per_class(tp_b,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix=\"Box\")[2:]\n results_masks = ap_per_class(tp_m,\n conf,\n pred_cls,\n target_cls,\n plot=plot,\n save_dir=save_dir,\n names=names,\n prefix=\"Mask\")[2:]\n\n results = {\n \"boxes\": {\n \"p\": results_boxes[0],\n \"r\": results_boxes[1],\n \"ap\": results_boxes[3],\n 
\"f1\": results_boxes[2],\n \"ap_class\": results_boxes[4]},\n \"masks\": {\n \"p\": results_masks[0],\n \"r\": results_masks[1],\n \"ap\": results_masks[3],\n \"f1\": results_masks[2],\n \"ap_class\": results_masks[4]}}\n return results" }, { "identifier": "plot_images_and_masks", "path": "utils/segment/plots.py", "snippet": "@threaded\ndef plot_images_and_masks(images, targets, masks, paths=None, fname='images.jpg', names=None):\n # Plot image grid with labels\n if isinstance(images, torch.Tensor):\n images = images.cpu().float().numpy()\n if isinstance(targets, torch.Tensor):\n targets = targets.cpu().numpy()\n if isinstance(masks, torch.Tensor):\n masks = masks.cpu().numpy().astype(int)\n\n max_size = 1920 # max image size\n max_subplots = 16 # max image subplots, i.e. 4x4\n bs, _, h, w = images.shape # batch size, _, height, width\n bs = min(bs, max_subplots) # limit plot images\n ns = np.ceil(bs ** 0.5) # number of subplots (square)\n if np.max(images[0]) <= 1:\n images *= 255 # de-normalise (optional)\n\n # Build Image\n mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init\n for i, im in enumerate(images):\n if i == max_subplots: # if last batch has fewer images than we expect\n break\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n im = im.transpose(1, 2, 0)\n mosaic[y:y + h, x:x + w, :] = im\n\n # Resize (optional)\n scale = max_size / ns / max(h, w)\n if scale < 1:\n h = math.ceil(scale * h)\n w = math.ceil(scale * w)\n mosaic = cv2.resize(mosaic, tuple(int(x * ns) for x in (w, h)))\n\n # Annotate\n fs = int((h + w) * ns * 0.01) # font size\n annotator = Annotator(mosaic, line_width=round(fs / 10), font_size=fs, pil=True, example=names)\n for i in range(i + 1):\n x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin\n annotator.rectangle([x, y, x + w, y + h], None, (255, 255, 255), width=2) # borders\n if paths:\n annotator.text((x + 5, y + 5 + h), text=Path(paths[i]).name[:40], txt_color=(220, 220, 220)) # filenames\n if len(targets) > 0:\n idx = targets[:, 0] == i\n ti = targets[idx] # image targets\n\n boxes = xywh2xyxy(ti[:, 2:6]).T\n classes = ti[:, 1].astype('int')\n labels = ti.shape[1] == 6 # labels if no conf column\n conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred)\n\n if boxes.shape[1]:\n if boxes.max() <= 1.01: # if normalized with tolerance 0.01\n boxes[[0, 2]] *= w # scale to pixels\n boxes[[1, 3]] *= h\n elif scale < 1: # absolute coords need scale if image scales\n boxes *= scale\n boxes[[0, 2]] += x\n boxes[[1, 3]] += y\n for j, box in enumerate(boxes.T.tolist()):\n cls = classes[j]\n color = colors(cls)\n cls = names[cls] if names else cls\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}'\n annotator.box_label(box, label, color=color)\n\n # Plot masks\n if len(masks):\n if masks.max() > 1.0: # mean that masks are overlap\n image_masks = masks[[i]] # (1, 640, 640)\n nl = len(ti)\n index = np.arange(nl).reshape(nl, 1, 1) + 1\n image_masks = np.repeat(image_masks, nl, axis=0)\n image_masks = np.where(image_masks == index, 1.0, 0.0)\n else:\n image_masks = masks[idx]\n\n im = np.asarray(annotator.im).copy()\n for j, box in enumerate(boxes.T.tolist()):\n if labels or conf[j] > 0.25: # 0.25 conf thresh\n color = colors(classes[j])\n mh, mw = image_masks[j].shape\n if mh != h or mw != w:\n mask = image_masks[j].astype(np.uint8)\n mask = cv2.resize(mask, (w, h))\n mask = mask.astype(bool)\n else:\n mask = image_masks[j].astype(bool)\n 
with contextlib.suppress(Exception):\n im[y:y + h, x:x + w, :][mask] = im[y:y + h, x:x + w, :][mask] * 0.4 + np.array(color) * 0.6\n annotator.fromarray(im)\n annotator.im.save(fname) # save" }, { "identifier": "de_parallel", "path": "utils/torch_utils.py", "snippet": "def de_parallel(model):\n # De-parallelize a model: returns single-GPU model if model is of type DP or DDP\n return model.module if is_parallel(model) else model" }, { "identifier": "select_device", "path": "utils/torch_utils.py", "snippet": "def select_device(device='', batch_size=0, newline=True):\n # device = None or 'cpu' or 0 or '0' or '0,1,2,3'\n s = f'YOLOv5 🚀 {git_describe() or file_date()} Python-{platform.python_version()} torch-{torch.__version__} '\n device = str(device).strip().lower().replace('cuda:', '').replace('none', '') # to string, 'cuda:0' to '0'\n cpu = device == 'cpu'\n mps = device == 'mps' # Apple Metal Performance Shaders (MPS)\n if cpu or mps:\n os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False\n elif device: # non-cpu device requested\n os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - must be before assert is_available()\n assert torch.cuda.is_available() and torch.cuda.device_count() >= len(device.replace(',', '')), \\\n f\"Invalid CUDA '--device {device}' requested, use '--device cpu' or pass valid CUDA device(s)\"\n\n if not cpu and not mps and torch.cuda.is_available(): # prefer GPU if available\n devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7\n n = len(devices) # device count\n if n > 1 and batch_size > 0: # check batch_size is divisible by device_count\n assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}'\n space = ' ' * (len(s) + 1)\n for i, d in enumerate(devices):\n p = torch.cuda.get_device_properties(i)\n s += f\"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / (1 << 20):.0f}MiB)\\n\" # bytes to MB\n arg = 'cuda:0'\n elif mps and getattr(torch, 'has_mps', False) and torch.backends.mps.is_available(): # prefer MPS if available\n s += 'MPS\\n'\n arg = 'mps'\n else: # revert to CPU\n s += 'CPU\\n'\n arg = 'cpu'\n\n if not newline:\n s = s.rstrip()\n LOGGER.info(s)\n return torch.device(arg)" }, { "identifier": "smart_inference_mode", "path": "utils/torch_utils.py", "snippet": "def smart_inference_mode(torch_1_9=check_version(torch.__version__, '1.9.0')):\n # Applies torch.inference_mode() decorator if torch>=1.9.0 else torch.no_grad() decorator\n def decorate(fn):\n return (torch.inference_mode if torch_1_9 else torch.no_grad)()(fn)\n\n return decorate" } ]
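The context snippets above end with the mask post-processing helpers. Below is a minimal sketch (toy shapes and random values, not part of the dataset record) of the prototype-mask assembly that process_mask and process_mask_upsample both perform: per-detection coefficients linearly combine the prototype maps, followed by sigmoid, bilinear upsampling to the input size, and (upstream) box cropping plus a 0.5 threshold.

import torch
import torch.nn.functional as F

c, mh, mw = 32, 160, 160         # prototypes: [mask_dim, mask_h, mask_w]
n = 5                            # detections surviving NMS
protos = torch.randn(c, mh, mw)
masks_in = torch.randn(n, c)     # per-detection mask coefficients
shape = (640, 640)               # network input size (h, w)

masks = (masks_in @ protos.float().view(c, -1)).sigmoid().view(-1, mh, mw)  # (n, mh, mw)
masks = F.interpolate(masks[None], shape, mode='bilinear', align_corners=False)[0]
print(masks.shape)               # torch.Size([5, 640, 640]); crop_mask and .gt_(0.5) would follow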
import argparse
import json
import os
import sys
import numpy as np
import torch
import torch.nn.functional as F
import time
from multiprocessing.pool import ThreadPool
from pathlib import Path
from tqdm import tqdm
from models.common import DetectMultiBackend
from models.yolo import SegmentationModel
from utils.callbacks import Callbacks
from utils.general import (LOGGER, NUM_THREADS, TQDM_BAR_FORMAT, Profile, check_dataset, check_img_size, check_requirements, check_yaml, coco80_to_coco91_class, colorstr, increment_path, non_max_suppression, print_args, scale_boxes, xywh2xyxy, xyxy2xywh)
from utils.metrics import ConfusionMatrix, box_iou
from utils.plots import output_to_target, plot_val_study
from utils.segment.dataloaders import create_dataloader
from utils.segment.general import mask_iou, process_mask, process_mask_upsample, scale_image
from utils.segment.metrics import Metrics, ap_per_class_box_and_mask
from utils.segment.plots import plot_images_and_masks
from utils.torch_utils import de_parallel, select_device, smart_inference_mode
from pycocotools.mask import encode
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
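A hedged usage sketch of the validator whose code fields follow; the module path, dataset yaml, and checkpoint are assumptions, not taken from this record. YOLOv5-style entry points typically call run(**vars(parse_opt())); calling run() directly also works because every argument except data has a default.

import sys
sys.path.append('.')                       # assumed: executed from the repo root
from segment.val import run                # assumed location of the module below

results, maps, times = run(data='data/coco128-seg.yaml',  # assumed dataset yaml
                           weights='yolov5s-seg.pt',      # assumed checkpoint
                           imgsz=640,
                           half=False)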
21426
labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) if plots: confusion_matrix.process_batch(predn, labelsn) stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0) plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred # callbacks.run('on_val_batch_end') # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) metrics.update(results) nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(metrics.ap_class_index): LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # callbacks.run('on_val_end') mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() eval.accumulate() eval.summarize() results.extend(eval.stats[:2]) # update results ([email protected]:0.95, [email protected]) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/BCC.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs_2/train-seg/base/weights/best.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='6', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs_2/val_test', help='save results to project/name') parser.add_argument('--name', default='base', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args()
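The cropped_code field ends as the accumulated COCO-style JSON is handed to pycocotools. A standalone sketch of that evaluation loop, with assumed file paths, mirrors the COCO(anno) -> loadRes(pred) -> COCOeval flow for both 'bbox' and 'segm' IoU types:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

anno = COCO('annotations/instances_val2017.json')  # assumed annotation path
pred = anno.loadRes('predictions.json')            # assumed predictions path
for iou_type in ('bbox', 'segm'):
    e = COCOeval(anno, pred, iou_type)
    e.evaluate()
    e.accumulate()
    e.summarize()                                  # prints the 12 standard COCO metrics
    map_all, map50 = e.stats[0], e.stats[1]        # mAP@[.5:.95] and mAP@.5, as read above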
# YOLOv5 🚀 by Ultralytics, GPL-3.0 license """ Validate a trained YOLOv5 segment model on a segment dataset Usage: $ bash data/scripts/get_coco.sh --val --segments # download COCO-segments val split (1G, 5000 images) $ python segment/val.py --weights yolov5s-seg.pt --data coco.yaml --img 640 # validate COCO-segments Usage - formats: $ python segment/val.py --weights yolov5s-seg.pt # PyTorch yolov5s-seg.torchscript # TorchScript yolov5s-seg.onnx # ONNX Runtime or OpenCV DNN with --dnn yolov5s-seg_openvino_label # OpenVINO yolov5s-seg.engine # TensorRT yolov5s-seg.mlmodel # CoreML (macOS-only) yolov5s-seg_saved_model # TensorFlow SavedModel yolov5s-seg.pb # TensorFlow GraphDef yolov5s-seg.tflite # TensorFlow Lite yolov5s-seg_edgetpu.tflite # TensorFlow Edge TPU yolov5s-seg_paddle_model # PaddlePaddle """ FILE = Path(__file__).resolve() ROOT = FILE.parents[1] # YOLOv5 root directory if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative def save_one_txt(predn, save_conf, shape, file): # Save one txt result gn = torch.tensor(shape)[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(file, 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') def save_one_json(predn, jdict, path, class_map, pred_masks): # Save one JSON result {"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236} def single_encode(x): rle = encode(np.asarray(x[:, :, None], order="F", dtype="uint8"))[0] rle["counts"] = rle["counts"].decode("utf-8") return rle image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner pred_masks = np.transpose(pred_masks, (2, 0, 1)) with ThreadPool(NUM_THREADS) as pool: rles = pool.map(single_encode, pred_masks) for i, (p, b) in enumerate(zip(predn.tolist(), box.tolist())): jdict.append({ 'image_id': image_id, 'category_id': class_map[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5), 'segmentation': rles[i]}) def process_batch(detections, labels, iouv, pred_masks=None, gt_masks=None, overlap=False, masks=False): """ Return correct prediction matrix Arguments: detections (array[N, 6]), x1, y1, x2, y2, conf, class labels (array[M, 5]), class, x1, y1, x2, y2 Returns: correct (array[N, 10]), for 10 IoU levels """ if masks: if overlap: nl = len(labels) index = torch.arange(nl, device=gt_masks.device).view(nl, 1, 1) + 1 gt_masks = gt_masks.repeat(nl, 1, 1) # shape(1,640,640) -> (n,640,640) gt_masks = torch.where(gt_masks == index, 1.0, 0.0) if gt_masks.shape[1:] != pred_masks.shape[1:]: gt_masks = F.interpolate(gt_masks[None], pred_masks.shape[1:], mode="bilinear", align_corners=False)[0] gt_masks = gt_masks.gt_(0.5) iou = mask_iou(gt_masks.view(gt_masks.shape[0], -1), pred_masks.view(pred_masks.shape[0], -1)) else: # boxes iou = box_iou(labels[:, 1:], detections[:, :4]) correct = np.zeros((detections.shape[0], iouv.shape[0])).astype(bool) correct_class = labels[:, 0:1] == detections[:, 5] for i in range(len(iouv)): x = torch.where((iou >= iouv[i]) & correct_class) # IoU > threshold and classes match if x[0].shape[0]: matches = torch.cat((torch.stack(x, 1), iou[x[0], x[1]][:, None]), 1).cpu().numpy() # [label, detect, iou] if x[0].shape[0] > 1: matches = 
matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 1], return_index=True)[1]] # matches = matches[matches[:, 2].argsort()[::-1]] matches = matches[np.unique(matches[:, 0], return_index=True)[1]] correct[matches[:, 1].astype(int), i] = True return torch.tensor(correct, dtype=torch.bool, device=iouv.device) @smart_inference_mode() def run( data, weights=None, # model.pt path(s) batch_size=32, # batch size imgsz=640, # inference size (pixels) conf_thres=0.001, # confidence threshold iou_thres=0.6, # NMS IoU threshold max_det=300, # maximum detections per image task='val', # train, val, test, speed or study device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu workers=8, # max dataloader workers (per RANK in DDP mode) single_cls=False, # treat as single-class dataset augment=False, # augmented inference verbose=False, # verbose output save_txt=False, # save results to *.txt save_hybrid=False, # save label+prediction hybrid results to *.txt save_conf=False, # save confidences in --save-txt labels save_json=False, # save a COCO-JSON results file project=ROOT / 'runs/val-seg', # save to project/name name='exp', # save to project/name exist_ok=False, # existing project/name ok, do not increment half=True, # use FP16 half-precision inference dnn=False, # use OpenCV DNN for ONNX inference model=None, dataloader=None, save_dir=Path(''), plots=True, overlap=False, mask_downsample_ratio=1, compute_loss=None, callbacks=Callbacks(), ): if save_json: check_requirements(['pycocotools']) process = process_mask_upsample # more accurate else: process = process_mask # faster # Initialize/load model and set device training = model is not None if training: # called by train.py device, pt, jit, engine = next(model.parameters()).device, True, False, False # get model device, PyTorch model half &= device.type != 'cpu' # half precision only supported on CUDA model.half() if half else model.float() nm = de_parallel(model).model[-1].nm # number of masks else: # called directly device = select_device(device, batch_size=batch_size) # Directories save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = DetectMultiBackend(weights, device=device, dnn=dnn, data=data, fp16=half) stride, pt, jit, engine = model.stride, model.pt, model.jit, model.engine imgsz = check_img_size(imgsz, s=stride) # check image size half = model.fp16 # FP16 supported on limited backends with CUDA nm = de_parallel(model).model.model[-1].nm if isinstance(model, SegmentationModel) else 32 # number of masks if engine: batch_size = model.batch_size else: device = model.device if not (pt or jit): batch_size = 1 # export.py models default to batch-size 1 LOGGER.info(f'Forcing --batch-size 1 square inference (1,3,{imgsz},{imgsz}) for non-PyTorch models') # Data data = check_dataset(data) # check # Configure model.eval() cuda = device.type != 'cpu' is_coco = isinstance(data.get('val'), str) and data['val'].endswith(f'coco{os.sep}val2017.txt') # COCO dataset nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10, device=device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Dataloader if not training: if pt and not single_cls: # check --weights are trained on --data ncm = model.model.nc assert ncm == nc, f'{weights} ({ncm} classes) trained on different --data than what you passed ({nc} ' \ f'classes). 
Pass correct combination of --weights and --data that are trained together.' model.warmup(imgsz=(1 if pt else batch_size, 3, imgsz, imgsz)) # warmup pad, rect = (0.0, False) if task == 'speed' else (0.5, pt) # square inference for benchmarks task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images dataloader = create_dataloader(data[task], imgsz, batch_size, stride, single_cls, pad=pad, rect=rect, workers=workers, prefix=colorstr(f'{task}: '), overlap_mask=overlap, mask_downsample_ratio=mask_downsample_ratio)[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = model.names if hasattr(model, 'names') else model.module.names # get class names if isinstance(names, (list, tuple)): # old format names = dict(enumerate(names)) class_map = coco80_to_coco91_class() if is_coco else list(range(1000)) s = ('%22s' + '%11s' * 10) % ('Class', 'Images', 'Instances', 'Box(P', "R", "mAP50", "mAP50-95)", "Mask(P", "R", "mAP50", "mAP50-95)") dt = Profile(), Profile(), Profile() metrics = Metrics() loss = torch.zeros(4, device=device) jdict, stats = [], [] # callbacks.run('on_val_start') pbar = tqdm(dataloader, desc=s, bar_format=TQDM_BAR_FORMAT) # progress bar for batch_i, (im, targets, paths, shapes, masks) in enumerate(pbar): # callbacks.run('on_val_batch_start') with dt[0]: if cuda: im = im.to(device, non_blocking=True) targets = targets.to(device) masks = masks.to(device) masks = masks.float() im = im.half() if half else im.float() # uint8 to fp16/32 im /= 255 # 0 - 255 to 0.0 - 1.0 nb, _, height, width = im.shape # batch size, channels, height, width # Inference with dt[1]: act = time.time() preds, protos, train_out = model(im) if compute_loss else (*model(im, augment=augment)[:2], None) #print('time.time():',time.time()-act) # Loss if compute_loss: loss += compute_loss((train_out, protos), targets, masks)[1] # box, obj, cls # NMS targets[:, 2:] *= torch.tensor((width, height, width, height), device=device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling with dt[2]: preds = non_max_suppression(preds, conf_thres, iou_thres, labels=lb, multi_label=True, agnostic=single_cls, max_det=max_det, nm=nm) # Metrics plot_masks = [] # masks for plotting for si, (pred, proto) in enumerate(zip(preds, protos)): labels = targets[targets[:, 0] == si, 1:] nl, npr = labels.shape[0], pred.shape[0] # number of labels, predictions path, shape = Path(paths[si]), shapes[si][0] correct_masks = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init correct_bboxes = torch.zeros(npr, niou, dtype=torch.bool, device=device) # init seen += 1 if npr == 0: if nl: stats.append((correct_masks, correct_bboxes, *torch.zeros((2, 0), device=device), labels[:, 0])) if plots: confusion_matrix.process_batch(detections=None, labels=labels[:, 0]) continue # Masks midx = [si] if overlap else targets[:, 0] == si gt_masks = masks[midx] pred_masks = process(proto, pred[:, 6:], pred[:, :4], shape=im[si].shape[1:]) # Predictions if single_cls: pred[:, 5] = 0 predn = pred.clone() scale_boxes(im[si].shape[1:], predn[:, :4], shape, shapes[si][1]) # native-space pred # Evaluate if nl: tbox = xywh2xyxy(labels[:, 1:5]) # target boxes scale_boxes(im[si].shape[1:], tbox, shape, shapes[si][1]) # native-space labels labelsn = torch.cat((labels[:, 0:1], tbox), 1) # native-space labels correct_bboxes = process_batch(predn, labelsn, iouv) correct_masks = process_batch(predn, labelsn, iouv, pred_masks, gt_masks, overlap=overlap, masks=True) if plots: 
confusion_matrix.process_batch(predn, labelsn) stats.append((correct_masks, correct_bboxes, pred[:, 4], pred[:, 5], labels[:, 0])) # (conf, pcls, tcls) pred_masks = torch.as_tensor(pred_masks, dtype=torch.uint8) if plots and batch_i < 3: plot_masks.append(pred_masks[:15].cpu()) # filter top 15 to plot # Save/log if save_txt: save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / f'{path.stem}.txt') if save_json: pred_masks = scale_image(im[si].shape[1:], pred_masks.permute(1, 2, 0).contiguous().cpu().numpy(), shape, shapes[si][1]) save_one_json(predn, jdict, path, class_map, pred_masks) # append to COCO-JSON dictionary # callbacks.run('on_val_image_end', pred, predn, path, names, im[si]) # Plot images if plots and batch_i < 3: if len(plot_masks): plot_masks = torch.cat(plot_masks, dim=0) plot_images_and_masks(im, targets, masks, paths, save_dir / f'val_batch{batch_i}_labels.jpg', names) plot_images_and_masks(im, output_to_target(preds, max_det=15), plot_masks, paths, save_dir / f'val_batch{batch_i}_pred.jpg', names) # pred # callbacks.run('on_val_batch_end') # Compute metrics stats = [torch.cat(x, 0).cpu().numpy() for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): results = ap_per_class_box_and_mask(*stats, plot=plots, save_dir=save_dir, names=names) metrics.update(results) nt = np.bincount(stats[4].astype(int), minlength=nc) # number of targets per class # Print results pf = '%22s' + '%11i' * 2 + '%11.3g' * 8 # print format LOGGER.info(pf % ("all", seen, nt.sum(), *metrics.mean_results())) if nt.sum() == 0: LOGGER.warning(f'WARNING ⚠️ no labels found in {task} set, can not compute metrics without labels') # Print results per class if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats): for i, c in enumerate(metrics.ap_class_index): LOGGER.info(pf % (names[c], seen, nt[c], *metrics.class_result(i))) # Print speeds t = tuple(x.t / seen * 1E3 for x in dt) # speeds per image if not training: shape = (batch_size, 3, imgsz, imgsz) LOGGER.info(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # callbacks.run('on_val_end') mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask = metrics.mean_results() # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json LOGGER.info(f'\nEvaluating pycocotools mAP... 
saving {pred_json}...') with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api results = [] for eval in COCOeval(anno, pred, 'bbox'), COCOeval(anno, pred, 'segm'): if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.im_files] # img ID to evaluate eval.evaluate() eval.accumulate() eval.summarize() results.extend(eval.stats[:2]) # update results ([email protected]:0.95, [email protected]) map_bbox, map50_bbox, map_mask, map50_mask = results except Exception as e: LOGGER.info(f'pycocotools unable to run: {e}') # Return results model.float() # for training if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' LOGGER.info(f"Results saved to {colorstr('bold', save_dir)}{s}") final_metric = mp_bbox, mr_bbox, map50_bbox, map_bbox, mp_mask, mr_mask, map50_mask, map_mask return (*final_metric, *(loss.cpu() / len(dataloader)).tolist()), metrics.get_maps(nc), t def parse_opt(): parser = argparse.ArgumentParser() parser.add_argument('--data', type=str, default=ROOT / 'data/BCC.yaml', help='dataset.yaml path') parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'runs_2/train-seg/base/weights/best.pt', help='model path(s)') parser.add_argument('--batch-size', type=int, default=1, help='batch size') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)') parser.add_argument('--conf-thres', type=float, default=0.01, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=300, help='maximum detections per image') parser.add_argument('--task', default='val', help='train, val, test, speed or study') parser.add_argument('--device', default='6', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') parser.add_argument('--workers', type=int, default=8, help='max dataloader workers (per RANK in DDP mode)') parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset') parser.add_argument('--augment', action='store_true', help='augmented inference') parser.add_argument('--verbose', action='store_true', help='report mAP by class') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file') parser.add_argument('--project', default=ROOT / 'runs_2/val_test', help='save results to project/name') parser.add_argument('--name', default='base', help='save to project/name') parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment') parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference') parser.add_argument('--dnn', action='store_true', help='use OpenCV DNN for ONNX inference') opt = parser.parse_args()
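A toy sketch (made-up IoU values) of the match de-duplication inside process_batch above: candidate (label, detection, IoU) triples are sorted best-first, then np.unique keeps the first, i.e. highest-IoU, hit per detection index and per label index.

import numpy as np

matches = np.array([     # columns: label idx, detection idx, IoU
    [0, 0, 0.90],
    [0, 1, 0.80],        # the same label matched by a second detection
    [1, 1, 0.85],        # that detection also matches another label
    [2, 2, 0.60],
])
matches = matches[matches[:, 2].argsort()[::-1]]                   # best IoU first
matches = matches[np.unique(matches[:, 1], return_index=True)[1]]  # one hit per detection
matches = matches[np.unique(matches[:, 0], return_index=True)[1]]  # one hit per label
print(matches)           # surviving rows set correct[detection, iou_threshold] = True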
opt.data = check_yaml(opt.data) # check YAML
10
2023-12-10 14:18:29+00:00
24k
youngskkim/CRN
exps/base_exp.py
[ { "identifier": "NuscDatasetRadarDet", "path": "datasets/nusc_det_dataset.py", "snippet": "class NuscDatasetRadarDet(Dataset):\n def __init__(self,\n ida_aug_conf,\n bda_aug_conf,\n rda_aug_conf,\n classes,\n data_root,\n info_paths,\n is_train,\n load_interval=1,\n num_sweeps=1,\n img_conf=dict(img_mean=[123.675, 116.28, 103.53],\n img_std=[58.395, 57.12, 57.375],\n to_rgb=True),\n img_backbone_conf=dict(\n x_bound=[-51.2, 51.2, 0.8],\n y_bound=[-51.2, 51.2, 0.8],\n z_bound=[-5, 3, 8],\n d_bound=[2.0, 58.0, 0.5]\n ),\n drop_aug_conf=None,\n return_image=True,\n return_depth=False,\n return_radar_pv=False,\n depth_path='depth_gt',\n radar_pv_path='radar_pv_filter',\n remove_z_axis=False,\n use_cbgs=False,\n gt_for_radar_only=False,\n sweep_idxes=list(),\n key_idxes=list()):\n \"\"\"Dataset used for bevdetection task.\n Args:\n ida_aug_conf (dict): Config for ida augmentation.\n bda_aug_conf (dict): Config for bda augmentation.\n classes (list): Class names.\n use_cbgs (bool): Whether to use cbgs strategy,\n Default: False.\n num_sweeps (int): Number of sweeps to be used for each sample.\n default: 1.\n img_conf (dict): Config for image.\n return_depth (bool): Whether to use depth gt.\n default: False.\n sweep_idxes (list): List of sweep idxes to be used.\n default: list().\n key_idxes (list): List of key idxes to be used.\n default: list().\n \"\"\"\n super().__init__()\n if isinstance(info_paths, list):\n self.infos = list()\n for info_path in info_paths:\n self.infos.extend(mmcv.load(info_path))\n else:\n self.infos = mmcv.load(info_paths)\n self.is_train = is_train\n self.ida_aug_conf = ida_aug_conf\n self.bda_aug_conf = bda_aug_conf\n self.rda_aug_conf = rda_aug_conf\n self.drop_aug_conf = drop_aug_conf\n self.data_root = data_root\n self.classes = classes\n self.use_cbgs = use_cbgs\n if self.use_cbgs:\n self.cat2id = {name: i for i, name in enumerate(self.classes)}\n self.sample_indices = self._get_sample_indices()\n self.num_sweeps = num_sweeps\n self.img_mean = np.array(img_conf['img_mean'], np.float32)\n self.img_std = np.array(img_conf['img_std'], np.float32)\n self.to_rgb = img_conf['to_rgb']\n self.img_backbone_conf = img_backbone_conf\n\n self.return_image = return_image\n self.return_depth = return_depth\n self.return_radar_pv = return_radar_pv\n\n self.remove_z_axis = remove_z_axis\n self.gt_for_radar_only = gt_for_radar_only\n\n assert sum([sweep_idx >= 0 for sweep_idx in sweep_idxes]) \\\n == len(sweep_idxes), 'All `sweep_idxes` must greater \\\n than or equal to 0.'\n\n self.sweeps_idx = sweep_idxes\n assert sum([key_idx < 0 for key_idx in key_idxes]) == len(key_idxes),\\\n 'All `key_idxes` must less than 0.'\n self.key_idxes = [0] + key_idxes\n if load_interval > 1:\n self.infos = self.infos[::load_interval]\n self.depth_path = depth_path\n self.radar_pv_path = radar_pv_path\n\n self.max_radar_points_pv = 1536\n self.max_distance_pv = self.img_backbone_conf['d_bound'][1]\n\n def _get_sample_indices(self):\n \"\"\"Load annotations from ann_file.\n\n Args:\n ann_file (str): Path of the annotation file.\n\n Returns:\n list[dict]: List of annotations after class sampling.\n \"\"\"\n class_sample_idxs = {cat_id: [] for cat_id in self.cat2id.values()}\n for idx, info in enumerate(self.infos):\n gt_names = set(\n [ann_info['category_name'] for ann_info in info['ann_infos']])\n for gt_name in gt_names:\n gt_name = map_name_from_general_to_detection[gt_name]\n if gt_name not in self.classes:\n continue\n class_sample_idxs[self.cat2id[gt_name]].append(idx)\n duplicated_samples = 
sum(\n [len(v) for _, v in class_sample_idxs.items()])\n class_distribution = {\n k: len(v) / duplicated_samples\n for k, v in class_sample_idxs.items()\n }\n\n sample_indices = []\n\n frac = 1.0 / len(self.classes)\n ratios = [frac / v for v in class_distribution.values()]\n for cls_inds, ratio in zip(list(class_sample_idxs.values()), ratios):\n sample_indices += np.random.choice(cls_inds,\n int(len(cls_inds) *\n ratio)).tolist()\n return sample_indices\n\n def sample_ida_augmentation(self):\n \"\"\"Generate ida augmentation values based on ida_config.\"\"\"\n H, W = self.ida_aug_conf['H'], self.ida_aug_conf['W']\n fH, fW = self.ida_aug_conf['final_dim']\n if self.is_train:\n resize = np.random.uniform(*self.ida_aug_conf['resize_lim'])\n resize_dims = (int(W * resize), int(H * resize))\n newW, newH = resize_dims\n crop_h = int(\n (1 - np.random.uniform(*self.ida_aug_conf['bot_pct_lim'])) *\n newH) - fH\n crop_w = int(np.random.uniform(0, max(0, newW - fW)))\n crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n flip = False\n if self.ida_aug_conf['rand_flip'] and np.random.choice([0, 1]):\n flip = True\n rotate_ida = np.random.uniform(*self.ida_aug_conf['rot_lim'])\n else:\n resize = max(fH / H, fW / W)\n resize_dims = (int(W * resize), int(H * resize))\n newW, newH = resize_dims\n crop_h = int(\n (1 - np.mean(self.ida_aug_conf['bot_pct_lim'])) * newH) - fH\n crop_w = int(max(0, newW - fW) / 2)\n crop = (crop_w, crop_h, crop_w + fW, crop_h + fH)\n flip = False\n rotate_ida = 0\n return resize, resize_dims, crop, flip, rotate_ida\n\n def sample_bda_augmentation(self):\n \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n if self.is_train:\n if np.random.uniform() < self.bda_aug_conf['rot_ratio']:\n rotate_bda = np.random.uniform(*self.bda_aug_conf['rot_lim'])\n else:\n rotate_bda = 0\n scale_bda = np.random.uniform(*self.bda_aug_conf['scale_lim'])\n flip_dx = np.random.uniform() < self.bda_aug_conf['flip_dx_ratio']\n flip_dy = np.random.uniform() < self.bda_aug_conf['flip_dy_ratio']\n else:\n rotate_bda = 0\n scale_bda = 1.0\n flip_dx = False\n flip_dy = False\n return rotate_bda, scale_bda, flip_dx, flip_dy\n\n def sample_radar_augmentation(self):\n \"\"\"Generate bda augmentation values based on bda_config.\"\"\"\n if self.is_train:\n radar_idx = np.random.choice(self.rda_aug_conf['N_sweeps'],\n self.rda_aug_conf['N_use'],\n replace=False)\n else:\n radar_idx = np.arange(self.rda_aug_conf['N_sweeps'])\n return radar_idx\n\n def transform_radar_pv(self, points, resize, resize_dims, crop, flip, rotate, radar_idx):\n points = points[points[:, 2] < self.max_distance_pv, :]\n\n H, W = resize_dims\n points[:, :2] = points[:, :2] * resize\n points[:, 0] -= crop[0]\n points[:, 1] -= crop[1]\n if flip:\n points[:, 0] = resize_dims[1] - points[:, 0]\n\n points[:, 0] -= W / 2.0\n points[:, 1] -= H / 2.0\n\n h = rotate / 180 * np.pi\n rot_matrix = [\n [np.cos(h), np.sin(h)],\n [-np.sin(h), np.cos(h)],\n ]\n points[:, :2] = np.matmul(rot_matrix, points[:, :2].T).T\n\n points[:, 0] += W / 2.0\n points[:, 1] += H / 2.0\n\n depth_coords = points[:, :2].astype(np.int16)\n\n valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n & (depth_coords[:, 0] < resize_dims[1])\n & (depth_coords[:, 1] >= 0)\n & (depth_coords[:, 0] >= 0))\n\n points = torch.Tensor(points[valid_mask])\n\n if self.remove_z_axis:\n points[:, 1] = 1. 
# dummy height value\n\n points_save = []\n for i in radar_idx:\n points_save.append(points[points[:, 6] == i])\n points = torch.cat(points_save, dim=0)\n\n # mean, std of rcs and speed are from train set\n points[:, 3] = (points[:, 3] - 4.783) / 7.576\n points[:, 4] = (torch.norm(points[:, 4:6], dim=1) - 0.677) / 1.976\n\n if self.is_train:\n drop_idx = np.random.uniform(size=points.shape[0]) # randomly drop points\n points = points[drop_idx > self.rda_aug_conf['drop_ratio']]\n\n num_points, num_feat = points.shape\n if num_points > self.max_radar_points_pv:\n choices = np.random.choice(num_points, self.max_radar_points_pv, replace=False)\n points = points[choices]\n else:\n num_append = self.max_radar_points_pv - num_points\n points = torch.cat([points, -999*torch.ones(num_append, num_feat)], dim=0)\n\n if num_points == 0:\n points[0, :] = points.new_tensor([0.1, 0.1, self.max_distance_pv-1, 0, 0, 0, 0])\n\n points[..., [0, 1, 2]] = points[..., [0, 2, 1]] # convert [w, h, d] to [w, d, h]\n\n return points[..., :5]\n\n def depth_transform(self, cam_depth, resize, resize_dims, crop, flip, rotate):\n \"\"\"Transform depth based on ida augmentation configuration.\n\n Args:\n cam_depth (np array): Nx3, 3: x,y,d.\n resize (float): Resize factor.\n resize_dims (tuple): Final dimension.\n crop (tuple): x1, y1, x2, y2\n flip (bool): Whether to flip.\n rotate (float): Rotation value.\n\n Returns:\n np array: [h/down_ratio, w/down_ratio, d]\n \"\"\"\n valid_depth = cam_depth[:, 2] < self.img_backbone_conf['d_bound'][1]\n cam_depth = cam_depth[valid_depth, :]\n\n H, W = resize_dims\n cam_depth[:, :2] = cam_depth[:, :2] * resize\n cam_depth[:, 0] -= crop[0]\n cam_depth[:, 1] -= crop[1]\n if flip:\n cam_depth[:, 0] = resize_dims[1] - cam_depth[:, 0]\n\n cam_depth[:, 0] -= W / 2.0\n cam_depth[:, 1] -= H / 2.0\n\n h = rotate / 180 * np.pi\n rot_matrix = [\n [np.cos(h), np.sin(h)],\n [-np.sin(h), np.cos(h)],\n ]\n cam_depth[:, :2] = np.matmul(rot_matrix, cam_depth[:, :2].T).T\n\n cam_depth[:, 0] += W / 2.0\n cam_depth[:, 1] += H / 2.0\n\n depth_coords = cam_depth[:, :2].astype(np.int16)\n\n depth_map = np.zeros(resize_dims)\n valid_mask = ((depth_coords[:, 1] < resize_dims[0])\n & (depth_coords[:, 0] < resize_dims[1])\n & (depth_coords[:, 1] >= 0)\n & (depth_coords[:, 0] >= 0))\n depth_map[depth_coords[valid_mask, 1],\n depth_coords[valid_mask, 0]] = cam_depth[valid_mask, 2]\n\n return torch.Tensor(depth_map)\n\n def get_image(self, cam_infos, cams):\n \"\"\"Given data and cam_names, return image data needed.\n\n Args:\n sweeps_data (list): Raw data used to generate the data we needed.\n cams (list): Camera names.\n\n Returns:\n Tensor: Image data after processing.\n Tensor: Transformation matrix from camera to ego.\n Tensor: Intrinsic matrix.\n Tensor: Transformation matrix for ida.\n Tensor: Transformation matrix from key\n frame camera to sweep frame camera.\n Tensor: timestamps.\n dict: meta infos needed for evaluation.\n \"\"\"\n assert len(cam_infos) > 0\n sweep_imgs = list()\n sweep_sensor2ego_mats = list()\n sweep_intrin_mats = list()\n sweep_ida_mats = list()\n sweep_sensor2sensor_mats = list()\n sweep_timestamps = list()\n sweep_gt_depths = list()\n sweep_radar_points = list()\n for cam in cams:\n imgs = list()\n sensor2ego_mats = list()\n intrin_mats = list()\n ida_mats = list()\n sensor2sensor_mats = list()\n timestamps = list()\n gt_depths = list()\n radar_points = list()\n key_info = cam_infos[0]\n resize, resize_dims, crop, flip, \\\n rotate_ida = self.sample_ida_augmentation()\n radar_idx 
= self.sample_radar_augmentation()\n\n for sweep_idx, cam_info in enumerate(cam_infos):\n img = Image.open(\n os.path.join(self.data_root, cam_info[cam]['filename']))\n\n w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n # sweep sensor to sweep ego\n sweepsensor2sweepego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepsensor2sweepego_tran = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['translation'])\n sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n (4, 4))\n sweepsensor2sweepego[3, 3] = 1\n sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n # sweep ego to global\n w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n sweepego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepego2global_tran = torch.Tensor(\n cam_info[cam]['ego_pose']['translation'])\n sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n sweepego2global[3, 3] = 1\n sweepego2global[:3, :3] = sweepego2global_rot\n sweepego2global[:3, -1] = sweepego2global_tran\n\n # global sensor to cur ego\n w, x, y, z = key_info[cam]['ego_pose']['rotation']\n keyego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keyego2global_tran = torch.Tensor(\n key_info[cam]['ego_pose']['translation'])\n keyego2global = keyego2global_rot.new_zeros((4, 4))\n keyego2global[3, 3] = 1\n keyego2global[:3, :3] = keyego2global_rot\n keyego2global[:3, -1] = keyego2global_tran\n global2keyego = keyego2global.inverse()\n\n # cur ego to sensor\n w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n keysensor2keyego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keysensor2keyego_tran = torch.Tensor(\n key_info[cam]['calibrated_sensor']['translation'])\n keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n keysensor2keyego[3, 3] = 1\n keysensor2keyego[:3, :3] = keysensor2keyego_rot\n keysensor2keyego[:3, -1] = keysensor2keyego_tran\n keyego2keysensor = keysensor2keyego.inverse()\n keysensor2sweepsensor = (\n keyego2keysensor @ global2keyego @ sweepego2global\n @ sweepsensor2sweepego).inverse()\n sweepsensor2keyego = global2keyego @ sweepego2global @\\\n sweepsensor2sweepego\n sensor2ego_mats.append(sweepsensor2keyego)\n sensor2sensor_mats.append(keysensor2sweepsensor)\n intrin_mat = torch.zeros((4, 4))\n intrin_mat[3, 3] = 1\n intrin_mat[:3, :3] = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['camera_intrinsic'])\n\n file_name = os.path.split(cam_info[cam]['filename'])[-1]\n if self.return_depth:\n point_depth = np.fromfile(os.path.join(\n self.data_root, self.depth_path, f'{file_name}.bin'),\n dtype=np.float32,\n count=-1)\n point_depth = point_depth.reshape(-1, 3)\n point_depth_augmented = self.depth_transform(\n point_depth, resize, self.ida_aug_conf['final_dim'],\n crop, flip, rotate_ida)\n gt_depths.append(point_depth_augmented)\n\n if self.return_radar_pv:\n radar_point = np.fromfile(os.path.join(\n self.data_root, self.radar_pv_path, f'{file_name}.bin'),\n dtype=np.float32,\n count=-1).reshape(-1, 7)\n radar_point_augmented = self.transform_radar_pv(\n radar_point, resize, self.ida_aug_conf['final_dim'],\n crop, flip, rotate_ida, radar_idx)\n radar_points.append(radar_point_augmented)\n\n img, ida_mat = img_transform(\n img,\n resize=resize,\n resize_dims=resize_dims,\n crop=crop,\n flip=flip,\n rotate=rotate_ida,\n )\n ida_mats.append(ida_mat)\n img = mmcv.imnormalize(np.array(img), self.img_mean,\n self.img_std, self.to_rgb)\n img = 
torch.from_numpy(img).permute(2, 0, 1)\n imgs.append(img)\n intrin_mats.append(intrin_mat)\n timestamps.append(cam_info[cam]['timestamp'])\n sweep_imgs.append(torch.stack(imgs))\n sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n sweep_intrin_mats.append(torch.stack(intrin_mats))\n sweep_ida_mats.append(torch.stack(ida_mats))\n sweep_sensor2sensor_mats.append(torch.stack(sensor2sensor_mats))\n sweep_timestamps.append(torch.tensor(timestamps))\n if self.return_depth:\n sweep_gt_depths.append(torch.stack(gt_depths))\n if self.return_radar_pv:\n sweep_radar_points.append(torch.stack(radar_points))\n\n ret_list = [\n torch.stack(sweep_imgs).permute(1, 0, 2, 3, 4),\n torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_intrin_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_ida_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_sensor2sensor_mats).permute(1, 0, 2, 3),\n torch.stack(sweep_timestamps).permute(1, 0),\n ]\n if self.return_depth:\n ret_list.append(torch.stack(sweep_gt_depths).permute(1, 0, 2, 3),)\n else:\n ret_list.append(None)\n if self.return_radar_pv:\n ret_list.append(torch.stack(sweep_radar_points).permute(1, 0, 2, 3),)\n else:\n ret_list.append(None)\n return ret_list\n\n def get_image_meta(self, cam_infos, cams):\n key_info = cam_infos[0]\n\n # Get mean pose of all cams.\n ego2global_rotation = np.mean(\n [key_info[cam]['ego_pose']['rotation'] for cam in cams], 0)\n ego2global_translation = np.mean(\n [key_info[cam]['ego_pose']['translation'] for cam in cams], 0)\n img_metas = dict(\n box_type_3d=LiDARInstance3DBoxes,\n ego2global_translation=ego2global_translation,\n ego2global_rotation=ego2global_rotation,\n )\n return img_metas\n\n def get_image_sensor2ego_mats(self, cam_infos, cams):\n sweep_sensor2ego_mats = list()\n for cam in cams:\n sensor2ego_mats = list()\n key_info = cam_infos[0]\n for sweep_idx, cam_info in enumerate(cam_infos):\n w, x, y, z = cam_info[cam]['calibrated_sensor']['rotation']\n # sweep sensor to sweep ego\n sweepsensor2sweepego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepsensor2sweepego_tran = torch.Tensor(\n cam_info[cam]['calibrated_sensor']['translation'])\n sweepsensor2sweepego = sweepsensor2sweepego_rot.new_zeros(\n (4, 4))\n sweepsensor2sweepego[3, 3] = 1\n sweepsensor2sweepego[:3, :3] = sweepsensor2sweepego_rot\n sweepsensor2sweepego[:3, -1] = sweepsensor2sweepego_tran\n # sweep ego to global\n w, x, y, z = cam_info[cam]['ego_pose']['rotation']\n sweepego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n sweepego2global_tran = torch.Tensor(\n cam_info[cam]['ego_pose']['translation'])\n sweepego2global = sweepego2global_rot.new_zeros((4, 4))\n sweepego2global[3, 3] = 1\n sweepego2global[:3, :3] = sweepego2global_rot\n sweepego2global[:3, -1] = sweepego2global_tran\n\n # global sensor to cur ego\n w, x, y, z = key_info[cam]['ego_pose']['rotation']\n keyego2global_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keyego2global_tran = torch.Tensor(\n key_info[cam]['ego_pose']['translation'])\n keyego2global = keyego2global_rot.new_zeros((4, 4))\n keyego2global[3, 3] = 1\n keyego2global[:3, :3] = keyego2global_rot\n keyego2global[:3, -1] = keyego2global_tran\n global2keyego = keyego2global.inverse()\n\n # cur ego to sensor\n w, x, y, z = key_info[cam]['calibrated_sensor']['rotation']\n keysensor2keyego_rot = torch.Tensor(\n Quaternion(w, x, y, z).rotation_matrix)\n keysensor2keyego_tran = torch.Tensor(\n key_info[cam]['calibrated_sensor']['translation'])\n 
keysensor2keyego = keysensor2keyego_rot.new_zeros((4, 4))\n keysensor2keyego[3, 3] = 1\n keysensor2keyego[:3, :3] = keysensor2keyego_rot\n keysensor2keyego[:3, -1] = keysensor2keyego_tran\n sweepsensor2keyego = global2keyego @ sweepego2global @\\\n sweepsensor2sweepego\n sensor2ego_mats.append(sweepsensor2keyego)\n sweep_sensor2ego_mats.append(torch.stack(sensor2ego_mats))\n return torch.stack(sweep_sensor2ego_mats).permute(1, 0, 2, 3)\n\n def get_gt(self, info, cams, return_corners=False):\n \"\"\"Generate gt labels from info.\n\n Args:\n info(dict): Infos needed to generate gt labels.\n cams(list): Camera names.\n\n Returns:\n Tensor: GT bboxes.\n Tensor: GT labels.\n \"\"\"\n ego2global_rotation = np.mean(\n [info['cam_infos'][cam]['ego_pose']['rotation'] for cam in cams],\n 0)\n ego2global_translation = np.mean([\n info['cam_infos'][cam]['ego_pose']['translation'] for cam in cams\n ], 0)\n trans = -np.array(ego2global_translation)\n rot = Quaternion(ego2global_rotation).inverse\n gt_boxes = list()\n gt_labels = list()\n if return_corners: # for debugging and visualization\n gt_corners = list()\n else:\n gt_corners = None\n for ann_info in info['ann_infos']:\n # Use ego coordinate.\n if self.gt_for_radar_only:\n if ann_info['num_radar_pts'] == 0:\n continue\n if map_name_from_general_to_detection[ann_info['category_name']] not in self.classes:\n continue\n if ann_info['num_lidar_pts'] + ann_info['num_radar_pts'] == 0:\n continue\n\n box = Box(\n ann_info['translation'],\n ann_info['size'],\n Quaternion(ann_info['rotation']),\n velocity=ann_info['velocity'],\n )\n box.translate(trans)\n box.rotate(rot)\n box_xyz = np.array(box.center)\n box_dxdydz = np.array(box.wlh)[[1, 0, 2]]\n box_yaw = np.array([box.orientation.yaw_pitch_roll[0]])\n box_velo = np.array(box.velocity[:2])\n gt_box = np.concatenate([box_xyz, box_dxdydz, box_yaw, box_velo])\n gt_boxes.append(gt_box)\n gt_labels.append(\n self.classes.index(map_name_from_general_to_detection[\n ann_info['category_name']]))\n if return_corners: # for debugging and visualization\n gt_corners.append(box.corners())\n\n return torch.Tensor(gt_boxes), torch.tensor(gt_labels), gt_corners\n\n def choose_cams(self):\n \"\"\"Choose cameras randomly.\n\n Returns:\n list: Cameras to be used.\n \"\"\"\n if self.is_train and self.ida_aug_conf['Ncams'] < len(\n self.ida_aug_conf['cams']):\n cams = np.random.choice(self.ida_aug_conf['cams'],\n self.ida_aug_conf['Ncams'],\n replace=False)\n else:\n cams = self.ida_aug_conf['cams']\n return cams\n\n def __getitem__(self, idx):\n if self.use_cbgs:\n idx = self.sample_indices[idx]\n cam_infos = list()\n pts_infos = list()\n cams = self.choose_cams()\n for key_idx in self.key_idxes:\n cur_idx = key_idx + idx\n # Handle scenarios when current idx doesn't have previous key\n # frame or previous key frame is from another scene.\n while self.infos[cur_idx]['scene_token'] != self.infos[idx]['scene_token']:\n cur_idx += 1\n info = self.infos[cur_idx]\n cam_infos.append(info['cam_infos'])\n pts_infos.append([info['lidar_infos']] + info['lidar_sweeps'])\n for sweep_idx in self.sweeps_idx:\n if len(info['cam_sweeps']) == 0:\n cam_infos.append(info['cam_infos'])\n else:\n # Handle scenarios when current sweep doesn't have all cam keys.\n for i in range(min(len(info['cam_sweeps']) - 1, sweep_idx), -1,\n -1):\n if sum([cam in info['cam_sweeps'][i]\n for cam in cams]) == len(cams):\n cam_infos.append(info['cam_sweeps'][i])\n break\n\n if self.return_image or self.return_depth or self.return_radar_pv:\n image_data_list = 
self.get_image(cam_infos, cams)\n (\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n sweep_timestamps,\n ) = image_data_list[:6]\n else:\n (\n sweep_imgs,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n sweep_timestamps,\n ) = None, None, None, None, None\n sweep_sensor2ego_mats = self.get_image_sensor2ego_mats(cam_infos, cams)\n\n img_metas = self.get_image_meta(cam_infos, cams)\n img_metas['token'] = self.infos[idx]['sample_token']\n gt_boxes_3d, gt_labels_3d, gt_corners = self.get_gt(self.infos[idx], cams, return_corners=False)\n\n rotate_bda, scale_bda, flip_dx, flip_dy = self.sample_bda_augmentation()\n gt_boxes_3d, bda_rot = bev_det_transform(gt_boxes_3d, rotate_bda, scale_bda, flip_dx, flip_dy)\n\n bda_mat = torch.zeros(4, 4, dtype=torch.float32)\n bda_mat[:3, :3] = bda_rot\n bda_mat[3, 3] = 1\n\n ret_list = [\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n bda_mat,\n sweep_timestamps,\n img_metas,\n gt_boxes_3d,\n gt_labels_3d,\n ]\n\n if self.return_depth:\n ret_list.append(image_data_list[6])\n else:\n ret_list.append(None)\n if self.return_radar_pv:\n ret_list.append(image_data_list[7])\n else:\n ret_list.append(None)\n\n return ret_list\n\n def __str__(self):\n return f\"\"\"NuscData: {len(self)} samples. Split: \\\n {\"train\" if self.is_train else \"val\"}.\n Augmentation Conf: {self.ida_aug_conf}\"\"\"\n\n def __len__(self):\n if self.use_cbgs:\n return len(self.sample_indices)\n else:\n return len(self.infos)" }, { "identifier": "collate_fn", "path": "datasets/nusc_det_dataset.py", "snippet": "def collate_fn(data,\n is_return_image=True,\n is_return_depth=False,\n is_return_radar_pv=False):\n assert (is_return_image or is_return_depth or is_return_radar_pv) is True\n imgs_batch = list()\n sensor2ego_mats_batch = list()\n intrin_mats_batch = list()\n ida_mats_batch = list()\n sensor2sensor_mats_batch = list()\n bda_mat_batch = list()\n gt_boxes_3d_batch = list()\n gt_labels_3d_batch = list()\n img_metas_batch = list()\n depth_labels_batch = list()\n radar_pv_batch = list()\n\n for iter_data in data:\n (\n sweep_imgs,\n sweep_sensor2ego_mats,\n sweep_intrins,\n sweep_ida_mats,\n sweep_sensor2sensor_mats,\n bda_mat,\n sweep_timestamps,\n img_metas,\n gt_boxes,\n gt_labels,\n ) = iter_data[:10]\n if is_return_depth:\n gt_depth = iter_data[10]\n depth_labels_batch.append(gt_depth)\n if is_return_radar_pv:\n radar_pv = iter_data[11]\n radar_pv_batch.append(radar_pv)\n\n imgs_batch.append(sweep_imgs)\n sensor2ego_mats_batch.append(sweep_sensor2ego_mats)\n intrin_mats_batch.append(sweep_intrins)\n ida_mats_batch.append(sweep_ida_mats)\n sensor2sensor_mats_batch.append(sweep_sensor2sensor_mats)\n bda_mat_batch.append(bda_mat)\n img_metas_batch.append(img_metas)\n gt_boxes_3d_batch.append(gt_boxes)\n gt_labels_3d_batch.append(gt_labels)\n\n if is_return_image:\n mats_dict = dict()\n mats_dict['sensor2ego_mats'] = torch.stack(sensor2ego_mats_batch)\n mats_dict['intrin_mats'] = torch.stack(intrin_mats_batch)\n mats_dict['ida_mats'] = torch.stack(ida_mats_batch)\n mats_dict['sensor2sensor_mats'] = torch.stack(sensor2sensor_mats_batch)\n mats_dict['bda_mat'] = torch.stack(bda_mat_batch)\n ret_list = [\n torch.stack(imgs_batch),\n mats_dict,\n img_metas_batch,\n gt_boxes_3d_batch,\n gt_labels_3d_batch,\n None, # reserve for segmentation\n ]\n else:\n ret_list = [\n None,\n None,\n img_metas_batch,\n gt_boxes_3d_batch,\n gt_labels_3d_batch,\n None,\n ]\n if 
is_return_depth:\n ret_list.append(torch.stack(depth_labels_batch))\n else:\n ret_list.append(None)\n if is_return_radar_pv:\n ret_list.append(torch.stack(radar_pv_batch))\n else:\n ret_list.append(None)\n\n return ret_list" }, { "identifier": "DetNuscEvaluator", "path": "evaluators/det_evaluators.py", "snippet": "class DetNuscEvaluator():\n ErrNameMapping = {\n 'trans_err': 'mATE',\n 'scale_err': 'mASE',\n 'orient_err': 'mAOE',\n 'vel_err': 'mAVE',\n 'attr_err': 'mAAE',\n }\n\n DefaultAttribute = {\n 'car': 'vehicle.parked',\n 'pedestrian': 'pedestrian.moving',\n 'trailer': 'vehicle.parked',\n 'truck': 'vehicle.parked',\n 'bus': 'vehicle.moving',\n 'motorcycle': 'cycle.without_rider',\n 'construction_vehicle': 'vehicle.parked',\n 'bicycle': 'cycle.without_rider',\n 'barrier': '',\n 'traffic_cone': '',\n }\n\n def __init__(\n self,\n class_names,\n eval_version='detection_cvpr_2019',\n data_root='./data/nuScenes',\n version='v1.0-trainval',\n modality=dict(use_lidar=False,\n use_camera=True,\n use_radar=True,\n use_map=False,\n use_external=False),\n output_dir=None,\n ) -> None:\n self.eval_version = eval_version\n self.data_root = data_root\n\n # Load config file and deserialize it.\n this_dir = osp.dirname(osp.abspath(__file__))\n with open(osp.join(this_dir, 'configs', '%s.json' % eval_version), 'r') as f:\n data = json.load(f)\n self.eval_detection_configs = DetectionConfig.deserialize(data)\n\n self.version = version\n self.class_names = class_names\n self.modality = modality\n self.output_dir = output_dir\n\n def _evaluate_single(self,\n result_path,\n logger=None,\n metric='bbox',\n result_name='pts_bbox'):\n \"\"\"Evaluation for a single model in nuScenes protocol.\n\n Args:\n result_path (str): Path of the result file.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n metric (str): Metric name used for evaluation. 
Default: 'bbox'.\n result_name (str): Result name in the metric prefix.\n Default: 'pts_bbox'.\n\n Returns:\n dict: Dictionary of evaluation details.\n \"\"\"\n from nuscenes import NuScenes\n from nuscenes.eval.detection.evaluate import NuScenesEval\n\n output_dir = osp.join(*osp.split(result_path)[:-1])\n nusc = NuScenes(version=self.version,\n dataroot=self.data_root,\n verbose=False)\n eval_set_map = {\n 'v1.0-mini': 'mini_val',\n 'v1.0-trainval': 'val',\n }\n nusc_eval = NuScenesEval(nusc,\n config=self.eval_detection_configs,\n result_path=result_path,\n eval_set=eval_set_map[self.version],\n output_dir=output_dir,\n verbose=False)\n nusc_eval.main(render_curves=False)\n # nusc_eval.main(render_curves=True, plot_examples=40)\n\n # record metrics\n metrics = mmcv.load(osp.join(output_dir, 'metrics_summary.json'))\n detail = dict()\n metric_prefix = f'{result_name}_NuScenes'\n for class_name in self.class_names:\n for k, v in metrics['label_aps'][class_name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_AP_dist_{}'.format(metric_prefix, class_name,\n k)] = val\n for k, v in metrics['label_tp_errors'][class_name].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}_{}'.format(metric_prefix, class_name, k)] = val\n for k, v in metrics['tp_errors'].items():\n val = float('{:.4f}'.format(v))\n detail['{}/{}'.format(metric_prefix,\n self.ErrNameMapping[k])] = val\n\n detail['{}/NDS'.format(metric_prefix)] = metrics['nd_score']\n detail['{}/mAP'.format(metric_prefix)] = metrics['mean_ap']\n return detail\n\n def format_results(self,\n results,\n img_metas,\n result_names=['img_bbox'],\n jsonfile_prefix=None,\n **kwargs):\n \"\"\"Format the results to json (standard format for COCO evaluation).\n\n Args:\n results (list[tuple | numpy.ndarray]): Testing results of the\n dataset.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n\n Returns:\n tuple: (result_files, tmp_dir), result_files is a dict containing \\\n the json filepaths, tmp_dir is the temporal directory created \\\n for saving json files when jsonfile_prefix is not specified.\n \"\"\"\n assert isinstance(results, list), 'results must be a list'\n\n if jsonfile_prefix is None:\n tmp_dir = tempfile.TemporaryDirectory()\n jsonfile_prefix = osp.join(tmp_dir.name, 'results')\n else:\n tmp_dir = None\n\n # currently the output prediction results could be in two formats\n # 1. list of dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...)\n # 2. 
list of dict('pts_bbox' or 'img_bbox':\n # dict('boxes_3d': ..., 'scores_3d': ..., 'labels_3d': ...))\n # this is a workaround to enable evaluation of both formats on nuScenes\n # refer to https://github.com/open-mmlab/mmdetection3d/issues/449\n # should take the inner dict out of 'pts_bbox' or 'img_bbox' dict\n result_files = dict()\n # refactor this.\n for rasult_name in result_names:\n # not evaluate 2D predictions on nuScenes\n if '2d' in rasult_name:\n continue\n print(f'\\nFormating bboxes of {rasult_name}')\n tmp_file_ = osp.join(jsonfile_prefix, rasult_name)\n if self.output_dir:\n result_files.update({\n rasult_name:\n self._format_bbox(results, img_metas, self.output_dir)\n })\n else:\n result_files.update({\n rasult_name:\n self._format_bbox(results, img_metas, tmp_file_)\n })\n return result_files, tmp_dir\n\n def evaluate(\n self,\n results,\n img_metas,\n metric='bbox',\n logger=None,\n jsonfile_prefix=None,\n result_names=['img_bbox'],\n show=False,\n out_dir=None,\n pipeline=None,\n ):\n \"\"\"Evaluation in nuScenes protocol.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n metric (str | list[str]): Metrics to be evaluated.\n logger (logging.Logger | str | None): Logger used for printing\n related information during evaluation. Default: None.\n jsonfile_prefix (str | None): The prefix of json files. It includes\n the file path and the prefix of filename, e.g., \"a/b/prefix\".\n If not specified, a temp file will be created. Default: None.\n show (bool): Whether to visualize.\n Default: False.\n out_dir (str): Path to save the visualization results.\n Default: None.\n pipeline (list[dict], optional): raw data loading for showing.\n Default: None.\n\n Returns:\n dict[str, float]: Results of each evaluation metric.\n \"\"\"\n result_files, tmp_dir = self.format_results(results, img_metas,\n result_names,\n jsonfile_prefix)\n if isinstance(result_files, dict):\n for name in result_names:\n print('Evaluating bboxes of {}'.format(name))\n print()\n self._evaluate_single(result_files[name])\n elif isinstance(result_files, str):\n self._evaluate_single(result_files)\n\n if tmp_dir is not None:\n tmp_dir.cleanup()\n\n def _format_bbox(self, results, img_metas, jsonfile_prefix=None):\n \"\"\"Convert the results to the standard format.\n\n Args:\n results (list[dict]): Testing results of the dataset.\n jsonfile_prefix (str): The prefix of the output jsonfile.\n You can specify the output directory/filename by\n modifying the jsonfile_prefix. 
Default: None.\n\n Returns:\n str: Path of the output json file.\n \"\"\"\n nusc_annos = {}\n mapped_class_names = self.class_names\n\n print('Start to convert detection format...')\n\n for sample_id, det in enumerate(mmcv.track_iter_progress(results)):\n boxes, scores, labels = det\n\n order = np.argsort(scores)[::-1]\n order = order[:500]\n\n boxes = boxes[order]\n scores = scores[order]\n labels = labels[order]\n\n sample_token = img_metas[sample_id]['token']\n trans = np.array(img_metas[sample_id]['ego2global_translation'])\n rot = Quaternion(img_metas[sample_id]['ego2global_rotation'])\n annos = list()\n for i, box in enumerate(boxes):\n name = mapped_class_names[labels[i]]\n center = box[:3]\n wlh = box[[4, 3, 5]]\n box_yaw = box[6]\n box_vel = box[7:].tolist()\n box_vel.append(0)\n quat = pyquaternion.Quaternion(axis=[0, 0, 1], radians=box_yaw)\n nusc_box = Box(center, wlh, quat, velocity=box_vel)\n nusc_box.rotate(rot)\n nusc_box.translate(trans)\n if np.sqrt(nusc_box.velocity[0]**2 +\n nusc_box.velocity[1]**2) > 0.2:\n if name in [\n 'car',\n 'construction_vehicle',\n 'bus',\n 'truck',\n 'trailer',\n ]:\n attr = 'vehicle.moving'\n elif name in ['bicycle', 'motorcycle']:\n attr = 'cycle.with_rider'\n else:\n attr = self.DefaultAttribute[name]\n else:\n if name in ['pedestrian']:\n attr = 'pedestrian.standing'\n elif name in ['bus']:\n attr = 'vehicle.stopped'\n else:\n attr = self.DefaultAttribute[name]\n nusc_anno = dict(\n sample_token=sample_token,\n translation=nusc_box.center.tolist(),\n size=nusc_box.wlh.tolist(),\n rotation=nusc_box.orientation.elements.tolist(),\n velocity=nusc_box.velocity[:2],\n detection_name=name,\n detection_score=float(scores[i]),\n attribute_name=attr,\n )\n annos.append(nusc_anno)\n # other views results of the same frame should be concatenated\n if sample_token in nusc_annos:\n nusc_annos[sample_token].extend(annos)\n else:\n nusc_annos[sample_token] = annos\n nusc_submissions = {\n 'meta': self.modality,\n 'results': nusc_annos,\n }\n mmcv.mkdir_or_exist(jsonfile_prefix)\n res_path = osp.join(jsonfile_prefix, 'results_nusc.json')\n print('Results writes to', res_path)\n mmcv.dump(nusc_submissions, res_path)\n return res_path" }, { "identifier": "BaseBEVDepth", "path": "models/base_bev_depth.py", "snippet": "class BaseBEVDepth(nn.Module):\n \"\"\"Source code of `BEVDepth`, `https://arxiv.org/abs/2112.11790`.\n\n Args:\n backbone_conf (dict): Config of backbone.\n head_conf (dict): Config of head.\n \"\"\"\n\n def __init__(self, backbone_conf, head_conf):\n super(BaseBEVDepth, self).__init__()\n self.backbone_img = BaseLSSFPN(**backbone_conf)\n self.head = BEVDepthHead(**head_conf)\n\n # for inference time measurement\n self.idx = 0\n self.times_dict = {\n 'img': [],\n 'img_backbone': [],\n 'img_dep': [],\n 'img_transform': [],\n 'img_pool': [],\n\n 'head': [],\n 'head_backbone': [],\n 'head_head': [],\n }\n\n def forward(self,\n sweep_imgs,\n mats_dict,\n is_train=False\n ):\n \"\"\"Forward function for BEVDepth\n\n Args:\n sweep_imgs (Tensor): Input images.\n mats_dict(dict):\n sensor2ego_mats(Tensor): Transformation matrix from\n camera to ego with shape of (B, num_sweeps,\n num_cameras, 4, 4).\n intrin_mats(Tensor): Intrinsic matrix with shape\n of (B, num_sweeps, num_cameras, 4, 4).\n ida_mats(Tensor): Transformation matrix for ida with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n sensor2sensor_mats(Tensor): Transformation matrix\n from key frame camera to sweep frame camera with\n shape of (B, num_sweeps, num_cameras, 4, 4).\n 
bda_mat(Tensor): Rotation matrix for bda with shape\n of (B, 4, 4).\n\n Returns:\n tuple(list[dict]): Output results for tasks.\n \"\"\"\n if is_train:\n self.time = None\n\n x, depth, _ = self.backbone_img(sweep_imgs, mats_dict,\n is_return_depth=True)\n preds, _ = self.head(x)\n return preds, depth\n else:\n if self.idx < 100: # skip few iterations for warmup\n self.times = None\n elif self.idx == 100:\n self.times = self.times_dict\n\n x, self.times = self.backbone_img(sweep_imgs, mats_dict,\n times=self.times)\n preds, self.times = self.head(x, times=self.times)\n\n if self.idx == 1000:\n time_mean = {}\n for k, v in self.times.items():\n time_mean[k] = sum(v) / len(v)\n print('img: %.2f' % time_mean['img'])\n print(' img_backbone: %.2f' % time_mean['img_backbone'])\n print(' img_dep: %.2f' % time_mean['img_dep'])\n print(' img_transform: %.2f' % time_mean['img_transform'])\n print(' img_pool: %.2f' % time_mean['img_pool'])\n print('head: %.2f' % time_mean['head'])\n print(' head_backbone: %.2f' % time_mean['head_backbone'])\n print(' head_head: %.2f' % time_mean['head_head'])\n total = time_mean['img'] + time_mean['head']\n print('total: %.2f' % total)\n print(' ')\n print('FPS: %.2f' % (1000/total))\n\n self.idx += 1\n return preds\n\n def get_targets(self, gt_boxes, gt_labels):\n \"\"\"Generate training targets for a single sample.\n\n Args:\n gt_bboxes_3d (:obj:`LiDARInstance3DBoxes`): Ground truth gt boxes.\n gt_labels_3d (torch.Tensor): Labels of boxes.\n\n Returns:\n tuple[list[torch.Tensor]]: Tuple of target including \\\n the following results in order.\n\n - list[torch.Tensor]: Heatmap scores.\n - list[torch.Tensor]: Ground truth boxes.\n - list[torch.Tensor]: Indexes indicating the position \\\n of the valid boxes.\n - list[torch.Tensor]: Masks indicating which boxes \\\n are valid.\n \"\"\"\n return self.head.get_targets(gt_boxes, gt_labels)\n\n def loss(self, targets, preds_dicts):\n \"\"\"Loss function for BEVDepth.\n\n Args:\n gt_bboxes_3d (list[:obj:`LiDARInstance3DBoxes`]): Ground\n truth gt boxes.\n gt_labels_3d (list[torch.Tensor]): Labels of boxes.\n preds_dicts (dict): Output of forward function.\n\n Returns:\n dict[str:torch.Tensor]: Loss of heatmap and bbox of each task.\n \"\"\"\n return self.head.loss(targets, preds_dicts)\n\n def get_bboxes(self, preds_dicts, img_metas=None, img=None, rescale=False):\n \"\"\"Generate bboxes from bbox head predictions.\n\n Args:\n preds_dicts (tuple[list[dict]]): Prediction results.\n img_metas (list[dict]): Point cloud and image's meta info.\n\n Returns:\n list[dict]: Decoded bbox, scores and labels after nms.\n \"\"\"\n return self.head.get_bboxes(preds_dicts, img_metas, img, rescale)" }, { "identifier": "all_gather_object", "path": "utils/torch_dist.py", "snippet": "def all_gather_object(obj):\n world_size = get_world_size()\n if world_size < 2:\n return [obj]\n output = [None for _ in range(world_size)]\n dist.all_gather_object(output, obj)\n return output" }, { "identifier": "synchronize", "path": "utils/torch_dist.py", "snippet": "def synchronize():\n \"\"\"Helper function to synchronize (barrier)\n among all processes when using distributed training\"\"\"\n if not dist.is_available():\n return\n if not dist.is_initialized():\n return\n current_world_size = dist.get_world_size()\n if current_world_size == 1:\n return\n dist.barrier()" } ]
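A note on the `_format_bbox` snippet above: its essential step is lifting each ego-frame prediction into the global frame before serialization. The sketch below isolates that transform using only the devkit calls visible in the snippet (`pyquaternion.Quaternion`, `Box`, `rotate`, `translate`); the box layout `[x, y, z, l, w, h, yaw, vx, vy]` is inferred from the indexing above, and the helper name is ours, not the repo's.

import numpy as np
from nuscenes.utils.data_classes import Box
from pyquaternion import Quaternion

def ego_box_to_global(box, ego2global_translation, ego2global_rotation):
    # box: np.ndarray [x, y, z, l, w, h, yaw, vx, vy] in the ego frame (assumed layout)
    center = box[:3]
    wlh = box[[4, 3, 5]]                                # pick (w, l, h), the order Box expects
    quat = Quaternion(axis=[0, 0, 1], radians=box[6])   # yaw is a rotation about the z axis
    velocity = (box[7], box[8], 0.0)                    # the devkit wants a 3D velocity
    nusc_box = Box(center, wlh, quat, velocity=velocity)
    nusc_box.rotate(Quaternion(ego2global_rotation))    # ego -> global: rotate first,
    nusc_box.translate(np.array(ego2global_translation))  # then translate
    return nusc_box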
from functools import partial

import mmcv
import torch
import torch.nn.functional as F
import torch.nn.parallel
import torch.utils.data
import torch.utils.data.distributed
import torchvision.models as models
from mmcv.runner import build_optimizer
from pytorch_lightning.core import LightningModule
from torch.cuda.amp.autocast_mode import autocast
from torch.optim.lr_scheduler import MultiStepLR

from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn
from evaluators.det_evaluators import DetNuscEvaluator
from models.base_bev_depth import BaseBEVDepth
from utils.torch_dist import all_gather_object, synchronize
16127
        Output:
            gt_depths: [B*N*h*w, d]
        """
        B, N, H, W = gt_depths.shape
        gt_depths = gt_depths.view(
            B * N,
            H // self.downsample_factor,
            self.downsample_factor,
            W // self.downsample_factor,
            self.downsample_factor,
            1,
        )
        gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()
        gt_depths = gt_depths.view(
            -1, self.downsample_factor * self.downsample_factor)
        gt_depths_tmp = torch.where(gt_depths == 0.0,
                                    1e5 * torch.ones_like(gt_depths),
                                    gt_depths)
        gt_depths = torch.min(gt_depths_tmp, dim=-1).values
        gt_depths = gt_depths.view(B * N, H // self.downsample_factor,
                                   W // self.downsample_factor)
        gt_depths = (gt_depths -
                     (self.dbound[0] - self.dbound[2])) / self.dbound[2]
        gt_depths = torch.where(
            (gt_depths < self.depth_channels + 1) & (gt_depths > 0.),
            gt_depths, torch.zeros_like(gt_depths))
        gt_depths = F.one_hot(gt_depths.long(),
                              num_classes=self.depth_channels + 1).view(
                                  -1, self.depth_channels + 1)[:, 1:]
        return gt_depths.float()

    def eval_step(self, batch, batch_idx, prefix: str):
        (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch
        if torch.cuda.is_available():
            if self.return_image:
                sweep_imgs = sweep_imgs.cuda()
                for key, value in mats.items():
                    mats[key] = value.cuda()
            if self.return_radar_pv:
                pts_pv = pts_pv.cuda()
        preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False)
        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
            results = self.model.module.get_bboxes(preds, img_metas)
        else:
            results = self.model.get_bboxes(preds, img_metas)
        for i in range(len(results)):
            results[i][0] = results[i][0].tensor.detach().cpu().numpy()
            results[i][1] = results[i][1].detach().cpu().numpy()
            results[i][2] = results[i][2].detach().cpu().numpy()
            results[i].append(img_metas[i])
        return results

    def validation_epoch_end(self, validation_step_outputs):
        detection_losses = list()
        heatmap_losses = list()
        bbox_losses = list()
        depth_losses = list()
        for validation_step_output in validation_step_outputs:
            detection_losses.append(validation_step_output[0])
            heatmap_losses.append(validation_step_output[1])
            bbox_losses.append(validation_step_output[2])
            depth_losses.append(validation_step_output[3])
        synchronize()
        self.log('val/detection',
                 torch.mean(torch.stack(detection_losses)), on_epoch=True)
        self.log('val/heatmap',
                 torch.mean(torch.stack(heatmap_losses)), on_epoch=True)
        self.log('val/bbox',
                 torch.mean(torch.stack(bbox_losses)), on_epoch=True)
        self.log('val/depth',
                 torch.mean(torch.stack(depth_losses)), on_epoch=True)

    def validation_step(self, batch, batch_idx):
        (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _,
         depth_labels, pts_pv) = batch
        if torch.cuda.is_available():
            if self.return_image:
                sweep_imgs = sweep_imgs.cuda()
                for key, value in mats.items():
                    mats[key] = value.cuda()
            if self.return_radar_pv:
                pts_pv = pts_pv.cuda()
            gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]
            gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]
        with torch.no_grad():
            preds, depth_preds = self(sweep_imgs, mats,
                                      pts_pv=pts_pv, is_train=True)

            targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)
            loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)

            if len(depth_labels.shape) == 5:
                # only the key frame contributes to the depth loss
                depth_labels = depth_labels[:, 0, ...].contiguous()
            loss_depth = self.get_depth_loss(
                depth_labels.cuda(), depth_preds, weight=3.)
        return loss_detection, loss_heatmap, loss_bbox, loss_depth

    def test_epoch_end(self, test_step_outputs):
        all_pred_results = list()
        all_img_metas = list()
        for test_step_output in test_step_outputs:
            for i in range(len(test_step_output)):
                all_pred_results.append(test_step_output[i][:3])
                all_img_metas.append(test_step_output[i][3])
        synchronize()
        # TODO: find a cleaner way to get the dataset length.
        dataset_length = len(self.val_dataloader().dataset)
        all_pred_results = sum(
            map(list, zip(*all_gather_object(all_pred_results))),
            [])[:dataset_length]
        all_img_metas = sum(
            map(list, zip(*all_gather_object(all_img_metas))),
            [])[:dataset_length]
        if self.global_rank == 0:
            self.evaluator.evaluate(all_pred_results, all_img_metas)

    def configure_optimizers(self):
        optimizer = build_optimizer(self.model, self.optimizer_config)
        scheduler = MultiStepLR(optimizer, [19, 23])
        return [[optimizer], [scheduler]]

    def train_dataloader(self):
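The cropped_code field above ends mid-definition at the `train_dataloader` header. Before the full-file copy below, one clarification on `get_downsampled_gt_depth`: it min-pools each `downsample_factor x downsample_factor` patch down to its nearest non-zero depth, then buckets the metric depth into `depth_channels` one-hot bins. A minimal sketch of just the bucketing step, assuming `dbound = [d_min, d_max, d_step]` exactly as in the `d_bound` config below (the helper name is ours):

import torch
import torch.nn.functional as F

def depth_to_onehot(depth, dbound):
    # depth: metric depths in meters, where 0 means "no lidar return"
    d_min, d_max, d_step = dbound
    n_bins = int((d_max - d_min) / d_step)        # e.g. (58.0 - 2.0) / 0.8 = 70 bins
    idx = (depth - (d_min - d_step)) / d_step     # the first valid depth lands in bin 1
    idx = torch.where((idx < n_bins + 1) & (idx > 0.), idx, torch.zeros_like(idx))
    onehot = F.one_hot(idx.long(), num_classes=n_bins + 1)
    return onehot[..., 1:].float()                # drop bin 0, the "invalid" bucket

# depth 2.0 m -> first bin hot; depth 0.0 (missing) -> all-zero row
print(depth_to_onehot(torch.tensor([2.0, 0.0]), [2.0, 58.0, 0.8]).sum(-1))  # tensor([1., 0.])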
# Copyright (c) Megvii Inc. All rights reserved.
pretrain_config = dict(
    img_model_path=None,
    img_load_key=[],
    img_freeze_key=None,
    pts_model_path=None,
    pts_load_key=[])

optimizer_config = dict(
    type='AdamW',
    lr=2e-4,
    weight_decay=1e-2)

H = 900
W = 1600
final_dim = (256, 704)
img_conf = dict(img_mean=[123.675, 116.28, 103.53],
                img_std=[58.395, 57.12, 57.375],
                to_rgb=True)

ida_aug_conf = {
    'resize_lim': (0.386, 0.55),
    'final_dim': final_dim,
    'rot_lim': (-5.4, 5.4),
    'H': 900,
    'W': 1600,
    'rand_flip': True,
    'bot_pct_lim': (0.0, 0.0),
    'cams': ['CAM_FRONT_LEFT', 'CAM_FRONT', 'CAM_FRONT_RIGHT',
             'CAM_BACK_LEFT', 'CAM_BACK', 'CAM_BACK_RIGHT'],
    'Ncams': 6,
}

bda_aug_conf = {
    'rot_ratio': 1.0,
    'rot_lim': (-22.5, 22.5),
    'scale_lim': (0.95, 1.05),
    'flip_dx_ratio': 0.5,
    'flip_dy_ratio': 0.5
}

rda_aug_conf = {
    'N_sweeps': 6,
    'N_use': 5,
    'drop_ratio': 0.1,
}

backbone_img_conf = {
    'x_bound': [-51.2, 51.2, 0.8],
    'y_bound': [-51.2, 51.2, 0.8],
    'z_bound': [-5, 3, 8],
    'd_bound': [2.0, 58.0, 0.8],
    'final_dim': final_dim,
    'output_channels': 80,
    'downsample_factor': 16,
    'img_backbone_conf': dict(
        type='ResNet',
        depth=50,
        frozen_stages=0,
        out_indices=[0, 1, 2, 3],
        norm_eval=False,
        init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'),
    ),
    'img_neck_conf': dict(
        type='SECONDFPN',
        in_channels=[256, 512, 1024, 2048],
        upsample_strides=[0.25, 0.5, 1, 2],
        out_channels=[128, 128, 128, 128],
    ),
    'depth_net_conf': dict(in_channels=512, mid_channels=512),
    'camera_aware': True
}

CLASSES = [
    'car',
    'truck',
    'construction_vehicle',
    'bus',
    'trailer',
    'barrier',
    'motorcycle',
    'bicycle',
    'pedestrian',
    'traffic_cone',
]

head_conf = {
    'bev_backbone_conf': dict(
        type='ResNet',
        in_channels=80,
        depth=18,
        num_stages=3,
        strides=(1, 2, 2),
        dilations=(1, 1, 1),
        out_indices=[0, 1, 2],
        norm_eval=False,
        base_channels=160),
    'bev_neck_conf': dict(
        type='SECONDFPN',
        in_channels=[80, 160, 320, 640],
        upsample_strides=[1, 2, 4, 8],
        out_channels=[64, 64, 64, 64]),
    'tasks': [
        dict(num_class=1, class_names=['car']),
        dict(num_class=2, class_names=['truck', 'construction_vehicle']),
        dict(num_class=2, class_names=['bus', 'trailer']),
        dict(num_class=1, class_names=['barrier']),
        dict(num_class=2, class_names=['motorcycle', 'bicycle']),
        dict(num_class=2, class_names=['pedestrian', 'traffic_cone']),
    ],
    'common_heads': dict(
        reg=(2, 2), height=(1, 2), dim=(3, 2), rot=(2, 2), vel=(2, 2)),
    'bbox_coder': dict(
        type='CenterPointBBoxCoder',
        post_center_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
        max_num=500,
        score_threshold=0.1,
        out_size_factor=4,
        voxel_size=[0.2, 0.2, 8],
        pc_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
        code_size=9),
    'train_cfg': dict(
        point_cloud_range=[-51.2, -51.2, -5, 51.2, 51.2, 3],
        grid_size=[512, 512, 1],
        voxel_size=[0.2, 0.2, 8],
        out_size_factor=4,
        dense_reg=1,
        gaussian_overlap=0.1,
        max_objs=500,
        min_radius=2,
        code_weights=[1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.5]),
    'test_cfg': dict(
        post_center_limit_range=[-61.2, -61.2, -10.0, 61.2, 61.2, 10.0],
        max_per_img=500,
        max_pool_nms=False,
        min_radius=[4, 12, 10, 1, 0.85, 0.175],
        score_threshold=0.1,
        out_size_factor=4,
        voxel_size=[0.2, 0.2, 8],
        nms_type='circle',
        pre_max_size=1000,
        post_max_size=83,
        nms_thr=0.2),
    'in_channels': 256,  # Equal to bev_neck output_channels.
    'loss_cls': dict(type='GaussianFocalLoss', reduction='mean'),
    'loss_bbox': dict(type='L1Loss', reduction='mean', loss_weight=0.25),
    'gaussian_overlap': 0.1,
    'min_radius': 2,
}


class BEVDepthLightningModel(LightningModule):
    MODEL_NAMES = sorted(name for name in models.__dict__
                         if name.islower() and not name.startswith('__')
                         and callable(models.__dict__[name]))

    def __init__(self,
                 gpus: int = 1,
                 data_root='data/nuScenes',
                 eval_interval=1,
                 batch_size_per_device=8,
                 class_names=CLASSES,
                 backbone_img_conf=backbone_img_conf,
                 head_conf=head_conf,
                 ida_aug_conf=ida_aug_conf,
                 bda_aug_conf=bda_aug_conf,
                 rda_aug_conf=rda_aug_conf,
                 default_root_dir='./outputs/',
                 **kwargs):
        super().__init__()
        self.save_hyperparameters()
        self.gpus = gpus
        self.optimizer_config = optimizer_config
        self.pretrain_config = pretrain_config
        self.eval_interval = eval_interval
        self.batch_size_per_device = batch_size_per_device
        self.data_root = data_root
        self.class_names = class_names
        self.backbone_img_conf = backbone_img_conf
        self.head_conf = head_conf
        self.ida_aug_conf = ida_aug_conf
        self.bda_aug_conf = bda_aug_conf
        self.rda_aug_conf = rda_aug_conf
        mmcv.mkdir_or_exist(default_root_dir)
        self.default_root_dir = default_root_dir
        self.evaluator = DetNuscEvaluator(class_names=self.class_names,
                                          output_dir=self.default_root_dir)
        self.model = BaseBEVDepth(self.backbone_img_conf,
                                  self.head_conf)
        self.mode = 'valid'
        self.img_conf = img_conf
        self.data_use_cbgs = False
        self.load_interval = 1
        self.num_sweeps = 1
        self.sweep_idxes = list()
        self.key_idxes = list()
        self.data_return_depth = True
        self.downsample_factor = self.backbone_img_conf['downsample_factor']
        self.dbound = self.backbone_img_conf['d_bound']
        self.depth_channels = int(
            (self.dbound[1] - self.dbound[0]) / self.dbound[2])
        self.use_fusion = False
        self.train_info_paths = 'data/nuScenes/nuscenes_infos_train.pkl'
        self.val_info_paths = 'data/nuScenes/nuscenes_infos_val.pkl'
        self.predict_info_paths = 'data/nuScenes/nuscenes_infos_test.pkl'
        self.return_image = True
        self.return_depth = True
        self.return_radar_pv = False
        self.remove_z_axis = True

    def forward(self, sweep_imgs, mats, is_train=False, **inputs):
        return self.model(sweep_imgs, mats, is_train=is_train)

    def training_step(self, batch):
        if self.global_rank == 0:
            for pg in self.trainer.optimizers[0].param_groups:
                self.log('learning_rate', pg["lr"])

        (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _,
         depth_labels, pts_pv) = batch
        if torch.cuda.is_available():
            if self.return_image:
                sweep_imgs = sweep_imgs.cuda()
                for key, value in mats.items():
                    mats[key] = value.cuda()
            if self.return_radar_pv:
                pts_pv = pts_pv.cuda()
            gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]
            gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]
        preds, depth_preds = self(sweep_imgs, mats,
                                  pts_pv=pts_pv, is_train=True)

        targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)
        loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)

        if len(depth_labels.shape) == 5:
            # only the key frame contributes to the depth loss
            depth_labels = depth_labels[:, 0, ...].contiguous()
        loss_depth = self.get_depth_loss(depth_labels.cuda(), depth_preds)

        self.log('train/detection', loss_detection)
        self.log('train/heatmap', loss_heatmap)
        self.log('train/bbox', loss_bbox)
        self.log('train/depth', loss_depth)
        return loss_detection + loss_depth

    def get_depth_loss(self, depth_labels, depth_preds, weight=3.):
        depth_labels = self.get_downsampled_gt_depth(depth_labels)
        depth_preds = depth_preds.permute(0, 2, 3, 1).contiguous().view(
            -1, self.depth_channels)
        fg_mask = torch.max(depth_labels, dim=1).values > 0.0
        with autocast(enabled=False):
            loss_depth = (F.binary_cross_entropy(
                depth_preds[fg_mask],
                depth_labels[fg_mask],
                reduction='none',
            ).sum() / max(1.0, fg_mask.sum()))
        return weight * loss_depth

    def get_downsampled_gt_depth(self, gt_depths):
        """
        Input:
            gt_depths: [B, N, H, W]
        Output:
            gt_depths: [B*N*h*w, d]
        """
        B, N, H, W = gt_depths.shape
        gt_depths = gt_depths.view(
            B * N,
            H // self.downsample_factor,
            self.downsample_factor,
            W // self.downsample_factor,
            self.downsample_factor,
            1,
        )
        gt_depths = gt_depths.permute(0, 1, 3, 5, 2, 4).contiguous()
        gt_depths = gt_depths.view(
            -1, self.downsample_factor * self.downsample_factor)
        gt_depths_tmp = torch.where(gt_depths == 0.0,
                                    1e5 * torch.ones_like(gt_depths),
                                    gt_depths)
        gt_depths = torch.min(gt_depths_tmp, dim=-1).values
        gt_depths = gt_depths.view(B * N, H // self.downsample_factor,
                                   W // self.downsample_factor)
        gt_depths = (gt_depths -
                     (self.dbound[0] - self.dbound[2])) / self.dbound[2]
        gt_depths = torch.where(
            (gt_depths < self.depth_channels + 1) & (gt_depths > 0.),
            gt_depths, torch.zeros_like(gt_depths))
        gt_depths = F.one_hot(gt_depths.long(),
                              num_classes=self.depth_channels + 1).view(
                                  -1, self.depth_channels + 1)[:, 1:]
        return gt_depths.float()

    def eval_step(self, batch, batch_idx, prefix: str):
        (sweep_imgs, mats, img_metas, _, _, _, _, pts_pv) = batch
        if torch.cuda.is_available():
            if self.return_image:
                sweep_imgs = sweep_imgs.cuda()
                for key, value in mats.items():
                    mats[key] = value.cuda()
            if self.return_radar_pv:
                pts_pv = pts_pv.cuda()
        preds = self(sweep_imgs, mats, pts_pv=pts_pv, is_train=False)
        if isinstance(self.model, torch.nn.parallel.DistributedDataParallel):
            results = self.model.module.get_bboxes(preds, img_metas)
        else:
            results = self.model.get_bboxes(preds, img_metas)
        for i in range(len(results)):
            results[i][0] = results[i][0].tensor.detach().cpu().numpy()
            results[i][1] = results[i][1].detach().cpu().numpy()
            results[i][2] = results[i][2].detach().cpu().numpy()
            results[i].append(img_metas[i])
        return results

    def validation_epoch_end(self, validation_step_outputs):
        detection_losses = list()
        heatmap_losses = list()
        bbox_losses = list()
        depth_losses = list()
        for validation_step_output in validation_step_outputs:
            detection_losses.append(validation_step_output[0])
            heatmap_losses.append(validation_step_output[1])
            bbox_losses.append(validation_step_output[2])
            depth_losses.append(validation_step_output[3])
        synchronize()
        self.log('val/detection',
                 torch.mean(torch.stack(detection_losses)), on_epoch=True)
        self.log('val/heatmap',
                 torch.mean(torch.stack(heatmap_losses)), on_epoch=True)
        self.log('val/bbox',
                 torch.mean(torch.stack(bbox_losses)), on_epoch=True)
        self.log('val/depth',
                 torch.mean(torch.stack(depth_losses)), on_epoch=True)

    def validation_step(self, batch, batch_idx):
        (sweep_imgs, mats, _, gt_boxes_3d, gt_labels_3d, _,
         depth_labels, pts_pv) = batch
        if torch.cuda.is_available():
            if self.return_image:
                sweep_imgs = sweep_imgs.cuda()
                for key, value in mats.items():
                    mats[key] = value.cuda()
            if self.return_radar_pv:
                pts_pv = pts_pv.cuda()
            gt_boxes_3d = [gt_box.cuda() for gt_box in gt_boxes_3d]
            gt_labels_3d = [gt_label.cuda() for gt_label in gt_labels_3d]
        with torch.no_grad():
            preds, depth_preds = self(sweep_imgs, mats,
                                      pts_pv=pts_pv, is_train=True)

            targets = self.model.get_targets(gt_boxes_3d, gt_labels_3d)
            loss_detection, loss_heatmap, loss_bbox = self.model.loss(targets, preds)

            if len(depth_labels.shape) == 5:
                # only the key frame contributes to the depth loss
                depth_labels = depth_labels[:, 0, ...].contiguous()
            loss_depth = self.get_depth_loss(
                depth_labels.cuda(), depth_preds, weight=3.)
        return loss_detection, loss_heatmap, loss_bbox, loss_depth

    def test_epoch_end(self, test_step_outputs):
        all_pred_results = list()
        all_img_metas = list()
        for test_step_output in test_step_outputs:
            for i in range(len(test_step_output)):
                all_pred_results.append(test_step_output[i][:3])
                all_img_metas.append(test_step_output[i][3])
        synchronize()
        # TODO: find a cleaner way to get the dataset length.
        dataset_length = len(self.val_dataloader().dataset)
        all_pred_results = sum(
            map(list, zip(*all_gather_object(all_pred_results))),
            [])[:dataset_length]
        all_img_metas = sum(
            map(list, zip(*all_gather_object(all_img_metas))),
            [])[:dataset_length]
        if self.global_rank == 0:
            self.evaluator.evaluate(all_pred_results, all_img_metas)

    def configure_optimizers(self):
        optimizer = build_optimizer(self.model, self.optimizer_config)
        scheduler = MultiStepLR(optimizer, [19, 23])
        return [[optimizer], [scheduler]]

    def train_dataloader(self):
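One detail in `test_epoch_end` above is easy to miss: with a `DistributedSampler`, rank r holds samples r, r + world_size, r + 2*world_size, ..., so `sum(map(list, zip(*all_gather_object(...))), [])` interleaves the per-rank lists back into dataset order, and the trailing `[:dataset_length]` drops the entries the sampler padded to equalize rank workloads. A single-process illustration of that reordering (the sample ids are made up):

# world_size == 3, dataset of 7 samples, sampler pads each rank to 3 entries
per_rank = [[0, 3, 6],   # rank 0
            [1, 4, 0],   # rank 1 (last id is padding)
            [2, 5, 1]]   # rank 2 (last id is padding)
dataset_length = 7
merged = sum(map(list, zip(*per_rank)), [])[:dataset_length]
assert merged == [0, 1, 2, 3, 4, 5, 6]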
train_dataset = NuscDatasetRadarDet(
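The gold next_line above continues `train_dataloader` by instantiating `NuscDatasetRadarDet`. Its full signature is not part of this record, but given the imports (`partial`, `collate_fn`, `torch.utils.data`), the usual wiring would look roughly like the sketch below; every keyword passed to the dataset and to `collate_fn` is a placeholder guess, not the repo's confirmed API.

from functools import partial

from torch.utils.data import DataLoader

from datasets.nusc_det_dataset import NuscDatasetRadarDet, collate_fn

def build_train_loader(batch_size_per_device, return_image=True):
    # All kwargs below are hypothetical; consult the repo for the real signature.
    dataset = NuscDatasetRadarDet(
        info_paths='data/nuScenes/nuscenes_infos_train.pkl',  # path taken from __init__ above
        is_train=True,                                        # placeholder kwarg
    )
    return DataLoader(
        dataset,
        batch_size=batch_size_per_device,
        num_workers=4,
        drop_last=True,
        shuffle=False,  # PyTorch Lightning injects a DistributedSampler when needed
        collate_fn=partial(collate_fn, is_return_image=return_image),  # flag name is a guess
    )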
0
2023-12-06 14:57:49+00:00
24k
jinxixiang/magic_animate_unofficial
animatediff/magic_animate/pipeline.py
[ { "identifier": "UNet3DConditionModel", "path": "animatediff/magic_animate/unet_controlnet.py", "snippet": "class UNet3DConditionModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n sample_size: Optional[int] = None,\n in_channels: int = 4,\n out_channels: int = 4,\n center_input_sample: bool = False,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\",\n ),\n mid_block_type: str = \"UNetMidBlock3DCrossAttn\",\n up_block_types: Tuple[str] = (\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: int = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n dual_cross_attention: bool = False,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n\n # Additional\n use_motion_module=False,\n motion_module_resolutions=(1, 2, 4, 8),\n motion_module_mid_block=False,\n motion_module_decoder_only=False,\n motion_module_type=None,\n motion_module_kwargs={},\n unet_use_cross_frame_attention=None,\n unet_use_temporal_attention=None,\n\n # Addition for image embeddings\n use_image_condition=False,\n # Additional for dwpose adapter\n use_dwpose_adapter=False,\n ):\n super().__init__()\n\n self.sample_size = sample_size\n time_embed_dim = block_out_channels[0] * 4\n\n # input\n self.conv_in = InflatedConv3d(in_channels, block_out_channels[0], kernel_size=3, padding=(1, 1))\n\n # time\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # dwpose condition\n if use_dwpose_adapter:\n self.dwpose_adapter = ControlNetConditioningEmbedding(conditioning_embedding_channels=4) # pose guider net\n else:\n self.dwpose_adapter = None\n\n self.use_image_condition = False\n if use_image_condition:\n self.use_image_condition = True\n self.image_proj_model = Resampler(\n dim=cross_attention_dim,\n depth=4,\n dim_head=64,\n heads=12,\n num_queries=16,\n embedding_dim=1024,\n output_dim=cross_attention_dim,\n ff_mult=4,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.mid_block = None\n self.up_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n for i, down_block_type in 
enumerate(down_block_types):\n res = 2 ** i\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[i],\n downsample_padding=downsample_padding,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions) and (\n not motion_module_decoder_only),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.down_blocks.append(down_block)\n\n # mid\n if mid_block_type == \"UNetMidBlock3DCrossAttn\":\n self.mid_block = UNetMidBlock3DCrossAttn(\n in_channels=block_out_channels[-1],\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and motion_module_mid_block,\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n else:\n raise ValueError(f\"unknown mid_block_type : {mid_block_type}\")\n\n # count how many layers upsample the videos\n self.num_upsamplers = 0\n\n # up\n reversed_block_out_channels = list(reversed(block_out_channels))\n reversed_attention_head_dim = list(reversed(attention_head_dim))\n only_cross_attention = list(reversed(only_cross_attention))\n output_channel = reversed_block_out_channels[0]\n for i, up_block_type in enumerate(up_block_types):\n res = 2 ** (3 - i)\n is_final_block = i == len(block_out_channels) - 1\n\n prev_output_channel = output_channel\n output_channel = reversed_block_out_channels[i]\n input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]\n\n # add upsample block for all BUT final layer\n if not is_final_block:\n add_upsample = True\n self.num_upsamplers += 1\n else:\n add_upsample = False\n\n up_block = get_up_block(\n up_block_type,\n num_layers=layers_per_block + 1,\n in_channels=input_channel,\n out_channels=output_channel,\n prev_output_channel=prev_output_channel,\n temb_channels=time_embed_dim,\n add_upsample=add_upsample,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n attn_num_head_channels=reversed_attention_head_dim[i],\n dual_cross_attention=dual_cross_attention,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n 
resnet_time_scale_shift=resnet_time_scale_shift,\n\n unet_use_cross_frame_attention=unet_use_cross_frame_attention,\n unet_use_temporal_attention=unet_use_temporal_attention,\n\n use_motion_module=use_motion_module and (res in motion_module_resolutions),\n motion_module_type=motion_module_type,\n motion_module_kwargs=motion_module_kwargs,\n )\n self.up_blocks.append(up_block)\n prev_output_channel = output_channel\n\n # out\n self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps)\n self.conv_act = nn.SiLU()\n self.conv_out = InflatedConv3d(block_out_channels[0], out_channels, kernel_size=3, padding=1)\n\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maxium amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_slicable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_slicable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_slicable_dims(module)\n\n num_slicable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_slicable_layers * [1]\n\n slice_size = num_slicable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. 
Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock3D, DownBlock3D, CrossAttnUpBlock3D, UpBlock3D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n class_labels: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n # for controlnet\n down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,\n mid_block_additional_residual: Optional[torch.Tensor] = None,\n # for pose_guider\n dwpose_conditions: Optional[torch.Tensor] = None,\n return_dict: bool = True,\n ) -> Union[UNet3DConditionOutput, Tuple]:\n r\"\"\"\n Args:\n sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor\n timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps\n encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states\n return_dict (`bool`, *optional*, defaults to `True`):\n Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple.\n\n Returns:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:\n [`~models.unet_2d_condition.UNet2DConditionOutput`] if `return_dict` is True, otherwise a `tuple`. 
When\n returning a tuple, the first element is the sample tensor.\n \"\"\"\n # By default samples have to be AT least a multiple of the overall upsampling factor.\n # The overall upsampling factor is equal to 2 ** (# num of upsampling layears).\n # However, the upsampling interpolation output size can be forced to fit any upsampling size\n # on the fly if necessary.\n default_overall_up_factor = 2 ** self.num_upsamplers\n\n # if self.use_image_condition:\n # # project global image to 16 tokens for cross-attention\n # encoder_hidden_states = self.image_proj(encoder_hidden_states)\n # encoder_hidden_states = encoder_hidden_states.reshape(-1, 16, 768)\n # encoder_hidden_states = self.image_norm(encoder_hidden_states)\n\n # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`\n forward_upsample_size = False\n upsample_size = None\n\n if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):\n logger.info(\"Forward upsample size to force interpolation output size.\")\n forward_upsample_size = True\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # center input if necessary\n if self.config.center_input_sample:\n sample = 2 * sample - 1.0\n\n # time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. 
so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n emb = self.time_embedding(t_emb)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # add pose conditions\n if dwpose_conditions is not None:\n conditions = self.dwpose_adapter(dwpose_conditions)\n sample += conditions\n\n # pre-process\n sample = self.conv_in(sample)\n\n # down\n is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None\n\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb,\n encoder_hidden_states=encoder_hidden_states)\n\n down_block_res_samples += res_samples\n\n if is_controlnet:\n new_down_block_res_samples = ()\n\n for down_block_res_sample, down_block_additional_residual in zip(\n down_block_res_samples, down_block_additional_residuals\n ):\n down_block_res_sample = down_block_res_sample + down_block_additional_residual\n new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)\n\n down_block_res_samples = new_down_block_res_samples\n\n # mid\n sample = self.mid_block(\n sample, emb, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n\n if is_controlnet:\n sample = sample + mid_block_additional_residual\n\n # up\n for i, upsample_block in enumerate(self.up_blocks):\n is_final_block = i == len(self.up_blocks) - 1\n\n res_samples = down_block_res_samples[-len(upsample_block.resnets):]\n down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]\n\n # if we have not reached the final block and need to forward the\n # upsample size, we do it here\n if not is_final_block and forward_upsample_size:\n upsample_size = down_block_res_samples[-1].shape[2:]\n\n if hasattr(upsample_block, \"has_cross_attention\") and upsample_block.has_cross_attention:\n sample = upsample_block(\n hidden_states=sample,\n temb=emb,\n res_hidden_states_tuple=res_samples,\n encoder_hidden_states=encoder_hidden_states,\n upsample_size=upsample_size,\n attention_mask=attention_mask,\n )\n else:\n sample = upsample_block(\n hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size,\n encoder_hidden_states=encoder_hidden_states,\n )\n\n # post-process\n sample = self.conv_norm_out(sample)\n sample = self.conv_act(sample)\n sample = self.conv_out(sample)\n\n if not return_dict:\n return (sample,)\n\n return UNet3DConditionOutput(sample=sample)\n\n @classmethod\n def from_pretrained_2d(cls, pretrained_model_path, subfolder=None, unet_additional_kwargs=None):\n if subfolder is not None:\n pretrained_model_path = os.path.join(pretrained_model_path, subfolder)\n print(f\"loaded temporal unet's pretrained weights from {pretrained_model_path} ...\")\n\n config_file = os.path.join(pretrained_model_path, 'config.json')\n if not os.path.isfile(config_file):\n raise 
RuntimeError(f\"{config_file} does not exist\")\n with open(config_file, \"r\") as f:\n config = json.load(f)\n config[\"_class_name\"] = cls.__name__\n config[\"down_block_types\"] = [\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"CrossAttnDownBlock3D\",\n \"DownBlock3D\"\n ]\n config[\"up_block_types\"] = [\n \"UpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\",\n \"CrossAttnUpBlock3D\"\n ]\n # config[\"mid_block_type\"] = \"UNetMidBlock3DCrossAttn\"\n\n from diffusers.utils import WEIGHTS_NAME\n model = cls.from_config(config, **unet_additional_kwargs)\n model_file = os.path.join(pretrained_model_path, WEIGHTS_NAME)\n if not os.path.isfile(model_file):\n raise RuntimeError(f\"{model_file} does not exist\")\n state_dict = torch.load(model_file, map_location=\"cpu\")\n\n m, u = model.load_state_dict(state_dict, strict=False)\n print(f\"### missing keys: {len(m)}; \\n### unexpected keys: {len(u)};\")\n # print(f\"### missing keys:\\n{m}\\n### unexpected keys:\\n{u}\\n\")\n\n params = [p.numel() if \"temporal\" in n else 0 for n, p in model.named_parameters()]\n print(f\"### Temporal Module Parameters: {sum(params) / 1e6} M\")\n\n return model" }, { "identifier": "ControlNetModel", "path": "animatediff/magic_animate/controlnet.py", "snippet": "class ControlNetModel(ModelMixin, ConfigMixin):\n _supports_gradient_checkpointing = True\n\n @register_to_config\n def __init__(\n self,\n in_channels: int = 4,\n flip_sin_to_cos: bool = True,\n freq_shift: int = 0,\n down_block_types: Tuple[str] = (\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"CrossAttnDownBlock2D\",\n \"DownBlock2D\",\n ),\n only_cross_attention: Union[bool, Tuple[bool]] = False,\n block_out_channels: Tuple[int] = (320, 640, 1280, 1280),\n layers_per_block: int = 2,\n downsample_padding: int = 1,\n mid_block_scale_factor: float = 1,\n act_fn: str = \"silu\",\n norm_num_groups: Optional[int] = 32,\n norm_eps: float = 1e-5,\n cross_attention_dim: int = 1280,\n attention_head_dim: Union[int, Tuple[int]] = 8,\n use_linear_projection: bool = False,\n class_embed_type: Optional[str] = None,\n num_class_embeds: Optional[int] = None,\n upcast_attention: bool = False,\n resnet_time_scale_shift: str = \"default\",\n projection_class_embeddings_input_dim: Optional[int] = None,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n ):\n super().__init__()\n\n # Check inputs\n if len(block_out_channels) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}.\"\n )\n\n if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):\n raise ValueError(\n f\"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. 
`down_block_types`: {down_block_types}.\"\n )\n\n # input\n conv_in_kernel = 3\n conv_in_padding = (conv_in_kernel - 1) // 2\n self.conv_in = nn.Conv2d(\n in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding\n )\n\n # time\n time_embed_dim = block_out_channels[0] * 4\n\n self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)\n timestep_input_dim = block_out_channels[0]\n\n self.time_embedding = TimestepEmbedding(\n timestep_input_dim,\n time_embed_dim,\n act_fn=act_fn,\n )\n\n # class embedding\n if class_embed_type is None and num_class_embeds is not None:\n self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)\n elif class_embed_type == \"timestep\":\n self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim)\n elif class_embed_type == \"identity\":\n self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)\n elif class_embed_type == \"projection\":\n if projection_class_embeddings_input_dim is None:\n raise ValueError(\n \"`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set\"\n )\n # The projection `class_embed_type` is the same as the timestep `class_embed_type` except\n # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings\n # 2. it projects from an arbitrary input dimension.\n #\n # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.\n # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.\n # As a result, `TimestepEmbedding` can be passed arbitrary vectors.\n self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)\n else:\n self.class_embedding = None\n\n # control net conditioning embedding\n self.controlnet_cond_embedding = ControlNetConditioningEmbedding(\n conditioning_embedding_channels=block_out_channels[0],\n block_out_channels=conditioning_embedding_out_channels,\n )\n\n self.down_blocks = nn.ModuleList([])\n self.controlnet_down_blocks = nn.ModuleList([])\n\n if isinstance(only_cross_attention, bool):\n only_cross_attention = [only_cross_attention] * len(down_block_types)\n\n if isinstance(attention_head_dim, int):\n attention_head_dim = (attention_head_dim,) * len(down_block_types)\n\n # down\n output_channel = block_out_channels[0]\n\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n for i, down_block_type in enumerate(down_block_types):\n input_channel = output_channel\n output_channel = block_out_channels[i]\n is_final_block = i == len(block_out_channels) - 1\n\n down_block = get_down_block(\n down_block_type,\n num_layers=layers_per_block,\n in_channels=input_channel,\n out_channels=output_channel,\n temb_channels=time_embed_dim,\n add_downsample=not is_final_block,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n resnet_groups=norm_num_groups,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[i],\n downsample_padding=downsample_padding,\n use_linear_projection=use_linear_projection,\n only_cross_attention=only_cross_attention[i],\n upcast_attention=upcast_attention,\n resnet_time_scale_shift=resnet_time_scale_shift,\n )\n self.down_blocks.append(down_block)\n\n for _ in range(layers_per_block):\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = 
zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n if not is_final_block:\n controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_down_blocks.append(controlnet_block)\n\n # mid\n mid_block_channel = block_out_channels[-1]\n\n controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1)\n controlnet_block = zero_module(controlnet_block)\n self.controlnet_mid_block = controlnet_block\n\n self.mid_block = UNetMidBlock2DCrossAttn(\n in_channels=mid_block_channel,\n temb_channels=time_embed_dim,\n resnet_eps=norm_eps,\n resnet_act_fn=act_fn,\n output_scale_factor=mid_block_scale_factor,\n resnet_time_scale_shift=resnet_time_scale_shift,\n cross_attention_dim=cross_attention_dim,\n num_attention_heads=attention_head_dim[-1],\n resnet_groups=norm_num_groups,\n use_linear_projection=use_linear_projection,\n upcast_attention=upcast_attention,\n )\n\n @classmethod\n def from_unet(\n cls,\n unet: UNet2DConditionModel,\n controlnet_conditioning_channel_order: str = \"rgb\",\n conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256),\n load_weights_from_unet: bool = True,\n ):\n r\"\"\"\n Instantiate Controlnet class from UNet2DConditionModel.\n\n Parameters:\n unet (`UNet2DConditionModel`):\n UNet model which weights are copied to the ControlNet. Note that all configuration options are also\n copied where applicable.\n \"\"\"\n controlnet = cls(\n in_channels=unet.config.in_channels,\n flip_sin_to_cos=unet.config.flip_sin_to_cos,\n freq_shift=unet.config.freq_shift,\n down_block_types=unet.config.down_block_types,\n only_cross_attention=unet.config.only_cross_attention,\n block_out_channels=unet.config.block_out_channels,\n layers_per_block=unet.config.layers_per_block,\n downsample_padding=unet.config.downsample_padding,\n mid_block_scale_factor=unet.config.mid_block_scale_factor,\n act_fn=unet.config.act_fn,\n norm_num_groups=unet.config.norm_num_groups,\n norm_eps=unet.config.norm_eps,\n cross_attention_dim=unet.config.cross_attention_dim,\n attention_head_dim=unet.config.attention_head_dim,\n use_linear_projection=unet.config.use_linear_projection,\n class_embed_type=unet.config.class_embed_type,\n num_class_embeds=unet.config.num_class_embeds,\n upcast_attention=unet.config.upcast_attention,\n resnet_time_scale_shift=unet.config.resnet_time_scale_shift,\n projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim,\n controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,\n conditioning_embedding_out_channels=conditioning_embedding_out_channels,\n )\n\n if load_weights_from_unet:\n controlnet.conv_in.load_state_dict(unet.conv_in.state_dict())\n controlnet.time_proj.load_state_dict(unet.time_proj.state_dict())\n controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict())\n\n if controlnet.class_embedding:\n controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict())\n\n controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict())\n controlnet.mid_block.load_state_dict(unet.mid_block.state_dict())\n\n return controlnet\n\n # @property\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors\n # def attn_processors(self) -> Dict[str, AttentionProcessor]:\n # r\"\"\"\n # Returns:\n # `dict` of attention processors: A dictionary containing all attention processors used in the model with\n # indexed by its 
weight name.\n # \"\"\"\n # # set recursively\n # processors = {}\n\n # def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):\n # if hasattr(module, \"set_processor\"):\n # processors[f\"{name}.processor\"] = module.processor\n\n # for sub_name, child in module.named_children():\n # fn_recursive_add_processors(f\"{name}.{sub_name}\", child, processors)\n\n # return processors\n\n # for name, module in self.named_children():\n # fn_recursive_add_processors(name, module, processors)\n\n # return processors\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor\n # def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):\n # r\"\"\"\n # Parameters:\n # `processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):\n # The instantiated processor class or a dictionary of processor classes that will be set as the processor\n # of **all** `Attention` layers.\n # In case `processor` is a dict, the key needs to define the path to the corresponding cross attention processor. This is strongly recommended when setting trainable attention processors.:\n\n # \"\"\"\n # count = len(self.attn_processors.keys())\n\n # if isinstance(processor, dict) and len(processor) != count:\n # raise ValueError(\n # f\"A dict of processors was passed, but the number of processors {len(processor)} does not match the\"\n # f\" number of attention layers: {count}. Please make sure to pass {count} processor classes.\"\n # )\n\n # def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):\n # if hasattr(module, \"set_processor\"):\n # if not isinstance(processor, dict):\n # module.set_processor(processor)\n # else:\n # module.set_processor(processor.pop(f\"{name}.processor\"))\n\n # for sub_name, child in module.named_children():\n # fn_recursive_attn_processor(f\"{name}.{sub_name}\", child, processor)\n\n # for name, module in self.named_children():\n # fn_recursive_attn_processor(name, module, processor)\n\n # # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor\n # def set_default_attn_processor(self):\n # \"\"\"\n # Disables custom attention processors and sets the default attention implementation.\n # \"\"\"\n # self.set_attn_processor(AttnProcessor())\n\n # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice\n def set_attention_slice(self, slice_size):\n r\"\"\"\n Enable sliced attention computation.\n\n When this option is enabled, the attention module will split the input tensor in slices, to compute attention\n in several steps. This is useful to save some memory in exchange for a small speed decrease.\n\n Args:\n slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `\"auto\"`):\n When `\"auto\"`, halves the input to the attention heads, so attention will be computed in two steps. If\n `\"max\"`, maximum amount of memory will be saved by running only one slice at a time. If a number is\n provided, uses as many slices as `attention_head_dim // slice_size`. 
In this case, `attention_head_dim`\n must be a multiple of `slice_size`.\n \"\"\"\n sliceable_head_dims = []\n\n def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):\n if hasattr(module, \"set_attention_slice\"):\n sliceable_head_dims.append(module.sliceable_head_dim)\n\n for child in module.children():\n fn_recursive_retrieve_sliceable_dims(child)\n\n # retrieve number of attention layers\n for module in self.children():\n fn_recursive_retrieve_sliceable_dims(module)\n\n num_sliceable_layers = len(sliceable_head_dims)\n\n if slice_size == \"auto\":\n # half the attention head size is usually a good trade-off between\n # speed and memory\n slice_size = [dim // 2 for dim in sliceable_head_dims]\n elif slice_size == \"max\":\n # make smallest slice possible\n slice_size = num_sliceable_layers * [1]\n\n slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size\n\n if len(slice_size) != len(sliceable_head_dims):\n raise ValueError(\n f\"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different\"\n f\" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}.\"\n )\n\n for i in range(len(slice_size)):\n size = slice_size[i]\n dim = sliceable_head_dims[i]\n if size is not None and size > dim:\n raise ValueError(f\"size {size} has to be smaller or equal to {dim}.\")\n\n # Recursively walk through all the children.\n # Any children which exposes the set_attention_slice method\n # gets the message\n def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):\n if hasattr(module, \"set_attention_slice\"):\n module.set_attention_slice(slice_size.pop())\n\n for child in module.children():\n fn_recursive_set_attention_slice(child, slice_size)\n\n reversed_slice_size = list(reversed(slice_size))\n for module in self.children():\n fn_recursive_set_attention_slice(module, reversed_slice_size)\n\n def _set_gradient_checkpointing(self, module, value=False):\n if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):\n module.gradient_checkpointing = value\n\n def forward(\n self,\n sample: torch.FloatTensor,\n timestep: Union[torch.Tensor, float, int],\n encoder_hidden_states: torch.Tensor,\n controlnet_cond: torch.FloatTensor,\n conditioning_scale: float = 1.0,\n class_labels: Optional[torch.Tensor] = None,\n timestep_cond: Optional[torch.Tensor] = None,\n attention_mask: Optional[torch.Tensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n return_dict: bool = True,\n ) -> Union[ControlNetOutput, Tuple]:\n # check channel order\n channel_order = self.config.controlnet_conditioning_channel_order\n\n if channel_order == \"rgb\":\n # in rgb order by default\n ...\n elif channel_order == \"bgr\":\n controlnet_cond = torch.flip(controlnet_cond, dims=[1])\n else:\n raise ValueError(f\"unknown `controlnet_conditioning_channel_order`: {channel_order}\")\n\n # prepare attention_mask\n if attention_mask is not None:\n attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0\n attention_mask = attention_mask.unsqueeze(1)\n\n # 1. time\n timesteps = timestep\n if not torch.is_tensor(timesteps):\n # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can\n # This would be a good case for the `match` statement (Python 3.10+)\n is_mps = sample.device.type == \"mps\"\n if isinstance(timestep, float):\n dtype = torch.float32 if is_mps else torch.float64\n else:\n dtype = torch.int32 if is_mps else torch.int64\n timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)\n elif len(timesteps.shape) == 0:\n timesteps = timesteps[None].to(sample.device)\n\n # broadcast to batch dimension in a way that's compatible with ONNX/Core ML\n timesteps = timesteps.expand(sample.shape[0])\n\n t_emb = self.time_proj(timesteps)\n\n # timesteps does not contain any weights and will always return f32 tensors\n # but time_embedding might actually be running in fp16. so we need to cast here.\n # there might be better ways to encapsulate this.\n t_emb = t_emb.to(dtype=self.dtype)\n\n emb = self.time_embedding(t_emb, timestep_cond)\n\n if self.class_embedding is not None:\n if class_labels is None:\n raise ValueError(\"class_labels should be provided when num_class_embeds > 0\")\n\n if self.config.class_embed_type == \"timestep\":\n class_labels = self.time_proj(class_labels)\n\n class_emb = self.class_embedding(class_labels).to(dtype=self.dtype)\n emb = emb + class_emb\n\n # 2. pre-process\n sample = self.conv_in(sample)\n\n controlnet_cond = self.controlnet_cond_embedding(controlnet_cond)\n\n sample += controlnet_cond\n\n # 3. down\n down_block_res_samples = (sample,)\n for downsample_block in self.down_blocks:\n if hasattr(downsample_block, \"has_cross_attention\") and downsample_block.has_cross_attention:\n sample, res_samples = downsample_block(\n hidden_states=sample,\n temb=emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n else:\n sample, res_samples = downsample_block(hidden_states=sample, temb=emb)\n\n down_block_res_samples += res_samples\n\n # 4. mid\n if self.mid_block is not None:\n sample = self.mid_block(\n sample,\n emb,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=attention_mask,\n # cross_attention_kwargs=cross_attention_kwargs,\n )\n\n # 5. Control net blocks\n\n controlnet_down_block_res_samples = ()\n\n for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks):\n down_block_res_sample = controlnet_block(down_block_res_sample)\n controlnet_down_block_res_samples += (down_block_res_sample,)\n\n down_block_res_samples = controlnet_down_block_res_samples\n\n mid_block_res_sample = self.controlnet_mid_block(sample)\n\n # 6. scaling\n down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples]\n mid_block_res_sample *= conditioning_scale\n\n if not return_dict:\n return (down_block_res_samples, mid_block_res_sample)\n\n return ControlNetOutput(\n down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample\n )" }, { "identifier": "ReferenceAttentionControl", "path": "animatediff/magic_animate/mutual_self_attention.py", "snippet": "class ReferenceAttentionControl():\n \n def __init__(self, \n unet,\n mode=\"write\",\n do_classifier_free_guidance=False,\n attention_auto_machine_weight = float('inf'),\n gn_auto_machine_weight = 1.0,\n style_fidelity = 1.0,\n reference_attn=True,\n reference_adain=False,\n fusion_blocks=\"midup\",\n batch_size=1,\n clip_length=8,\n is_image=False,\n ) -> None:\n # 10. 
Modify self attention and group norm\n self.unet = unet\n assert mode in [\"read\", \"write\"]\n assert fusion_blocks in [\"midup\", \"full\"]\n self.reference_attn = reference_attn\n self.reference_adain = reference_adain\n self.fusion_blocks = fusion_blocks\n self.register_reference_hooks(\n mode, \n do_classifier_free_guidance,\n clip_length,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n fusion_blocks=fusion_blocks,\n batch_size=batch_size,\n is_image=is_image,\n )\n\n def register_reference_hooks(\n self, \n mode, \n do_classifier_free_guidance,\n clip_length,\n attention_auto_machine_weight,\n gn_auto_machine_weight,\n style_fidelity,\n reference_attn,\n reference_adain,\n dtype=torch.float16,\n batch_size=1, \n num_images_per_prompt=1, \n device=torch.device(\"cpu\"), \n fusion_blocks='midup',\n is_image=False,\n ):\n MODE = mode\n do_classifier_free_guidance = do_classifier_free_guidance\n attention_auto_machine_weight = attention_auto_machine_weight\n gn_auto_machine_weight = gn_auto_machine_weight\n style_fidelity = style_fidelity\n reference_attn = reference_attn\n reference_adain = reference_adain\n fusion_blocks = fusion_blocks\n num_images_per_prompt = num_images_per_prompt\n dtype=dtype\n if do_classifier_free_guidance:\n # uc_mask = (\n # torch.Tensor([1] * batch_size * num_images_per_prompt * 16 + [0] * batch_size * num_images_per_prompt * 16)\n # .to(device)\n # .bool()\n # )\n\n uc_mask = (\n torch.Tensor(\n [1] * batch_size * num_images_per_prompt * clip_length + [0] * batch_size * num_images_per_prompt * clip_length)\n .to(device)\n .bool()\n )\n\n else:\n uc_mask = (\n torch.Tensor([0] * batch_size * num_images_per_prompt * 2)\n .to(device)\n .bool()\n )\n \n def hacked_basic_transformer_inner_forward(\n self,\n hidden_states: torch.FloatTensor,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n timestep: Optional[torch.LongTensor] = None,\n cross_attention_kwargs: Dict[str, Any] = None,\n class_labels: Optional[torch.LongTensor] = None,\n video_length=None,\n ):\n if self.use_ada_layer_norm:\n norm_hidden_states = self.norm1(hidden_states, timestep)\n elif self.use_ada_layer_norm_zero:\n norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(\n hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype\n )\n else:\n norm_hidden_states = self.norm1(hidden_states)\n\n # 1. 
Self-Attention\n cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}\n if self.only_cross_attention:\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n else:\n if MODE == \"write\":\n self.bank.append(norm_hidden_states.clone())\n attn_output = self.attn1(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,\n attention_mask=attention_mask,\n **cross_attention_kwargs,\n )\n if MODE == \"read\":\n if not is_image:\n self.bank = [rearrange(d.unsqueeze(1).repeat(1, video_length, 1, 1), \"b t l c -> (b t) l c\")[:hidden_states.shape[0]] for d in self.bank]\n\n hidden_states_uc = self.attn1(norm_hidden_states,\n encoder_hidden_states=torch.cat([norm_hidden_states] + self.bank, dim=1),\n attention_mask=attention_mask) + hidden_states\n hidden_states_c = hidden_states_uc.clone()\n _uc_mask = uc_mask.clone()\n if do_classifier_free_guidance:\n if hidden_states.shape[0] != _uc_mask.shape[0]:\n _uc_mask = (\n torch.Tensor([1] * (hidden_states.shape[0]//2) + [0] * (hidden_states.shape[0]//2))\n .to(device)\n .bool()\n )\n hidden_states_c[_uc_mask] = self.attn1(\n norm_hidden_states[_uc_mask],\n encoder_hidden_states=norm_hidden_states[_uc_mask],\n attention_mask=attention_mask,\n ) + hidden_states[_uc_mask]\n hidden_states = hidden_states_c.clone()\n \n self.bank.clear()\n if self.attn2 is not None:\n # Cross-Attention\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n hidden_states = (\n self.attn2(\n norm_hidden_states, encoder_hidden_states=encoder_hidden_states, attention_mask=attention_mask\n )\n + hidden_states\n )\n\n # Feed-forward\n hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states\n\n # Temporal-Attention\n if not is_image:\n if self.unet_use_temporal_attention:\n d = hidden_states.shape[1]\n hidden_states = rearrange(hidden_states, \"(b f) d c -> (b d) f c\", f=video_length)\n norm_hidden_states = (\n self.norm_temp(hidden_states, timestep) if self.use_ada_layer_norm else self.norm_temp(hidden_states)\n )\n hidden_states = self.attn_temp(norm_hidden_states) + hidden_states\n hidden_states = rearrange(hidden_states, \"(b d) f c -> (b f) d c\", d=d)\n\n return hidden_states\n \n if self.use_ada_layer_norm_zero:\n attn_output = gate_msa.unsqueeze(1) * attn_output\n hidden_states = attn_output + hidden_states\n\n if self.attn2 is not None:\n norm_hidden_states = (\n self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)\n )\n\n # 2. Cross-Attention\n attn_output = self.attn2(\n norm_hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n attention_mask=encoder_attention_mask,\n **cross_attention_kwargs,\n )\n hidden_states = attn_output + hidden_states\n\n # 3. 
Feed-forward\n norm_hidden_states = self.norm3(hidden_states)\n\n if self.use_ada_layer_norm_zero:\n norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]\n\n ff_output = self.ff(norm_hidden_states)\n\n if self.use_ada_layer_norm_zero:\n ff_output = gate_mlp.unsqueeze(1) * ff_output\n\n hidden_states = ff_output + hidden_states\n\n return hidden_states\n\n def hacked_mid_forward(self, *args, **kwargs):\n eps = 1e-6\n x = self.original_forward(*args, **kwargs)\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append(mean)\n self.var_bank.append(var)\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(x, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank) / float(len(self.mean_bank))\n var_acc = sum(self.var_bank) / float(len(self.var_bank))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n x_uc = (((x - mean) / std) * std_acc) + mean_acc\n x_c = x_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n x_c[uc_mask] = x[uc_mask]\n x = style_fidelity * x_c + (1.0 - style_fidelity) * x_uc\n self.mean_bank = []\n self.var_bank = []\n return x\n\n def hack_CrossAttnDownBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n\n # TODO(Patrick, William) - attention mask is not used\n output_states = ()\n\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, 
output_states\n\n def hacked_DownBlock2D_forward(self, hidden_states, temb=None):\n eps = 1e-6\n\n output_states = ()\n\n for i, resnet in enumerate(self.resnets):\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n output_states = output_states + (hidden_states,)\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.downsamplers is not None:\n for downsampler in self.downsamplers:\n hidden_states = downsampler(hidden_states)\n\n output_states = output_states + (hidden_states,)\n\n return hidden_states, output_states\n\n def hacked_CrossAttnUpBlock2D_forward(\n self,\n hidden_states: torch.FloatTensor,\n res_hidden_states_tuple: Tuple[torch.FloatTensor, ...],\n temb: Optional[torch.FloatTensor] = None,\n encoder_hidden_states: Optional[torch.FloatTensor] = None,\n cross_attention_kwargs: Optional[Dict[str, Any]] = None,\n upsample_size: Optional[int] = None,\n attention_mask: Optional[torch.FloatTensor] = None,\n encoder_attention_mask: Optional[torch.FloatTensor] = None,\n ):\n eps = 1e-6\n # TODO(Patrick, William) - attention mask is not used\n for i, (resnet, attn) in enumerate(zip(self.resnets, self.attentions)):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n hidden_states = attn(\n hidden_states,\n encoder_hidden_states=encoder_hidden_states,\n cross_attention_kwargs=cross_attention_kwargs,\n attention_mask=attention_mask,\n encoder_attention_mask=encoder_attention_mask,\n return_dict=False,\n )[0]\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = 
hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n def hacked_UpBlock2D_forward(self, hidden_states, res_hidden_states_tuple, temb=None, upsample_size=None):\n eps = 1e-6\n for i, resnet in enumerate(self.resnets):\n # pop res hidden states\n res_hidden_states = res_hidden_states_tuple[-1]\n res_hidden_states_tuple = res_hidden_states_tuple[:-1]\n hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)\n hidden_states = resnet(hidden_states, temb)\n\n if MODE == \"write\":\n if gn_auto_machine_weight >= self.gn_weight:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n self.mean_bank.append([mean])\n self.var_bank.append([var])\n if MODE == \"read\":\n if len(self.mean_bank) > 0 and len(self.var_bank) > 0:\n var, mean = torch.var_mean(hidden_states, dim=(2, 3), keepdim=True, correction=0)\n std = torch.maximum(var, torch.zeros_like(var) + eps) ** 0.5\n mean_acc = sum(self.mean_bank[i]) / float(len(self.mean_bank[i]))\n var_acc = sum(self.var_bank[i]) / float(len(self.var_bank[i]))\n std_acc = torch.maximum(var_acc, torch.zeros_like(var_acc) + eps) ** 0.5\n hidden_states_uc = (((hidden_states - mean) / std) * std_acc) + mean_acc\n hidden_states_c = hidden_states_uc.clone()\n if do_classifier_free_guidance and style_fidelity > 0:\n hidden_states_c[uc_mask] = hidden_states[uc_mask].to(hidden_states_c.dtype)\n hidden_states = style_fidelity * hidden_states_c + (1.0 - style_fidelity) * hidden_states_uc\n\n if MODE == \"read\":\n self.mean_bank = []\n self.var_bank = []\n\n if self.upsamplers is not None:\n for upsampler in self.upsamplers:\n hidden_states = upsampler(hidden_states, upsample_size)\n\n return hidden_states\n\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)] \n attn_modules = sorted(attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n\n for i, module in enumerate(attn_modules):\n module._original_inner_forward = module.forward\n module.forward = hacked_basic_transformer_inner_forward.__get__(module, BasicTransformerBlock)\n module.bank = []\n module.attn_weight = float(i) / float(len(attn_modules))\n\n if self.reference_adain:\n gn_modules = [self.unet.mid_block]\n self.unet.mid_block.gn_weight = 0\n\n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n module.gn_weight = 1.0 - float(w) / float(len(down_blocks))\n gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n module.gn_weight = float(w) / float(len(up_blocks))\n gn_modules.append(module)\n\n for i, module in enumerate(gn_modules):\n if getattr(module, \"original_forward\", None) is None:\n module.original_forward = module.forward\n if i == 0:\n # mid_block\n module.forward = hacked_mid_forward.__get__(module, torch.nn.Module)\n elif isinstance(module, CrossAttnDownBlock2D):\n 
module.forward = hack_CrossAttnDownBlock2D_forward.__get__(module, CrossAttnDownBlock2D)\n elif isinstance(module, DownBlock2D):\n module.forward = hacked_DownBlock2D_forward.__get__(module, DownBlock2D)\n elif isinstance(module, CrossAttnUpBlock2D):\n module.forward = hacked_CrossAttnUpBlock2D_forward.__get__(module, CrossAttnUpBlock2D)\n elif isinstance(module, UpBlock2D):\n module.forward = hacked_UpBlock2D_forward.__get__(module, UpBlock2D)\n module.mean_bank = []\n module.var_bank = []\n module.gn_weight *= 2\n \n def update(self, writer, dtype=torch.float16):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in (torch_dfs(writer.unet.mid_block)+torch_dfs(writer.unet.up_blocks)) if isinstance(module, BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, _BasicTransformerBlock)]\n writer_attn_modules = [module for module in torch_dfs(writer.unet) if isinstance(module, BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0]) \n writer_attn_modules = sorted(writer_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r, w in zip(reader_attn_modules, writer_attn_modules):\n r.bank = [v.clone().to(dtype) for v in w.bank]\n # w.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n writer_gn_modules = [writer.unet.mid_block]\n \n down_blocks = writer.unet.down_blocks\n for w, module in enumerate(down_blocks):\n writer_gn_modules.append(module)\n\n up_blocks = writer.unet.up_blocks\n for w, module in enumerate(up_blocks):\n writer_gn_modules.append(module)\n \n for r, w in zip(reader_gn_modules, writer_gn_modules):\n if len(w.mean_bank) > 0 and isinstance(w.mean_bank[0], list):\n r.mean_bank = [[v.clone().to(dtype) for v in vl] for vl in w.mean_bank]\n r.var_bank = [[v.clone().to(dtype) for v in vl] for vl in w.var_bank]\n else:\n r.mean_bank = [v.clone().to(dtype) for v in w.mean_bank]\n r.var_bank = [v.clone().to(dtype) for v in w.var_bank]\n \n def clear(self):\n if self.reference_attn:\n if self.fusion_blocks == \"midup\":\n reader_attn_modules = [module for module in (torch_dfs(self.unet.mid_block)+torch_dfs(self.unet.up_blocks)) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n elif self.fusion_blocks == \"full\":\n reader_attn_modules = [module for module in torch_dfs(self.unet) if isinstance(module, BasicTransformerBlock) or isinstance(module, _BasicTransformerBlock)]\n reader_attn_modules = sorted(reader_attn_modules, key=lambda x: -x.norm1.normalized_shape[0])\n for r in reader_attn_modules:\n r.bank.clear()\n if self.reference_adain:\n reader_gn_modules = [self.unet.mid_block]\n \n down_blocks = self.unet.down_blocks\n for w, module in enumerate(down_blocks):\n reader_gn_modules.append(module)\n\n up_blocks = self.unet.up_blocks\n for w, module in enumerate(up_blocks):\n reader_gn_modules.append(module)\n \n for r in reader_gn_modules:\n r.mean_bank.clear()\n r.var_bank.clear()" }, { "identifier": 
"get_context_scheduler", "path": "animatediff/magic_animate/context.py", "snippet": "def get_context_scheduler(name: str) -> Callable:\n if name == \"uniform\":\n return uniform\n else:\n raise ValueError(f\"Unknown context_overlap policy {name}\")" }, { "identifier": "get_total_steps", "path": "animatediff/magic_animate/context.py", "snippet": "def get_total_steps(\n scheduler,\n timesteps: List[int],\n num_steps: Optional[int] = None,\n num_frames: int = ...,\n context_size: Optional[int] = None,\n context_stride: int = 3,\n context_overlap: int = 4,\n closed_loop: bool = True,\n):\n return sum(\n len(\n list(\n scheduler(\n i,\n num_steps,\n num_frames,\n context_size,\n context_stride,\n context_overlap,\n )\n )\n )\n for i in range(len(timesteps))\n )" }, { "identifier": "get_tensor_interpolation_method", "path": "animatediff/utils/util.py", "snippet": "def get_tensor_interpolation_method():\n return tensor_interpolation" } ]
import inspect, math import numpy as np import torch import torch.distributed as dist import einops from typing import Callable, List, Optional, Union from dataclasses import dataclass from PIL import Image from tqdm import tqdm from diffusers.utils import is_accelerate_available from packaging import version from transformers import CLIPTextModel, CLIPTokenizer from diffusers.configuration_utils import FrozenDict from diffusers.models import AutoencoderKL from diffusers.pipeline_utils import DiffusionPipeline from diffusers.schedulers import ( DDIMScheduler, DPMSolverMultistepScheduler, EulerAncestralDiscreteScheduler, EulerDiscreteScheduler, LMSDiscreteScheduler, PNDMScheduler, ) from diffusers.utils import deprecate, logging, BaseOutput from einops import rearrange from animatediff.magic_animate.unet_controlnet import UNet3DConditionModel from animatediff.magic_animate.controlnet import ControlNetModel from animatediff.magic_animate.mutual_self_attention import ReferenceAttentionControl from animatediff.magic_animate.context import ( get_context_scheduler, get_total_steps ) from animatediff.utils.util import get_tensor_interpolation_method from accelerate import cpu_offload
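One portability note on the import block above: `diffusers.pipeline_utils` is a legacy module path that newer diffusers releases keep only as a deprecation shim (or drop entirely), while `DiffusionPipeline` has long been exported at the package top level. A version-tolerant import, as an illustrative sketch rather than part of the record:

try:
    from diffusers import DiffusionPipeline  # current public location
except ImportError:
    from diffusers.pipeline_utils import DiffusionPipeline  # legacy path used above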
17204
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
# ************************************************************************* # This file may have been modified by Bytedance Inc. (“Bytedance Inc.'s Mo- # difications”). All Bytedance Inc.'s Modifications are Copyright (2023) B- # ytedance Inc.. # ************************************************************************* # Adapted from https://github.com/showlab/Tune-A-Video/blob/main/tuneavideo/pipelines/pipeline_tuneavideo.py # Copyright 2023 The HuggingFace Team. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ TODO: 1. support multi-controlnet 2. [DONE] support DDIM inversion 3. support Prompt-to-prompt """ logger = logging.get_logger(__name__) # pylint: disable=invalid-name @dataclass class AnimationPipelineOutput(BaseOutput): videos: Union[torch.Tensor, np.ndarray] class AnimationPipeline(DiffusionPipeline): _optional_components = [] def __init__( self, vae: AutoencoderKL, text_encoder: CLIPTextModel, tokenizer: CLIPTokenizer, unet: UNet3DConditionModel,
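The "read" branch of the ReferenceAttentionControl snippet above rearranges banked reference features so a single reference frame can be attended against every video frame. The shape gymnastics are easy to misread; a standalone check with toy dimensions (the sizes are illustrative, not from the record) behaves like this:

import torch
from einops import rearrange

b, video_length, l, c = 2, 8, 77, 320
ref = torch.randn(b, l, c)                            # hidden states banked from one reference frame
ref = ref.unsqueeze(1).repeat(1, video_length, 1, 1)  # -> (b, t, l, c): copy per video frame
ref = rearrange(ref, "b t l c -> (b t) l c")          # -> ((b*t), l, c)
assert ref.shape == (b * video_length, l, c)          # matches the (b f) d c layout of video hidden states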
controlnet: ControlNetModel,
1
2023-12-12 00:16:39+00:00
24k
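The timestep handling in the ControlNet forward snippet of this record is subtle enough to restate in isolation: python scalars are promoted to 1-element tensors with an mps-safe dtype, 0-d tensors gain a batch dimension, and the result is expanded to the batch size, which avoids a CPU/GPU sync when callers already pass tensors. A self-contained re-expression (the function name `broadcast_timesteps` is mine, not the record's):

import torch

def broadcast_timesteps(timestep, sample):
    # Mirrors the promotion logic in the ControlNet forward above (illustrative only).
    timesteps = timestep
    if not torch.is_tensor(timesteps):
        is_mps = sample.device.type == "mps"  # mps lacks float64/int64 kernels
        if isinstance(timestep, float):
            dtype = torch.float32 if is_mps else torch.float64
        else:
            dtype = torch.int32 if is_mps else torch.int64
        timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
    elif timesteps.ndim == 0:
        timesteps = timesteps[None].to(sample.device)
    # broadcast to batch dimension in an ONNX/Core ML-compatible way
    return timesteps.expand(sample.shape[0])

sample = torch.randn(4, 4, 8, 32, 32)
assert broadcast_timesteps(981, sample).shape == (4,)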
qitan/devops-backend-lite
common/ext_fun.py
[ { "identifier": "generate_docu", "path": "common/utils/ElasticSearchAPI.py", "snippet": "def generate_docu(table, index_version=None):\n index_name = f\"{table.name}-{index_version}\" if index_version else table.name\n _tbindex = Index(index_name)\n _tbindex.analyzer(my_normalizer)\n _tbindex.settings(number_of_shards=3, number_of_replicas=1)\n _fields = Mapping().generate_data_mapping(table)\n docu = type(index_name, (CustomDocument,), _fields)\n return _tbindex.document(docu)" }, { "identifier": "Search", "path": "common/utils/ElasticSearchAPI.py", "snippet": "class Search(BaseSearch):\n def __init__(self, prefix=False, **kwargs):\n if kwargs.get('index', None) and prefix:\n if isinstance(kwargs['index'], string_types):\n kwargs['index'] = f\"{ELASTICSEARCH_PREFIX}{kwargs['index']}\"\n elif isinstance(kwargs['index'], list):\n kwargs['index'] = [\n f\"{ELASTICSEARCH_PREFIX}{i}\" for i in kwargs['index']]\n elif isinstance(kwargs['index'], tuple):\n kwargs['index'] = tuple(\n f\"{ELASTICSEARCH_PREFIX}{i}\" for i in kwargs['index'])\n else:\n raise Exception('索引名称格式错误!')\n super(Search, self).__init__(**kwargs)" }, { "identifier": "GitLabAPI", "path": "common/utils/GitLabAPI.py", "snippet": "class GitLabAPI(object):\n def __init__(self, url, user=None, password=None, token=None, oauth=False):\n self.__url = url\n if token:\n self.__token = token\n if oauth:\n params = {'oauth_token': self.__token}\n else:\n params = {'private_token': self.__token}\n self.__gl = gitlab.Gitlab(self.__url, **params)\n else:\n self.__gl = gitlab.Gitlab(\n self.__url, http_username=user, http_password=password)\n self.__gl.auth()\n\n def get_gl(self):\n return self.__gl\n\n def list_projects(self, get_all=False, key=None, per_page=20, page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n projects = self.__gl.projects.list(**params)\n return projects\n\n def get_project(self, project_id=None, project_name_with_namespace=None):\n if any([project_id, project_name_with_namespace]) is False:\n raise Exception('缺少参数,project_id或project_name_with_namespace必选其一.')\n condition = project_id or project_name_with_namespace\n try:\n project = self.__gl.projects.get(condition)\n return project\n except BaseException as e:\n logger.info(e)\n return None\n\n def create_project(self, name, namespace_id=None, initialize_with_readme=False):\n payload = {'name': name, 'path': name,\n 'initialize_with_readme': initialize_with_readme}\n if namespace_id:\n payload['namespace_id'] = namespace_id\n try:\n ret = self.__gl.projects.create(payload)\n return True, ret\n except BaseException as e:\n logger.exception(f'创建分支请求异常,原因:{e.__dict__}')\n return False, e\n\n def get_commit(self, commit_id, project_id=None, project_name_with_namespace=None):\n try:\n commit = self.get_project(\n project_id, project_name_with_namespace).get(commit_id)\n return commit\n except BaseException as e:\n logger.info(e)\n return None\n\n def list_groups(self, get_all=False, key=None, per_page=20, page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n groups = self.__gl.groups.list(**params)\n return [{'id': i.id, 'name': i.name, 'description': i.description} for i in groups if not i.parent_id]\n\n def create_group(self, name, path=None, desc=None, parent=None):\n \"\"\"\n 创建组\n \"\"\"\n payload = {'name': name, 'path': path or name,\n 
'description': desc or ''}\n if parent:\n payload['parent_id'] = parent\n try:\n group = self.__gl.groups.create(payload)\n return True, group\n except BaseException as e:\n logger.info(e)\n return False, e\n\n def create_branch(self, project, src_branch, target_branch):\n payload = {'branch': target_branch,\n 'ref': src_branch}\n if isinstance(project, (int,)):\n project = self.get_project(project)\n try:\n ret = project.branches.create(payload)\n return True, ret\n except BaseException as e:\n logger.exception(f'创建分支请求异常,原因:{e.__dict__}')\n return False, e\n\n def list_branches(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, protected='0', *args, **kwargs):\n params = {'per_page': per_page, 'page': page}\n if not protected:\n protected = '0'\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n params.update(kwargs)\n branches = self.get_project(project_id=project_id,\n project_name_with_namespace=project_name_with_namespace).branches.list(**params)\n branches = [{'uid': f\"{G_COMMIT[0][0]}:{i.name}\", 'name': i.name, 'commit': i.commit, 'label': G_COMMIT[0][0], 'protected': i.protected}\n for i in branches]\n if protected != '0':\n # 过滤受保护分支\n _map = {'1': True, '2': False}\n branches = [i for i in branches if i['protected']\n == _map[protected]]\n return branches\n\n def list_protected_branches(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, *args, **kwargs):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n params.update(kwargs)\n branches = self.get_project(project_id=project_id,\n project_name_with_namespace=project_name_with_namespace).protectedbranches.list(**params)\n branches = [{'uid': f\"{G_COMMIT[0][0]}:{i.name}\", 'name': i.name, 'commit': i.commit, 'label': G_COMMIT[0][0], 'protected': i.protected}\n for i in branches]\n return branches\n\n def list_tags(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n tags = self.get_project(\n project_id, project_name_with_namespace).tags.list(**params)\n tags = [{'uid': f\"{G_COMMIT[1][0]}:{i.name}\", 'name': i.name, 'message': i.message, 'commit': i.commit,\n 'label': G_COMMIT[1][0]} for i in tags]\n return tags\n\n def list_commits(self, project_id=None, project_name_with_namespace=None, get_all=False, key=None, per_page=20,\n page=1, ref_name=None, since=None):\n params = {'per_page': per_page, 'page': page}\n if get_all:\n params = {'get_all': True, 'per_page': per_page}\n if key:\n params['search'] = key\n if ref_name:\n params['ref_name'] = ref_name\n if since:\n params['since'] = since\n commits = self.get_project(\n project_id, project_name_with_namespace).commits.list(**params)\n commits = [\n {'title': i.title, 'short_id': i.short_id, 'author_name': i.author_name, 'committer_name': i.committer_name,\n 'committed_date': i.committed_date, 'message': i.message, 'web_url': i.web_url} for i in commits]\n return commits\n\n def repo_checkout(self, repo):\n import subprocess\n git_url = repo.split('//')\n subprocess.call(\n ['git', 'clone', f\"{git_url[0]}//oauth2:{self.__token}@{git_url[1]}\"])\n\n def get_user_id(self, username):\n user_list = 
self.__gl.users.list(username=username)\n if user_list:\n return user_list[0].id\n else:\n return None\n\n def get_project_from_name(self, project_name):\n projects = self.__gl.projects.list(search=project_name)\n for p in projects:\n if p.name == project_name:\n return p\n return None\n\n def add_project_member(self, project, user_id, access_level):\n try:\n project.members.create(\n {'user_id': user_id, 'access_level': access_level})\n return True, '成功'\n except Exception as error:\n return False, error\n\n def del_project_member(self, project, user_id):\n try:\n project.members.delete(user_id)\n return True, '成功'\n except Exception as error:\n return False, error" }, { "identifier": "HarborAPI", "path": "common/utils/HarborAPI.py", "snippet": "class HarborAPI(object):\n def __init__(self, url, username, password):\n self.__url = url.rstrip('/')\n self.__user = username\n self.__password = password\n self.__token = base64.b64encode(\n bytes('%s:%s' % (self.__user, self.__password), encoding='utf-8'))\n self.__headers = dict()\n self.__headers[\"Accept\"] = \"application/json\"\n self.__headers['authorization'] = 'Basic %s' % str(\n self.__token, encoding='utf-8')\n\n def request(self, method, obj=None, prefix='/'):\n try:\n if method == 'get':\n req = requests.request(method, '%s%s' % (self.__url, prefix), params=obj, headers=self.__headers,\n verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.json(), 'count': req.headers.get('X-Total-Count', None),\n 'next': req.headers.get('Link', None)}\n if method == 'delete':\n req = requests.request(method, '%s%s' % (\n self.__url, prefix), headers=self.__headers, verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n if method in ['put', 'post']:\n req = requests.request(method, '%s%s' % (self.__url, prefix), json=obj, headers=self.__headers,\n verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n if method == 'head':\n req = requests.request(method, '%s%s' % (\n self.__url, prefix), headers=self.__headers, verify=False)\n if req.status_code > 399:\n return {'ecode': req.status_code, 'message': f'{req.content}\\n{req.reason}'}\n res = {'ecode': req.status_code, 'data': req.content}\n except BaseException as e:\n raise e\n return res\n\n def systeminfo(self):\n res = self.request('get', prefix='/systeminfo')\n return res\n\n def get_users(self):\n res = self.request('get', prefix='/users')\n return res\n\n def get_projects(self, project_name=None, page=1, page_size=20):\n \"\"\"\n :project_name: The name of project\n :page: default is 1.\n :page_size: default is 10, maximum is 100.\n \"\"\"\n params = {'page': page, 'page_size': page_size}\n if project_name:\n params['name'] = project_name\n try:\n res = self.request('get', params, prefix='/projects')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def get_repositories(self, project_id, page=1, page_size=20, repo=None):\n params = {'project_id': project_id,\n 'page': page, 'page_size': page_size}\n if repo:\n params['q'] = repo\n try:\n res = self.request('get', params, '/repositories')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def get_tags(self, repo):\n 
try:\n res = self.request('get', prefix='/repositories/%s/tags' % repo)\n tags = [\n {'name': i['name'], 'created': i['created'], 'push_time': i.get(\n 'push_time', None), 'size': i['size']}\n for i in\n res['data']]\n tags.sort(key=lambda k: (k.get('created')), reverse=True)\n return {'ecode': 200, 'data': tags, 'count': len(tags)}\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def fetch_project(self, project_id):\n \"\"\"\n 获取项目信息\n \"\"\"\n try:\n res = self.request(\n 'get', {'project_id': project_id}, prefix=f'/projects/{project_id}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def fetch_tag(self, repo, tag):\n \"\"\"\n 获取指定镜像标签\n \"\"\"\n try:\n res = self.request(\n 'get', prefix=f'/repositories/{repo}/tags/{tag}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def create_project(self, project_name, public=True):\n \"\"\"\n 创建仓库项目\n \"\"\"\n try:\n data = {'project_name': project_name, 'metadata': {\n 'public': 'true' if public else 'false'}}\n res = self.request('post', obj=data, prefix='/projects')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def update_project(self, project_id, *args, **kwargs):\n \"\"\"\n 更新仓库项目\n \"\"\"\n try:\n res = self.request('put', obj=kwargs,\n prefix=f'/projects/{project_id}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def project_exists(self, project_name):\n \"\"\"\n 查询项目是否存在\n \"\"\"\n try:\n res = self.request(\n 'head', prefix=f'/projects?project_name={project_name}')\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def patch_tag(self, repo, src_image, tag_name):\n \"\"\"\n 镜像打标签\n \"\"\"\n try:\n try:\n # 创建仓库项目\n res = self.create_project(repo.split('/')[0])\n except BaseException as e:\n pass\n data = {'tag': tag_name, 'src_image': src_image, 'override': True}\n res = self.request(\n 'post', obj=data, prefix='/repositories/%s/tags' % repo)\n return res\n except BaseException as e:\n return {'ecode': 500, 'message': e}\n\n def delete_tag(self, repo, tag):\n \"\"\"\n 删除标签\n \"\"\"\n try:\n res = self.request(\n 'delete', prefix=f'/repositories/{repo}/tags/{tag}')\n return res\n except BaseException as e:\n logger.exception(e)\n return {'ecode': 500, 'message': e}\n\n def search(self, query):\n \"\"\"\n 搜索\n \"\"\"\n try:\n res = self.request('get', {'q': query}, prefix='/search')\n return res\n except BaseException as e:\n logger.exception(e)\n return {'ecode': 500, 'message': e}" }, { "identifier": "GlueJenkins", "path": "common/utils/JenkinsAPI.py", "snippet": "class GlueJenkins(Jenkins):\n\n def __init__(self, url=None, username=None, password=None):\n self.__url = url\n self.__username = username\n self.__password = password\n super(GlueJenkins, self).__init__(\n self.__url, self.__username, self.__password)\n\n def _get_encoded_params(self, params):\n for k, v in params.items():\n if k in [\"name\", \"msg\", \"short_name\", \"from_short_name\",\n \"to_short_name\", \"folder_url\", \"from_folder_url\", \"to_folder_url\"]:\n params[k] = quote(v.encode('utf8'))\n return params\n\n def _build_url(self, format_spec, variables=None):\n\n if variables:\n url_path = format_spec % self._get_encoded_params(variables)\n else:\n url_path = format_spec\n return str(urljoin(self.server, url_path))\n\n def assert_credential_exists(self, name, folder_name=None, domain_name='_',\n exception_message='credential[%s] does not exist.'):\n '''Raise an exception 
if credential does not exist in domain of folder\n\n :param name: Name of credential, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :param exception_message: Message to use for the exception.\n Formatted with ``name``, ``domain_name``,\n and ``folder_name``\n :throws: :class:`JenkinsException` whenever the credentail\n does not exist in domain of folder\n '''\n if not self.credential_exists(name, folder_name, domain_name):\n raise JenkinsException(exception_message\n % name)\n\n def get_credential_global_config(self, name, domain_name='_'):\n '''Get configuration of credential in domain of folder.\n :param name: Name of credentail, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Credential configuration (XML format)\n '''\n return self.jenkins_open(requests.Request(\n 'GET', self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n ))\n\n def get_credential_info(self, name, folder_name=None, domain_name='_'):\n '''Get credential information dictionary in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: folder_name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: Dictionary of credential info, ``dict``\n '''\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(CREDENTIAL_INFO_GLOBAL, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('credential[%s] does not exist.' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('credential[%s] does not exist.' % name)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for credential[%s].' % name\n )\n\n def credential_exists(self, name, folder_name=None, domain_name='_'):\n '''Check whether a credentail exists in domain of folder\n\n :param name: Name of credentail, ``str``\n :param folder_name: Folder name, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n :returns: ``True`` if credentail exists, ``False`` otherwise\n '''\n try:\n return self.get_credential_info(name)['id'] == name\n except JenkinsException:\n return False\n\n def create_credential_global(self, name=None, user=None, password=None, secret=None, comment=None, domain_name='_'):\n '''Create credentail in domain of folder\n\n :param name: username\n :param password: password\n :param comment: comment, ``str``\n :param config_xml: New XML configuration, ``str``\n :param domain_name: Domain name, default is '_', ``str``\n '''\n st = shortuuid.ShortUUID()\n st.set_alphabet(\n f\"0123456789{''.join([chr(i) for i in range(ord('a'), ord('z') + 1)])}\")\n if name is None:\n name = '-'.join(['api', st.random(length=8),\n st.random(length=4), st.random(length=12)])\n config_xml = '''<com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <username>%s</username>\n <password>%s</password>\n</com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl>''' % (name, comment, user, password)\n if user is None:\n config_xml = '''<org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>\n <scope>GLOBAL</scope>\n <id>%s</id>\n <description>[%s] Created by DevOps Platform</description>\n <secret>%s</secret>\n</org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl>''' % (name, comment, secret)\n if self.credential_exists(name):\n raise 
JenkinsException('credential[%s] already exists.' % name)\n\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_CREDENTIAL_GLOBAL, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n self.assert_credential_exists(\n name, exception_message='create credential[%s] failed.')\n return {'status': 0, 'data': name}\n\n def reconfig_credential_global(self, name, user=None, password=None, secret=None, comment=None, domain_name='_'):\n \"\"\"\n Reconfig credential with new config in domain of folder\n :param name: name, ``str``\n :param user:\n :param password:\n :param secret:\n :param comment:\n :param domain_name: Domain name, default is '_', ``str``\n :return:\n \"\"\"\n reconfig_url = self._build_url(CONFIG_CREDENTIAL_GLOBAL, locals())\n config_xml = self.get_credential_global_config(name)\n xml_dict = xmltodict.parse(config_xml)\n if user is None:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['secret'] = secret\n if comment:\n xml_dict['org.jenkinsci.plugins.plaincredentials.impl.StringCredentialsImpl']['description'] = comment\n else:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['username'] = user\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl']['password'] = password\n if comment:\n xml_dict['com.cloudbees.plugins.credentials.impl.UsernamePasswordCredentialsImpl'][\n 'description'] = comment\n config_xml = xmltodict.unparse(xml_dict, pretty=True)\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def create_job(self, name, config_xml):\n '''Create a new Jenkins job\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: config file text, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n if self.job_exists(name):\n raise JenkinsException('job[%s] already exists' % (name))\n\n try:\n self.jenkins_open(requests.Request(\n 'POST', self._build_url(CREATE_JOB, locals()),\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n except NotFoundException:\n raise JenkinsException('Cannot create job[%s] because folder '\n 'for the job does not exist' % (name))\n self.assert_job_exists(name, 'create[%s] failed')\n\n def reconfig_job(self, name, config_xml):\n '''Change configuration of existing Jenkins job.\n\n To create a new job, see :meth:`Jenkins.create_job`.\n\n :param name: Name of Jenkins job, ``str``\n :param config_xml: New XML configuration, ``str``\n '''\n folder_url, short_name = self._get_job_folder(name)\n reconfig_url = self._build_url(CONFIG_JOB, locals())\n self.jenkins_open(requests.Request(\n 'POST', reconfig_url,\n data=config_xml.encode('utf-8'),\n headers=DEFAULT_HEADERS\n ))\n\n def get_stage_describe(self, name, number, node_number):\n \"\"\" 获取 单个stage 详情 \"\"\"\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_DES, locals())\n ))\n\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_logs(self, name, number, node_number):\n \"\"\" 获取 stage 执行日志\"\"\"\n folder_url, 
short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_LOG, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_stage_info(self, name, number, depth=0):\n\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(STAGE_INFO, locals())\n ))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (name, number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (name, number)\n )\n\n def get_flow_detail(self, job_name, build_number):\n stage_data = self.get_stage_info(name=job_name, number=build_number)\n stages = stage_data.get('stages')\n for i in stages:\n logs = ''\n try:\n # 获取stage返回信息\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(i['_links']['self']['href']), locals())\n ))\n if response:\n res = json.loads(response)\n for j in res['stageFlowNodes']:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(\n unquote(j['_links']['log']['href']), locals())\n ))\n res = json.loads(response)\n try:\n # 移除href html信息,保留链接文字\n import re\n pat = re.compile('<a href[^>]*>')\n logs = logs + '\\n' + \\\n pat.sub('', res['text'].replace('</a>', ''))\n except:\n pass\n else:\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] number[%d] does not exist'\n % (job_name, build_number))\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for job[%s] number[%d]'\n % (job_name, build_number)\n )\n\n stage_data[\"stages\"][stages.index(i)]['logs'] = logs\n return stage_data\n\n def get_queue_item(self, number, depth=0):\n '''Get information about a queued item (to-be-created job).\n\n The returned dict will have a \"why\" key if the queued item is still\n waiting for an executor.\n\n The returned dict will have an \"executable\" key if the queued item is\n running on an executor, or has completed running. Use this to\n determine the job number / URL.\n\n :param name: queue number, ``int``\n :returns: dictionary of queued information, ``dict``\n '''\n url = self._build_url(Q_ITEM, locals())\n try:\n response = self.jenkins_open(requests.Request('GET', url))\n if response:\n return json.loads(response)\n else:\n raise JenkinsException('queue number[%d] does not exist'\n % number)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('queue number[%d] does not exist' % number)\n except ValueError:\n raise JenkinsException(\n 'Could not parse JSON info for queue number[%d]' % number\n )\n\n def build_job(self, name, parameters=None, token=None):\n '''Trigger build job.\n\n This method returns a queue item number that you can pass to\n :meth:`Jenkins.get_queue_item`. 
Note that this queue number is only\n valid for about five minutes after the job completes, so you should\n get/poll the queue information as soon as possible to determine the\n job's URL.\n\n :param name: name of job\n :param parameters: parameters for job, or ``None``, ``dict``\n :param token: Jenkins API token\n :returns: ``int`` queue item\n '''\n response = self.jenkins_request(requests.Request(\n 'POST', self.build_job_url(name, parameters, token)))\n\n if 'Location' not in response.headers:\n raise EmptyResponseException(\n \"Header 'Location' not found in \"\n \"response from server[%s]\" % self.server)\n\n location = response.headers['Location']\n if location.endswith('/'):\n location = location[:-1]\n parts = location.split('/')\n number = int(parts[-1])\n return number\n\n def get_job_config(self, name):\n '''Get configuration of existing Jenkins job.\n\n :param name: Name of Jenkins job, ``str``\n :returns: job configuration (XML format)\n '''\n folder_url, short_name = self._get_job_folder(name)\n request = requests.Request(\n 'GET', self._build_url(CONFIG_JOB, locals()))\n return self.jenkins_open(request)\n\n def get_job_info(self, name, depth=0, fetch_all_builds=False):\n '''Get job information dictionary.\n\n :param name: Job name, ``str``\n :param depth: JSON depth, ``int``\n :param fetch_all_builds: If true, all builds will be retrieved\n from Jenkins. Otherwise, Jenkins will\n only return the most recent 100\n builds. This comes at the expense of\n an additional API call which may\n return significant amounts of\n data. ``bool``\n :returns: dictionary of job information\n '''\n folder_url, short_name = self._get_job_folder(name)\n try:\n response = self.jenkins_open(requests.Request(\n 'GET', self._build_url(JOB_INFO, locals())\n ))\n if response:\n if fetch_all_builds:\n return self._add_missing_builds(json.loads(response))\n else:\n return json.loads(response)\n else:\n raise JenkinsException('job[%s] does not exist' % name)\n except (req_exc.HTTPError, NotFoundException):\n raise JenkinsException('job[%s] does not exist' % name)\n except ValueError:\n raise JenkinsException(\n \"Could not parse JSON info for job[%s]\" % name)" }, { "identifier": "convert_xml_to_str_with_pipeline", "path": "common/custom_format.py", "snippet": "def convert_xml_to_str_with_pipeline(xml, url, secret, desc, jenkinsfile, scm=True):\n \"\"\"\n scm\n True: jenkinsfile为指定的git地址\n False: jenkinsfile为具体的pipeline\n \"\"\"\n xml_dict = xmltodict.parse(xml)\n if scm:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsScmFlowDefinition'\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'url'] = url\n xml_dict['flow-definition']['definition']['scm']['userRemoteConfigs']['hudson.plugins.git.UserRemoteConfig'][\n 'credentialsId'] = secret\n xml_dict['flow-definition']['definition']['scriptPath'] = jenkinsfile\n else:\n xml_dict['flow-definition']['definition']['@class'] = 'org.jenkinsci.plugins.workflow.cps.CpsFlowDefinition'\n xml_dict['flow-definition']['definition']['script'] = jenkinsfile\n xml_dict['flow-definition']['definition']['sandbox'] = 'true'\n xml_dict['flow-definition']['description'] = desc\n result = xmltodict.unparse(\n xml_dict, short_empty_elements=True, pretty=True)\n return result" }, { "identifier": "DASHBOARD_TIME_FORMAT", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FORMAT = {'year_only': '%Y', 'years': '%Y-%m', 'months': '%Y-%m-%d', 'days': '%Y-%m-%d 
%H:00:00',\n 'hours': '%Y-%m-%d %H:%M:00', 'minutes': '%Y-%m-%d %H:%M:%S'}" }, { "identifier": "DASHBOARD_TIME_FORMAT_T", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FORMAT_T = {'years': '%Y', 'months': '%Y-%m', 'days': '%Y-%m-%d', 'hours': \"%Y-%m-%d %H:00:00\",\n 'minutes': \"%Y-%m-%d %H:%M:00\", 'seconds': \"%Y-%m-%d %H:%M:%S\"}" }, { "identifier": "DASHBOARD_TIME_FREQNAMES", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FREQNAMES = {'year_only': YEARLY, 'years': MONTHLY, 'months': DAILY, 'days': HOURLY, 'hours': MINUTELY,\n 'minutes': SECONDLY}" }, { "identifier": "DASHBOARD_TIME_FREQNAMES_T", "path": "common/variables.py", "snippet": "DASHBOARD_TIME_FREQNAMES_T = {'years': YEARLY, 'months': MONTHLY, 'days': DAILY, 'hours': HOURLY, 'minutes': MINUTELY,\n 'seconds': SECONDLY}" }, { "identifier": "SENSITIVE_KEYS", "path": "common/variables.py", "snippet": "SENSITIVE_KEYS = ['password', 'token', 'access',\n 'refresh', 'AUTHORIZATION', 'COOKIE']" }, { "identifier": "JENKINS_CALLBACK_KEY", "path": "common/variables.py", "snippet": "JENKINS_CALLBACK_KEY = 'jenkins_callback_flag::'" }, { "identifier": "JENKINS_STATUS_MAP", "path": "common/variables.py", "snippet": "JENKINS_STATUS_MAP = {'IN_PROGRESS': 3, 'SUCCESS': 1, 'FAILED': 2, 'ABORTED': 4, 'FAILURE': 2, 'NOT_EXECUTED': 5,\n 'NOT_EXEC_TIMEOUT': 5}" }, { "identifier": "DEV_LANGUAGE_KEY", "path": "common/variables.py", "snippet": "DEV_LANGUAGE_KEY = 'devlanguage:'" }, { "identifier": "AppInfo", "path": "dbapp/models.py", "snippet": "" }, { "identifier": "K8sAPI", "path": "common/utils/K8sAPI.py", "snippet": "class K8sAPI(object):\n def __init__(self, host=None, username=None, password=None, api_key=None, api_key_prefix='Bearer', verify_ssl=False,\n k8s_config=None,\n config_file=None, eks=None):\n \"\"\"\n elk: aws kubernetes\n \"\"\"\n self.__host = host\n self.__username = username\n self.__password = password\n self.__api_key = api_key\n self.__api_key_prefix = api_key_prefix\n self.__verify_ssl = verify_ssl\n if k8s_config is not None:\n config.kube_config.load_kube_config_from_dict(k8s_config)\n self.__client0 = client.CoreApi()\n self.__client = client.CoreV1Api()\n elif config_file is not None:\n config.kube_config.load_kube_config(config_file=config_file)\n self.__client0 = client.CoreApi()\n self.__client = client.CoreV1Api()\n elif self.__host:\n if self.__username and self.__password:\n self.__client = self.get_api()\n else:\n raise Exception('Please input username/password or api_key')\n else:\n raise Exception('Cannot find k8s config')\n self.client = self.__client\n\n def get_token(self):\n pass\n\n def get_api(self):\n configuration = client.Configuration()\n configuration.host = self.__host\n if self.__verify_ssl is False:\n configuration.verify_ssl = False\n configuration.username = self.__username\n configuration.password = self.__password\n basic_auth_token = configuration.get_basic_auth_token()\n api = core_v1_api.CoreV1Api(api_client.ApiClient(configuration=configuration, header_name=\"authorization\",\n header_value=basic_auth_token))\n return api\n\n def get_client(self):\n return self.__client\n\n def set_client(self, obj):\n self.__client = getattr(client, obj)()\n\n def get_apis(self):\n print(\"Supported APIs (* is preferred version):\")\n self.__client2 = client.ApisApi(self.__client0.api_client)\n for api in self.__client2.get_api_versions().groups:\n versions = []\n for v in api.versions:\n name = \"\"\n if v.version == api.preferred_version.version and len(\n api.versions) > 1:\n name 
+= \"*\"\n name += v.version\n versions.append(name)\n\n def get_nodes(self, **kwargs):\n ret = self.__client.list_node(**kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def get_node_info(self, name):\n ret = self.__client.read_node_status(name)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def get_namespaces(self, **kwargs):\n ret = self.__client.list_namespace(**kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def create_namespace(self, name):\n payload = {\n \"apiVersion\": \"v1\",\n \"kind\": \"Namespace\",\n \"metadata\": {\n \"name\": name,\n }\n }\n ret = self.__client.create_namespace(body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n print(rs)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def get_services(self, namespace='default', **kwargs):\n ret = self.__client.list_namespaced_service(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def fetch_service(self, name, namespace='default', api_version='apps/v1'):\n try:\n ret = self.__client.read_namespaced_service(name, namespace)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n print('reason', e.reason)\n return {'ecode': e.status, 'message': e.body}\n except BaseException as e:\n print('reason', e.reason)\n return {'ecode': e.status, 'message': e.body}\n\n def create_namespace_service(self, name, app=None, targets=list, namespace='default', service_type='NodePort',\n svc_yaml=None):\n \"\"\"\n 目前只支持NodePort类型,对外服务端口随机生成(如手动生成,需配置node_port和endpoints)\n :param name: service name\n :param app: app name\n :param targets: [{port, target_port, protocol, node_port}]\n :param namespace:\n :param service_type:\n :return:\n \"\"\"\n ports = []\n if svc_yaml:\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n else:\n for index, target in enumerate(targets):\n port_body = {'name': f\"{name}-{index}\", 'port': target['port'], 'target_port': target['port'],\n 'protocol': target['protocol']}\n if target['node_port'] > 30000:\n port_body['node_port'] = target['node_port']\n ports.append(client.V1ServicePort(**port_body))\n body = client.V1Service(\n api_version=\"v1\",\n kind=\"Service\",\n metadata=client.V1ObjectMeta(\n name=name\n ),\n spec=client.V1ServiceSpec(\n selector={\"app\": app},\n type=service_type,\n ports=ports\n )\n )\n try:\n ret = self.__client.create_namespaced_service(namespace=namespace, body=body,\n **{'_return_http_data_only': False})\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n logger.error('reason', e)\n return {'error': True, 'message': str(e)}\n except ApiException as e:\n if e.status == 409:\n logger.error('reason', e.reason)\n return {'error': True, 'ecode': e.status, 'message': e.body}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def update_namespace_service(self, name, app=None, targets=Type[list], namespace='default', service_type='NodePort',\n svc_yaml=None):\n ports = []\n if svc_yaml:\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n 
logger.debug(f'svc_yaml body == {body}')\n func = self.__client.replace_namespaced_service\n else:\n for index, target in enumerate(targets):\n port_body = {'name': target['name'], 'port': target['port'], 'target_port': target['port'],\n 'protocol': target['protocol']}\n if target['node_port'] > 30000:\n port_body['node_port'] = target['node_port']\n ports.append(client.V1ServicePort(**port_body))\n body = client.V1Service(\n api_version=\"v1\",\n kind=\"Service\",\n metadata=client.V1ObjectMeta(\n name=name\n ),\n spec=client.V1ServiceSpec(\n selector={\"app\": name},\n type=service_type,\n ports=ports\n )\n )\n func = self.__client.patch_namespaced_service\n try:\n ret = func(\n name, namespace, body=body,\n **{'_return_http_data_only': False}\n )\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n logger.error(f'ApiClient sanitize_for_serialization 异常: {e}', )\n return {'error': True, 'message': str(e)}\n except ApiException as e:\n if e.status == 409:\n logger.error(f'ApiException 异常 409 资源冲突: {e} {e.reason}', )\n return {'error': True, 'ecode': e.status, 'message': e.body}\n except BaseException as e:\n logger.error(f'patch_namespaced_service 异常: {e}', )\n return {'error': True, 'message': str(e)}\n\n def delete_namespace_service(self, name, namespace='default', api_version='apps/v1'):\n try:\n ret = self.__client.delete_namespaced_service(name, namespace)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_configmaps(self, namespace='default', **kwargs):\n ret = self.__client.list_namespaced_config_map(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_configmap(self, name, namespace='default', **kwargs):\n \"\"\"\n get configmap content\n \"\"\"\n try:\n ret = self.__client.read_namespaced_config_map(\n name, namespace, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def create_namespace_configmap(self, svc_yaml, namespace='default', **kwargs):\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n try:\n ret = self.__client.create_namespaced_config_map(\n namespace, body, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def update_namespace_configmap(self, name, svc_yaml, namespace='default', **kwargs):\n if isinstance(svc_yaml, str):\n body = yaml.safe_load(svc_yaml)\n else:\n body = svc_yaml\n try:\n ret = self.__client.patch_namespaced_config_map(\n name, namespace, body, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def delete_namespace_configmap(self, name, namespace='default', api_version='apps/v1'):\n try:\n ret = self.__client.delete_namespaced_config_map(name, namespace)\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_namespace_deployment(self, namespace='default', api_version='apps/v1', **kwargs):\n self.__client2 = 
operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n ret = self.__client2.list_namespaced_deployment(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def create_namespace_deployment(self, name, image=None, port=list, replicas=1, deploy_yaml=None,\n pod_type='Deployment', namespace='default'):\n \"\"\"\n\n :param name:\n :param image:\n :param port: [{containerPort: 8080, protocol: 'TCP'}]\n :param replicas:\n :param pod_type:\n :param namespace:\n :return:\n \"\"\"\n payload = {'kind': pod_type, 'spec': {'replicas': replicas, 'template': {\n 'spec': {'containers': [{'image': image, 'name': name, 'ports': port}]},\n 'metadata': {'labels': {'app': name}}},\n 'selector': {'matchLabels': {'app': name}}},\n 'apiVersion': 'apps/v1beta2',\n 'metadata': {'labels': {'app': name}, 'namespace': namespace,\n 'name': name}}\n if deploy_yaml is not None:\n payload = yaml.safe_load(deploy_yaml)\n payload['metadata'].pop('resourceVersion', None)\n self.__client2 = operator.methodcaller(\n ''.join([i.capitalize() for i in payload.get(\n 'apiVersion', 'apps/v1beta2').split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.create_namespaced_deployment(\n namespace=namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def delete_namespace_deployment(self, name, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n ret = self.__client2.delete_namespaced_deployment(name, namespace,\n body=client.V1DeleteOptions(grace_period_seconds=0,\n propagation_policy='Foreground'))\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def update_deployment(self, name, replicas=None, image=None, envs=None, deploy_yaml=None, namespace='default',\n api_version='apps/v1', force=False):\n \"\"\"\n force: 强制更新\n \"\"\"\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n payload = {'spec': {'replicas': replicas, 'template': {}}}\n if replicas is None and image is None and deploy_yaml is None:\n return {'err': '缺少参数'}\n if replicas is not None:\n payload['spec']['replicas'] = replicas\n if image is not None:\n payload['spec']['template'] = {\n 'spec': {'containers': [{'image': image, 'name': name}]}}\n\n if envs is not None:\n payload['spec']['template'] = {\n 'spec': {'containers': [{'env': envs}]}}\n\n if deploy_yaml is not None:\n payload = yaml.safe_load(deploy_yaml)\n payload['metadata'].pop('resourceVersion', None)\n try:\n if force:\n ret = self.__client2.replace_namespaced_deployment(\n name, namespace, body=payload)\n else:\n ret = self.__client2.patch_namespaced_deployment(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def update_deployment_replica(self, name, 
replicas, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n payload = {'spec': {'replicas': replicas}}\n ret = self.__client2.patch_namespaced_deployment_scale(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def update_deployment_image(self, name, image, namespace='default', api_version='apps/v1'):\n deploy = self.fetch_deployment(name, namespace)\n if deploy.get('ecode', 200) > 399:\n return deploy\n payload = {'spec': deploy['message']['spec']}\n payload['spec']['template']['spec']['containers'][0]['image'] = image\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.patch_namespaced_deployment(name, namespace, body=payload,\n **{'_return_http_data_only': False})\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def update_deployment_resource(self, name, envs, image_policy, namespace='default', api_version='apps/v1',\n **kwargs):\n payload = {'spec': {'template': {'spec': {'containers': [\n {'name': name, 'env': envs, 'imagePullPolicy': image_policy, 'resources': kwargs['resources']}]}}}}\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n ret = self.__client2.patch_namespaced_deployment(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def restart_deployment(self, name, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n payload = {\n 'spec': {\n 'template': {\n 'spec': {\n 'containers': [\n {\n 'name': name,\n 'env': [\n {\n 'name': 'RESTART_',\n 'value': datetime.now().strftime('%Y%m%d%H%M%S')\n }\n ]\n }\n ]\n }\n }\n }\n }\n\n ret = self.__client2.patch_namespaced_deployment(\n name, namespace, body=payload)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'err': str(e)}\n\n def fetch_deployment(self, name, namespace='default', api_version='apps/v1'):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.read_namespaced_deployment(name, namespace)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def get_replica(self, namespace='default', api_version='apps/v1', **kwargs):\n self.__client2 = operator.methodcaller(''.join([i.capitalize() for i in api_version.split('/')]) + 'Api',\n self.__client.api_client)(client)\n try:\n ret = self.__client2.list_namespaced_replica_set(\n namespace=namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n 
except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def get_pods(self, namespace=None, **kwargs):\n if namespace is None:\n return {}\n try:\n ret = self.__client.list_namespaced_pod(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except ApiException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def fetch_pod(self, name, namespace='default'):\n try:\n ret = self.__client.read_namespaced_pod(\n name=name, namespace=namespace)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return {'ecode': 200, 'message': rs}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n except BaseException as e:\n return {'ecode': e.status, 'message': e.body}\n\n def get_secrets(self, namespace='default', **kwargs):\n ret = self.__client.list_namespaced_secret(namespace, **kwargs)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def get_secret(self, name, namespace='default', **kwargs):\n \"\"\"\n get secret content\n \"\"\"\n ret = self.__client.read_namespaced_secret(name, namespace, **kwargs)\n try:\n ret = self.__client.read_namespaced_secret(\n name, namespace, **kwargs)\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}\n\n def manage_secret(self, name, namespace='default', api_version='v1', **kwargs):\n payload = kwargs.pop('payload', {})\n body = kubernetes.client.V1Secret(api_version=api_version, **payload)\n ret = {}\n try:\n ret = self.__client.replace_namespaced_secret(\n name, namespace, body, **kwargs)\n except ApiException as e:\n if e.status == 404:\n ret = self.__client.create_namespaced_secret(namespace, body)\n try:\n rs = ApiClient().sanitize_for_serialization(ret)\n return rs\n except BaseException as e:\n return {'error': True, 'message': str(e)}" } ]
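The context list above closes with the K8sAPI wrapper. One pattern worth calling out from it is restart_deployment, which forces a rollout by patching a timestamped env var into the pod template. Below is a minimal standalone sketch of that pattern using the official kubernetes client; the deployment name, namespace, and kubeconfig access are assumptions for illustration.

from datetime import datetime

from kubernetes import client, config

# Assumes ~/.kube/config points at a reachable cluster (illustrative only).
config.load_kube_config()
apps = client.AppsV1Api()

name, namespace = "demo-app", "default"  # hypothetical deployment
# Changing any field of the pod template triggers a rolling update, so a
# timestamped env var acts as a cheap "restart" knob -- the same trick
# K8sAPI.restart_deployment uses with its RESTART_ variable.
patch = {"spec": {"template": {"spec": {"containers": [{
    "name": name,
    "env": [{"name": "RESTART_",
             "value": datetime.now().strftime("%Y%m%d%H%M%S")}],
}]}}}}
apps.patch_namespaced_deployment(name, namespace, body=patch)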
from gitlab.exceptions import GitlabGetError from functools import reduce from common.utils.ElasticSearchAPI import generate_docu, Search from common.utils.GitLabAPI import GitLabAPI from common.utils.HarborAPI import HarborAPI from common.utils.JenkinsAPI import GlueJenkins from common.custom_format import convert_xml_to_str_with_pipeline from common.variables import DASHBOARD_TIME_FORMAT, DASHBOARD_TIME_FORMAT_T, DASHBOARD_TIME_FREQNAMES, \ DASHBOARD_TIME_FREQNAMES_T, SENSITIVE_KEYS, JENKINS_CALLBACK_KEY, \ JENKINS_STATUS_MAP, DEV_LANGUAGE_KEY from dbapp.models import AppInfo, Product, KubernetesCluster, KubernetesDeploy, MicroApp, Project, ProjectConfig, DevLanguage, BuildJob, UserProfile, SystemConfig, Role, Permission, Menu, DataDict from django.conf import settings from django.core.cache import cache from django.utils import timezone from django.db.models import Q from social_django.utils import load_strategy from rest_framework.utils.serializer_helpers import ReturnDict from config import SOCIAL_AUTH_GITLAB_API_URL, GITLAB_ADMIN_TOKEN from common.utils.K8sAPI import K8sAPI from urllib.parse import urlparse, quote_plus from dateutil.relativedelta import relativedelta from dateutil.rrule import rrule from ruamel import yaml from datetime import datetime, timedelta from celery import current_app import copy import operator import re import time import pytz import os import json import requests import math import shortuuid import logging
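Taken together, a record's fields describe one repo-level next-line completion task: context holds retrieved cross-file snippets, cropped_code holds the in-file prefix, and next_line is the target. The exact prompt layout is not part of the dump, so the concatenation below is only one plausible assembly.

def build_example(record):
    # Cross-file context: the retrieved snippets listed under `context`.
    cross_file = "\n".join(c["snippet"] for c in record["context"])
    # In-file context: the code immediately preceding the line to predict.
    prompt = cross_file + "\n" + record["cropped_code"]
    target = record["next_line"]
    return prompt, target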
17589
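No tokenizer is named alongside token_num, so any recount is approximate; the sketch below uses tiktoken's cl100k_base encoding purely as a stand-in.

import tiktoken

enc = tiktoken.get_encoding("cl100k_base")  # stand-in; actual tokenizer unknown
print(len(enc.encode("app = MicroApp.objects.get(id=app_id)")))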
'branch': 'master', 'content': content, 'author_email': user.email, 'author_name': user.username, 'commit_message': f'Create {instance.name} {filename} by {user.username}'}) content = project.files.raw( f"{instance.name}/{filename}", ref='master') return True, content except GitlabGetError as e: logger.info(f'获取异常,{e}') if e.response_code == 404: logger.info(f'{instance.name}/{filename}文件不存在') return True, '' except BaseException as e: logger.error(f'GitLab开发语言模板异常, 原因: {e}') return False, f'GitLab开发语言模板异常, 原因: {e}' def snake_case(x): """ 驼峰转下划线 """ term_exclude = ['OS', 'GPU', 'DB', 'IA', 'IP', 'RR', 'TTL', 'SLB', 'CPU', 'MEMORY', 'QPS'] for i in term_exclude: x = x.replace(i, i.lower()) return re.sub(r'(?P<key>[A-Z])', r'_\g<key>', x).lower().strip('_') def node_filter(node_id, data): """ 查找节点 :params: node_id int 节点ID :params: data list 节点数组 """ for i in data: if i['id'] == node_id: print('get node', i) return i else: if i.get('children', None): node = node_filter(node_id, i['children']) if isinstance(node, (dict,)): return node def get_time_range(request): """ 获取时间轴 """ type_range = request.query_params.get('range_type', 'static') if type_range == 'static': time_range = request.query_params.get('range', '6-months') else: time_range = request.query_params.getlist('range[]', None) if not time_range: time_range = '6-months' period = time_period(time_range, type_range) time_line = timeline_generate(period, format_type='cmdb') # 时间刻度, 以小时为刻度则删除年份 time_line_x = [i.split(' ')[-1] for i in time_line] if period['name'] == 'hours' else time_line return period, time_line, time_line_x def compare_dict(data, old_data): different_list = [] for k1 in data: if k1 == 'update_time': continue v1 = data.get(k1) v2 = old_data.get(k1) if v1 != v2: different_list.append({ 'key': k1, 'new_value': v1, 'old_value': v2 }) return different_list def get_project_mergerequest(project: Project, cli: GitLabAPI, **params): """ 获取项目下所有应用的合并请求 """ mrdata = [] git_project = [app.repo['id'] for app in project.microapp_set.all() if app.repo.get('id')] for project_id in set(git_project): try: git_project = cli.get_project(project_id) ok, data = cli.list_mrs(project=git_project, **params) if ok is False: continue mrdata.extend([i.attributes for i in data]) except BaseException as e: logger.error(f'获取应用合并请求异常,原因:{e}') return mrdata def gitlab_cli(user=None, admin=False, superadmin=False, merge=False): """ 获取GitlabAPI :param merge: 用于分支合并,管理员统一用配置文件里的token """ try: payload = {'token': GITLAB_ADMIN_TOKEN, 'oauth': False} cli = GitLabAPI(SOCIAL_AUTH_GITLAB_API_URL, **payload) return True, cli except BaseException as e: logger.warning(f'获取GitlabAPI异常,原因:{e}') return False, f'获取GitlabAPI异常,原因:{e}' def get_deploy_image_list(app_id, appinfo_id=None, module=None, force=0): # 可选发布镜像 # 获取关联应用ID
#!/usr/bin/env python # -*- coding: utf-8 -*- """ @Author : Charles Lai @Contact : [email protected] @Time : 2020/12/21 上午10:00 @FileName: ext_fun.py @Blog :https://imaojia.com """ logger = logging.getLogger('drf') class ThirdPartyUser(object): def get_user(self): user = UserProfile.objects.get_or_create(username='thirdparty')[0] self.set_permission(user, self.get_role()) return user def get_role(self): return Role.objects.get_or_create(name='thirdparty')[0] def get_perm(self): return Permission.objects.get_or_create(name='Jenkins回调', method='jenkins_callback')[0] def set_permission(self, user, role): role.permissions.set([self.get_perm().id]) user.roles.set([role.id]) def set_redis_data(name, config): cache.set(f"system:{name}", config, None) def get_redis_data(name): ret = cache.get(f"system:{name}") if not ret: try: if name == 'cicd-harbor': qs = SystemConfig.objects.filter(type=name)[0] else: qs = SystemConfig.objects.get(name=name) except BaseException as e: return None ret = json.loads(qs.config) set_redis_data(name, ret) return ret def get_datadict(name, config=0, default_value=None): """ 从数据字典获取数据 """ try: qs = DataDict.objects.get(key=name) except BaseException as e: return default_value if config: ret = json.loads(qs.extra) else: ret = {'id': qs.id, 'key': qs.key, 'value': qs.value, 'desc': qs.desc} return ret def check_pods(cluster_id, k8s_config, namespace, **kwargs): k8s = KubernetesCluster.objects.get(id=cluster_id) cli = k8s_cli(k8s, k8s_config) if not cli: return False count = 3 while count: ret2 = cli.get_pods(namespace, **kwargs) count -= 1 if len(ret2['items']) > 0: return True else: check_pods(k8s_config, namespace, **kwargs) return False def template_svc_generate(appinfo_obj): """ 生成Kubernetes Svc Yaml ### 格式: { "apiVersion": "v1", "kind": "Service", "metadata": { "name": "appname", "namespace": "env-product", "labels": { "app": "appname" } }, "spec": { "ports": [{ "port": 8080, "targetPort": 8080, "protocol": "TCP", "name": "http" }], "selector": { "app": "appname" } } } """ svc_temp = DataDict.objects.filter(key='yaml.svc') if svc_temp.exists(): svc_temp = json.loads(svc_temp.first().extra) if appinfo_obj.environment.name in svc_temp: svc_temp = svc_temp[appinfo_obj.environment.name] namespace = appinfo_obj.namespace svc_temp['metadata']['name'] = appinfo_obj.app.name svc_temp['metadata']['namespace'] = namespace svc_temp['metadata']['labels'] = {'app': appinfo_obj.app.name} labels = [] labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}]) svc_temp['spec']['selector'] = { i['name']: i['value'] for i in labels} return True, svc_temp return False, None def harbor_cli(namespace, **filters): try: harbor = SystemConfig.objects.filter(**filters).first() # 获取harbor配置 harbor_config = json.loads(harbor.config) except BaseException as e: logger.exception(f'创建任务失败, 原因: 获取harbor仓库异常, {e}') return False, f"获取harbor仓库异常:{e}" # 构建前创建harbor项目 cli = HarborAPI(url=harbor_config['url'], username=harbor_config['user'], password=harbor_config['password']) try: cli.create_project( namespace, public=harbor_config.get('public', False)) except BaseException as e: pass return True, harbor_config def k8s_cli(k8s, k8s_config): try: if k8s_config['type'] == 'basic': # basic auth or token auth k8s_config.pop('config', None) k8s_config.pop('type', None) cli = K8sAPI(**k8s_config) else: eks = None eks_token = None k8s_config = yaml.safe_load(k8s_config['config']) if k8s.idc.type == 1 and k8s.idc.supplier.split('.')[-1] == 'aws': return False, 'not support.' 
cli = K8sAPI(k8s_config=k8s_config, api_key=eks_token, eks=eks) return True, cli except BaseException as e: return False, str(e) def template_generate(appinfo_obj: AppInfo, image=None, partial_deploy_replicas: int = 0): """ 生成Kubernetes Deployment Yaml """ def health_lifecycle_generate(item, enable=True): _c = {} for i in template[item]['data']: _x = {} if i.get('enable', enable): for j in i['items']: if '__' in j['name']: _t = j['name'].split('__') _value = j['value'] if j['name'] == 'exec__command': _value = ["sh", "-c", j['value']] if _x.get(_t[0], None): _x[_t[0]][_t[1]] = _value else: _x[_t[0]] = {_t[1]: _value} else: _x[j['name']] = j['value'] _c[i['name']] = _x return _c def container_generate(container_data): containers = [] for i in container_data: if i.get('enable', None): container = get_datadict(i['key'], config=1) if not container: container = i['extra'] containers.append( container) return containers language_obj = DevLanguage.objects.get(name=appinfo_obj.app.language) project_config = ProjectConfig.objects.filter(project_id=appinfo_obj.app.project.id, environment_id=appinfo_obj.environment.id) namespace = appinfo_obj.namespace harbor_config = get_redis_data('cicd-harbor') harbor_url = harbor_config['url'].split('://')[1] image = f"{harbor_url}/{image}" template = {} # 模板优先级 # 应用模块 -> 应用 -> 项目 -> 环境 if project_config.first(): project_template = project_config.first().template for k, v in project_template.items(): if v and isinstance(v, (dict,)): if v.get('custom', False) is False: if appinfo_obj.environment.template.get(k, None): template[k] = appinfo_obj.environment.template[k] else: if project_template.get(k, None): template[k] = project_template[k] microapp_template = appinfo_obj.app.template for k, v in microapp_template.items(): if '_on' in k and v: _k = k.rstrip('_on') if microapp_template.get(_k, None): template[_k] = microapp_template[_k] use_host_network = False if appinfo_obj.template.get('userHostNetwork', 0): use_host_network = True for k, v in appinfo_obj.template.items(): if v and isinstance(v, (dict,)): if v.get('custom', False) and appinfo_obj.template.get(k, None): template[k] = appinfo_obj.template[k] yaml_template = {'kind': 'Deployment', 'metadata': {}, 'spec': {'strategy': {}, 'template': {'metadata': {}, 'spec': {'containers': [{'ports': [{'containerPort': 8080}], 'resources': []}], 'imagePullSecrets': [{'name': 'loginharbor'}], 'terminationGracePeriodSeconds': 120} } } } try: tz = appinfo_obj.app.project.product.region.extra['timezone'] except BaseException as e: tz = 'Asia/Shanghai' try: if template.get('strategy', None): for i in template['strategy']['data']: if i['key'] in ['maxSurge', 'maxUnavailable']: if yaml_template['spec']['strategy'].get('rollingUpdate', None) is None: yaml_template['spec']['strategy']['rollingUpdate'] = {} yaml_template['spec']['strategy']['rollingUpdate'][i['key'] ] = f"{i['value']}%" else: yaml_template['spec'][i['key']] = i['value'] _d = {} for i in template['resources']['data']: _t = i['key'].split('_') if _d.get(_t[0], None): _d[_t[0]][_t[1]] = f"{i['value']}{i['slot']}" else: _d[_t[0]] = {_t[1]: f"{i['value']}{i['slot']}"} yaml_template['spec']['template']['spec']['containers'][0]['resources'] = _d yaml_template['metadata']['name'] = appinfo_obj.app.name yaml_template['metadata']['namespace'] = namespace yaml_template['spec']['template']['spec']['containers'][0]['name'] = appinfo_obj.app.name yaml_template['spec']['template']['spec']['containers'][0]['image'] = image command = appinfo_obj.app.template.get( 'command', 
None) or language_obj.labels.get('command', None) if command: if command.startswith('./'): yaml_template['spec']['template']['spec']['containers'][0]['command'] = [ command] else: yaml_template['spec']['template']['spec']['containers'][0]['command'] = [ 'sh', '-c', command] # 优先级: 应用模块>应用>预设>开发语言 labels = template['label']['data'] labels.extend([{'name': 'app', 'value': appinfo_obj.app.name}]) yaml_template['spec']['template']['metadata']['labels'] = { i['name']: i['value'] for i in labels} yaml_template['spec']['template']['metadata']['labels'][ 'status-app-name-for-ops-platform'] = appinfo_obj.app.name yaml_template['spec']['selector'] = { 'matchLabels': {i['name']: i['value'] for i in labels}} selectors = template['selector']['data'] yaml_template['spec']['template']['spec']['nodeSelector'] = { i['name']: i['value'] for i in selectors} if 'annotations' not in yaml_template['spec']['template']['metadata']: yaml_template['spec']['template']['metadata']['annotations'] = {} for i in template['prometheus']['data']: yaml_template['spec']['template']['metadata'][ 'annotations'][f'prometheus.io/{i["name"]}'] = i['value'] if 'prometheus.io/path' in yaml_template['spec']['template']['metadata']['annotations']: yaml_template['spec']['template']['metadata']['annotations'][ 'prometheus.io/app_product'] = appinfo_obj.app.project.product.name yaml_template['spec']['template']['metadata']['annotations'][ 'prometheus.io/app_env'] = appinfo_obj.environment.name yaml_template['spec']['template']['metadata']['annotations'][ 'prometheus.io/app_project'] = appinfo_obj.app.project.name # 环境变量 envs = [{'name': 'TZ', 'value': tz}] envs.extend(template['env']['data']) envs.extend([ {'name': '_RESTART', 'value': datetime.now().strftime( '%Y%m%d%H%M%S')}, # _RESTART变量用于强制更新deployment {'name': 'PRODUCT_NAME', 'value': appinfo_obj.app.project.product.name}, {'name': 'PROJECT_NAME', 'value': appinfo_obj.app.project.name}, {'name': 'APPNAME', 'value': appinfo_obj.app.name}, {'name': 'APPID', 'value': appinfo_obj.app.appid}, {'name': 'ENV', 'value': appinfo_obj.environment.name}, {'name': 'POD_NAMESPACE', 'value': namespace} ]) envs = list({i['name']: i for i in envs}.values()) for i in envs: try: env_value = i.get('value', None) cmname = i.pop('cmname', None) cmkey = i.pop('cmkey', None) if env_value: env_value = env_value.lstrip('"').rstrip( '"').lstrip("'").rstrip("'") i.pop('value', None) i['name'] = i['name'].lstrip('"').rstrip( '"').lstrip("'").rstrip("'") if i.get('valueFrom', None) == 'configMapKeyRef': i['valueFrom'] = {'configMapKeyRef': { 'name': cmname, 'key': cmkey}} else: i['value'] = env_value i['valueFrom'] = None except BaseException as e: pass yaml_template['spec']['template']['spec']['containers'][0]['env'] = envs if template.get('health', False): _d = health_lifecycle_generate('health', True) for k, v in _d.items(): yaml_template['spec']['template']['spec']['containers'][0][k] = v if template.get('lifecycle', False): yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'] = { } _d = health_lifecycle_generate('lifecycle', False) for k, v in _d.items(): yaml_template['spec']['template']['spec']['containers'][0]['lifecycle'][k] = v _vo_mount = [{'mountPath': '/data/logs', 'name': 'logs', 'readOnly': False}] _volumes = [{'name': 'logs', 'type': 'Directory', 'hostPath': { 'path': f'/data/{appinfo_obj.environment.name}-applogs/{appinfo_obj.app.project.name}/'}}] if template.get('storage', None): for k, v in template['storage']['data'].items(): for i in v: _x = {} for m, n in i.items(): if 
isinstance(n, (str,)): n = n.replace('${APPNAME}', appinfo_obj.app.name) if '_' in m: _t = m.split('_') if _x.get(_t[0], None): _x[_t[0]][_t[1]] = n else: _x[_t[0]] = {_t[1]: n} else: _x[m] = n _t = {'mountPath': _x['mount'], 'name': _x['name'], 'readOnly': True if _x.get('mode', None) == 'ReadOnly' else False} if _x.get('file', None): _t['subPath'] = _x['configMap']['items'][0]['key'] _vo_mount.append(_t) _mode = _x.pop('mode', None) _x.pop('file', None) _x.pop('mount', None) if _x.get('configMap', None): _x['configMap']['defaultMode'] = 0o600 if _mode == 'ReadOnly' else 0o755 _volumes.append(_x) yaml_template['spec']['template']['spec']['containers'][0]['volumeMounts'] = _vo_mount yaml_template['spec']['template']['spec']['volumes'] = _volumes if use_host_network: yaml_template['spec']['template']['spec']['hostNetwork'] = True partial_deploy_yaml_template = None except BaseException as e: logger.exception(f'generate yaml err {e.__class__} {e}') return {'ecode': 500, 'message': str(e)} # 多容器处理 if appinfo_obj.template.get('containers_custom', None): containers = container_generate( appinfo_obj.template.get('containers', [])) else: containers = container_generate( project_config.first().template.get('containers', [])) yaml_template['spec']['template']['spec']['containers'].extend(containers) ret = {'ecode': 200, 'image': image, 'yaml': yaml_template} if partial_deploy_yaml_template: ret['partial_deploy_yaml'] = partial_deploy_yaml_template return ret def get_members(obj): team_members = [j for i in obj.team_members.values() for j in i] return list(set(team_members)) def get_permission_from_role(request): try: perms = request.user.roles.values( 'permissions__method', ).distinct() return [p['permissions__method'] for p in perms] except AttributeError: return [] def get_headers(request=None): """ Function: get_headers(self, request) Description: To get all the headers from request """ regex = re.compile('^HTTP_') return dict((regex.sub('', header), value) for (header, value) in request.META.items() if header.startswith('HTTP_')) def mask_sensitive_data(data): """ Hides sensitive keys specified in sensitive_keys settings. Loops recursively over nested dictionaries. 
""" if hasattr(settings, 'DRF_API_LOGGER_EXCLUDE_KEYS'): if type(settings.DRF_API_LOGGER_EXCLUDE_KEYS) in (list, tuple): SENSITIVE_KEYS.extend(settings.DRF_API_LOGGER_EXCLUDE_KEYS) if type(data) != dict and type(data) != ReturnDict: try: data = json.loads(data) except BaseException as e: return data for key, value in data.items(): if key in SENSITIVE_KEYS: data[key] = "***FILTERED***" if type(value) == dict: data[key] = mask_sensitive_data(data[key]) return data def time_convert(target_time): """ 时间转字符串 """ return target_time.astimezone(pytz.timezone('Asia/Shanghai')).strftime('%Y-%m-%d %H:%M:%S+08:00') def time_comp(target_time, **kwargs): """ 时间比较/统一使用utc时间对比 target_time: 目标时间 kwargs: 额外参数, 时间差 如{hours: 1}, {minutes: 1}, {seconds: 1}, 数值不取负数 """ ctime = timezone.now() if kwargs: # 两个时间是否在期望时间差范围 if target_time > ctime: return target_time - ctime <= timedelta(**kwargs) else: return ctime - target_time <= timedelta(**kwargs) # 判断两个时间是否相等 return ctime == target_time def timeline_generate(time_range, format_type='dashboard'): """ 根据起始时间生成时间线 : params format_type: 默认为dashboard, 用于概览报表粗略显示, 其它用于监控类的展示则使用更细粒度的格式 """ TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES TIME_FORMAT = DASHBOARD_TIME_FORMAT if format_type == 'cmdb': TIME_FREQNAMES = DASHBOARD_TIME_FREQNAMES_T TIME_FORMAT = DASHBOARD_TIME_FORMAT_T start_time = time_range['start_time'] end_time = time_range['end_time'] time_line = rrule( freq=TIME_FREQNAMES[time_range['name']], dtstart=start_time, until=end_time) return [i.strftime(TIME_FORMAT[time_range['name']]) for i in time_line] def time_period(time_range='6-months', type_range='static', time_zone='Asia/Shanghai', name=None): """ 根据时间范围生成起止时间 """ start_time = None end_time = timezone.now().astimezone(pytz.timezone(time_zone)) if type_range == 'dynamic' and name is None: start_time = datetime.strptime(time_range[0], '%Y-%m-%d %H:%M:%S') end_time = datetime.strptime(time_range[1], '%Y-%m-%d %H:%M:%S') if start_time > end_time: start_time, end_time = end_time, start_time if (end_time - start_time).days >= 60: name = 'months' elif (end_time - start_time).days >= 2: name = 'days' elif (end_time - start_time).days >= 1 or (end_time - start_time).seconds > 60 * 60: name = 'hours' else: name = 'minutes' return {'name': name, 'start_time': start_time, 'end_time': end_time} if type_range == 'static': _time = time_range.split('-') if _time[-1] == 'week': start_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second, microseconds=end_time.microsecond) return {'name': 'days', 'start_time': start_time, 'end_time': end_time} if _time[-1] == 'lastweek': start_time = end_time - relativedelta(days=end_time.weekday() + 7, hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second, microseconds=end_time.microsecond) end_time = end_time - relativedelta(days=end_time.weekday(), hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second, microseconds=end_time.microsecond) return {'name': 'days', 'start_time': start_time, 'end_time': end_time} if _time[-1] in ['today', 'yesterday']: start_time = end_time - relativedelta(hours=end_time.hour, minutes=end_time.minute, seconds=end_time.second, microseconds=end_time.microsecond) if _time[-1] == 'yesterday': end_time = start_time start_time = end_time - relativedelta(days=1) return {'name': 'hours', 'start_time': start_time, 'end_time': end_time} name = _time[1] if name is None: if _time[1] in ['years', 'months']: name = 'months' if _time[1] == 'months' and int(_time[0]) < 2: 
name = 'days' if _time[1] == 'days' and int(_time[0]) < 2: name = 'hours' start_time = end_time + relativedelta(**{_time[1]: -int(_time[0])}) return {'name': name, 'start_time': start_time, 'end_time': end_time} def extend_jenkins(data, env): jenkins = get_redis_data('cicd-jenkins') app = AppInfo.objects.filter(id=data['id'])[0] category = DataDict.objects.get(key=app.app.category) job_name = app.jenkins_jobname jenkins_cli = GlueJenkins(jenkins.get('url', 'http://localhost'), username=jenkins.get('user', 'admin'), password=jenkins.get('password', None)) try: view_xml_config = f'''<?xml version="1.0" encoding="UTF-8"?> <hudson.model.ListView> <name>{app.app.project.alias}{env.alias}</name> <filterExecutors>false</filterExecutors> <filterQueue>false</filterQueue> <properties class="hudson.model.View$PropertyList"/> <jobNames> <comparator class="hudson.util.CaseInsensitiveComparator"/> </jobNames> <jobFilters/> <columns> <hudson.views.StatusColumn/> <hudson.views.WeatherColumn/> <hudson.views.JobColumn/> <jenkins.branch.DescriptionColumn/> <hudson.views.LastSuccessColumn/> <hudson.views.LastFailureColumn/> <hudson.views.LastDurationColumn/> <hudson.views.BuildButtonColumn/> </columns> <includeRegex>{env.name.lower()}-.*-{app.app.project.name.lower()}-.*</includeRegex> </hudson.model.ListView>''' jenkins_cli.create_view( f'{app.app.project.alias}{env.alias}', view_xml_config) except BaseException as e: pass try: config_xml = convert_xml_to_str_with_pipeline(jenkins['xml'], jenkins['pipeline']['http_url_to_repo'], jenkins['gitlab_credit'], app.app.alias, f'{app.app.language}/Jenkinsfile') if not jenkins_cli.job_exists(job_name): jenkins_cli.create_job(name=job_name, config_xml=config_xml) else: jenkins_cli.reconfig_job(name=job_name, config_xml=config_xml) except Exception as e: logger.error(f"创建Jenkins JOB: {job_name} 失败 ERROR: {e}") def get_celery_tasks(): """ 获取celery任务 """ current_app.loader.import_default_modules() tasks = list( sorted(name for name in current_app.tasks if not name.startswith('celery.'))) return tasks def is_chinese(string): """ 检查整个字符串是否包含中文 :param string: 需要检查的字符串 :return: bool """ for ch in string: if u'\u4e00' <= ch <= u'\u9fff': return True return False def get_word_list(string): """ 切割字符串, 中文/-切割成单个字, 其它则切割成单个词 """ res = re.compile(r"([\u4e00-\u9fa5\-])") return [i for i in res.split(string.lower()) if len(i.strip()) > 0 and i != '-'] def devlanguage_template_manage(instance, filename, user=None, content=None, action='retrieve'): jenkins = get_redis_data('cicd-jenkins') ok, cli = gitlab_cli(admin=True) if not ok: return False, cli project_id = jenkins['pipeline'].get('id', None) if not project_id: return False, '获取流水线失败,请检查Jenkins配置.' 
project = cli.get_project(project_id) items = project.repository_tree(path=instance.name) try: if action == 'update': if filename in [i['name'] for i in items]: # 文件已存在则更新 f = project.files.get( f"{instance.name}/{filename}", ref='master') f.content = content f.save( branch='master', commit_message=f'Update {instance.name} {filename} by {user.username}') else: # 文件不存在则创建 logger.info(f'{instance.name}/{filename}文件不存在则创建') project.files.create({'file_path': f"{instance.name}/{filename}", 'branch': 'master', 'content': content, 'author_email': user.email, 'author_name': user.username, 'commit_message': f'Create {instance.name} {filename} by {user.username}'}) content = project.files.raw( f"{instance.name}/{filename}", ref='master') return True, content except GitlabGetError as e: logger.info(f'获取异常,{e}') if e.response_code == 404: logger.info(f'{instance.name}/{filename}文件不存在') return True, '' except BaseException as e: logger.error(f'GitLab开发语言模板异常, 原因: {e}') return False, f'GitLab开发语言模板异常, 原因: {e}' def snake_case(x): """ 驼峰转下划线 """ term_exclude = ['OS', 'GPU', 'DB', 'IA', 'IP', 'RR', 'TTL', 'SLB', 'CPU', 'MEMORY', 'QPS'] for i in term_exclude: x = x.replace(i, i.lower()) return re.sub(r'(?P<key>[A-Z])', r'_\g<key>', x).lower().strip('_') def node_filter(node_id, data): """ 查找节点 :params: node_id int 节点ID :params: data list 节点数组 """ for i in data: if i['id'] == node_id: print('get node', i) return i else: if i.get('children', None): node = node_filter(node_id, i['children']) if isinstance(node, (dict,)): return node def get_time_range(request): """ 获取时间轴 """ type_range = request.query_params.get('range_type', 'static') if type_range == 'static': time_range = request.query_params.get('range', '6-months') else: time_range = request.query_params.getlist('range[]', None) if not time_range: time_range = '6-months' period = time_period(time_range, type_range) time_line = timeline_generate(period, format_type='cmdb') # 时间刻度, 以小时为刻度则删除年份 time_line_x = [i.split(' ')[-1] for i in time_line] if period['name'] == 'hours' else time_line return period, time_line, time_line_x def compare_dict(data, old_data): different_list = [] for k1 in data: if k1 == 'update_time': continue v1 = data.get(k1) v2 = old_data.get(k1) if v1 != v2: different_list.append({ 'key': k1, 'new_value': v1, 'old_value': v2 }) return different_list def get_project_mergerequest(project: Project, cli: GitLabAPI, **params): """ 获取项目下所有应用的合并请求 """ mrdata = [] git_project = [app.repo['id'] for app in project.microapp_set.all() if app.repo.get('id')] for project_id in set(git_project): try: git_project = cli.get_project(project_id) ok, data = cli.list_mrs(project=git_project, **params) if ok is False: continue mrdata.extend([i.attributes for i in data]) except BaseException as e: logger.error(f'获取应用合并请求异常,原因:{e}') return mrdata def gitlab_cli(user=None, admin=False, superadmin=False, merge=False): """ 获取GitlabAPI :param merge: 用于分支合并,管理员统一用配置文件里的token """ try: payload = {'token': GITLAB_ADMIN_TOKEN, 'oauth': False} cli = GitLabAPI(SOCIAL_AUTH_GITLAB_API_URL, **payload) return True, cli except BaseException as e: logger.warning(f'获取GitlabAPI异常,原因:{e}') return False, f'获取GitlabAPI异常,原因:{e}' def get_deploy_image_list(app_id, appinfo_id=None, module=None, force=0): # 可选发布镜像 # 获取关联应用ID
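The all_code field above ends exactly where cropped_code does, right before the line to be predicted. Among the helpers it defines, timeline_generate shows a compact dateutil idiom: rrule enumerates evenly spaced ticks between two datetimes, and each tick is then rendered with a granularity-specific strftime format. A minimal standalone version:

from datetime import datetime

from dateutil.rrule import MONTHLY, rrule

# Enumerate one tick per month from start to end (inclusive), then format.
start, end = datetime(2023, 1, 1), datetime(2023, 6, 1)
ticks = rrule(freq=MONTHLY, dtstart=start, until=end)
print([t.strftime("%Y-%m") for t in ticks])
# ['2023-01', '2023-02', '2023-03', '2023-04', '2023-05', '2023-06']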
app = MicroApp.objects.get(id=app_id)
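next_line is the single ground-truth line a model should produce. The dataset ships no eval script in this dump, so the scoring below (exact match plus edit similarity, both common for next-line completion) is an assumed protocol rather than the official one.

import difflib

def score(prediction: str, reference: str) -> dict:
    # Strip surrounding whitespace so indentation differences don't dominate.
    p, r = prediction.strip(), reference.strip()
    return {
        "exact_match": p == r,
        "edit_similarity": difflib.SequenceMatcher(None, p, r).ratio(),
    }

print(score("app = MicroApp.objects.get(id=app_id)",
            "app = MicroApp.objects.get(id=app_id)"))
# {'exact_match': True, 'edit_similarity': 1.0}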
14
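Judging by its name, gold_snippet_index points at the entry in context whose snippet supplies the ground-truth definition; that reading is an inference from the field name, not documented here. Under that assumption, retrieval checks become a one-liner:

def gold_identifier(record):
    # Assumed semantics: index into record["context"] selecting the gold snippet.
    return record["context"][record["gold_snippet_index"]]["identifier"]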
2023-12-13 03:09:32+00:00
24k
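level ("24k" here) reads like a context-length bucket, and records of this shape can be loaded as plain JSON lines; both that interpretation and the file name below are assumptions.

from datasets import load_dataset

ds = load_dataset("json", data_files="repo_completion.jsonl", split="train")
long_ctx = ds.filter(lambda ex: ex["level"] == "24k")  # assumed length bucket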
MarilynKeller/aitviewer-skel
aitviewer/renderables/sdf.py
[ { "identifier": "BoundingBoxes", "path": "aitviewer/renderables/bounding_boxes.py", "snippet": "class BoundingBoxes(Node):\n \"\"\"\n Draw bounding boxes.\n \"\"\"\n\n def __init__(self, vertices, thickness=0.005, color=(0.0, 0.0, 1.0, 1.0), **kwargs):\n \"\"\"\n Initializer.\n :param vertices: Set of 3D coordinates as a np array of shape (N, 8, 3). The vertices will be connected in the\n following way: 0-1-2-3-0 (bottom) 4-5-6-7-4 (top) 0-4 1-5 2-6 3-7 (vertical connections between bottom\n and top).\n :param thickness: Line thickness.\n :param color: Color of the lines.\n \"\"\"\n if not isinstance(vertices, np.ndarray):\n vertices = np.array(vertices)\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n else:\n assert len(vertices.shape) == 3\n assert vertices.shape[1] == 8\n super(BoundingBoxes, self).__init__(n_frames=len(vertices), color=color, **kwargs)\n\n self.vertices = vertices\n\n self.lines = Lines(\n lines=self._get_line_coords(),\n mode=\"lines\",\n r_base=thickness,\n color=self.color,\n cast_shadow=False,\n )\n self.spheres = Spheres(positions=self.vertices, radius=thickness, color=self.color, cast_shadow=False)\n self._add_nodes(self.lines, self.spheres, show_in_hierarchy=False)\n\n @property\n def bounds(self):\n return self.get_bounds(self.vertices)\n\n @property\n def current_bounds(self):\n return self.get_bounds(self.vertices[self.current_frame_id])\n\n @staticmethod\n def from_min_max_diagonal(v_min, v_max, **kwargs):\n \"\"\"\n Create an axis-aligned bounding box from the 3D diagonal.\n :param v_min: np array of shape (N, 3).\n :param v_max: np array of shape (N, 3).\n :return: BoundingBoxes corresponding to the given diagonals.\n \"\"\"\n vertices = np.zeros((v_min.shape[0], 8, 3), dtype=v_min.dtype)\n vertices[:, 0:4] = v_min[:, np.newaxis]\n vertices[:, 1, 0] = v_max[:, 0]\n vertices[:, 2, 0:2] = v_max[:, 0:2]\n vertices[:, 3, 1] = v_max[:, 1]\n\n vertices[:, 4:] = v_max[:, np.newaxis]\n vertices[:, 4, 0:2] = v_min[:, 0:2]\n vertices[:, 7, 0] = v_min[:, 0]\n vertices[:, 5, 1] = v_min[:, 1]\n\n return BoundingBoxes(vertices, **kwargs)\n\n def _get_line_coords(self):\n lines = np.zeros((self.n_frames, 12 * 2, 3), dtype=self.vertices.dtype)\n\n # Bottom 0-1-2-3-0.\n lines[:, 0:2] = self.vertices[:, 0:2]\n lines[:, 2:4] = self.vertices[:, 1:3]\n lines[:, 4:6] = self.vertices[:, 2:4]\n lines[:, 6:8] = self.vertices[:, [3, 0]]\n\n # Top 4-5-6-7-4.\n lines[:, 8:10] = self.vertices[:, 4:6]\n lines[:, 10:12] = self.vertices[:, 5:7]\n lines[:, 12:14] = self.vertices[:, 6:8]\n lines[:, 14:16] = self.vertices[:, [7, 4]]\n\n # Vertical Connections.\n lines[:, 16:18] = self.vertices[:, [0, 4]]\n lines[:, 18:20] = self.vertices[:, [1, 5]]\n lines[:, 20:22] = self.vertices[:, [2, 6]]\n lines[:, 22:24] = self.vertices[:, [3, 7]]\n\n return lines\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.lines.color = color\n self.spheres.color = color" }, { "identifier": "Lines", "path": "aitviewer/renderables/lines.py", "snippet": "class Lines(Node):\n \"\"\"Render lines as cylinders or cones. Can render approx. 600k lines at 40 fps.\"\"\"\n\n def __init__(\n self,\n lines,\n r_base=0.01,\n r_tip=None,\n color=(0.0, 0.0, 1.0, 1.0),\n mode=\"line_strip\",\n cast_shadow=True,\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param lines: Set of 3D coordinates as a np array of shape (F, L, 3) or (L, 3).\n :param r_base: Thickness of the line.\n :param r_tip: If set, the thickness of the line will taper from r_base to r_tip. 
If set to 0.0 it will create\n a proper cone.\n :param color: Color of the line (4-tuple) or array of color (N_LINES, 4), one for each line.\n :param mode: 'lines' or 'line_strip'.\n 'lines': a line is drawn from point 0 to 1, from 2 to 3, and so on, number of lines is L / 2.\n 'line_strip': a line is drawn between all adjacent points, 0 to 1, 1 to 2 and so on, number of lines is L - 1.\n :param cast_shadow: If True the mesh casts a shadow on other objects.\n \"\"\"\n if len(lines.shape) == 2:\n lines = lines[np.newaxis]\n assert len(lines.shape) == 3\n assert mode == \"lines\" or mode == \"line_strip\"\n if mode == \"lines\":\n assert lines.shape[1] % 2 == 0\n\n self._lines = lines\n self.mode = mode\n self.r_base = r_base\n self.r_tip = r_tip if r_tip is not None else r_base\n\n self.vertices, self.faces = self.get_mesh()\n self.n_lines = self.lines.shape[1] // 2 if mode == \"lines\" else self.lines.shape[1] - 1\n\n # Define a default material in case there is None.\n if isinstance(color, tuple) or len(color.shape) == 1:\n kwargs[\"material\"] = kwargs.get(\"material\", Material(color=color, ambient=0.2))\n self.line_colors = kwargs[\"material\"].color\n else:\n assert (\n color.shape[1] == 4 and color.shape[0] == self.n_lines\n ), \"Color must be a tuple of 4 values or a numpy array of shape (N_LINES, 4)\"\n self.line_colors = color\n\n super(Lines, self).__init__(n_frames=self.lines.shape[0], **kwargs)\n\n self._need_upload = True\n self.draw_edges = False\n\n # Render passes.\n self.outline = True\n self.fragmap = True\n self.depth_prepass = True\n self.cast_shadow = cast_shadow\n\n @property\n def bounds(self):\n bounds = self.get_bounds(self.lines)\n r = max(self.r_base, self.r_tip)\n bounds[:, 0] -= r\n bounds[:, 1] += r\n return bounds\n\n @property\n def current_bounds(self):\n bounds = self.get_bounds(self.current_lines)\n r = max(self.r_base, self.r_tip)\n bounds[:, 0] -= r\n bounds[:, 1] += r\n return bounds\n\n @property\n def lines(self):\n return self._lines\n\n @lines.setter\n def lines(self, value):\n self._lines = value if len(value.shape) == 3 else value[np.newaxis]\n self.n_frames = self.lines.shape[0]\n self.redraw()\n\n @property\n def current_lines(self):\n idx = self.current_frame_id if self._lines.shape[0] > 1 else 0\n return self._lines[idx]\n\n @current_lines.setter\n def current_lines(self, lines):\n assert len(lines.shape) == 2\n idx = self.current_frame_id if self._lines.shape[0] > 1 else 0\n self._lines[idx] = lines\n self.redraw()\n\n @Node.color.setter\n def color(self, color):\n self.material.color = color\n self.line_colors = color\n self.redraw()\n\n @property\n def line_colors(self):\n if len(self._line_colors.shape) == 1:\n t = np.tile(np.array(self._line_colors), (self.n_lines, 1))\n return t\n else:\n return self._line_colors\n\n @line_colors.setter\n def line_colors(self, color):\n if isinstance(color, tuple):\n color = np.array(color)\n self._line_colors = color\n self.redraw()\n\n def on_frame_update(self):\n self.redraw()\n\n def redraw(self, **kwargs):\n self._need_upload = True\n\n @Node.once\n def make_renderable(self, ctx: moderngl.Context):\n self.prog = get_lines_instanced_program()\n\n vs_path = \"lines_instanced_positions.vs.glsl\"\n self.outline_program = get_outline_program(vs_path)\n self.depth_only_program = get_depth_only_program(vs_path)\n self.fragmap_program = get_fragmap_program(vs_path)\n\n self.vbo_vertices = ctx.buffer(self.vertices.astype(\"f4\").tobytes())\n self.vbo_indices = 
ctx.buffer(self.faces.astype(\"i4\").tobytes())\n self.vbo_instance_base = ctx.buffer(reserve=self.n_lines * 12)\n self.vbo_instance_tip = ctx.buffer(reserve=self.n_lines * 12)\n self.vbo_instance_color = ctx.buffer(reserve=self.n_lines * 16)\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_instance_base, \"3f4/i\", \"instance_base\")\n self.vao.buffer(self.vbo_instance_tip, \"3f4/i\", \"instance_tip\")\n self.vao.buffer(self.vbo_instance_color, \"4f4/i\", \"instance_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n def _upload_buffers(self):\n if not self.is_renderable or not self._need_upload:\n return\n self._need_upload = False\n\n lines = self.current_lines\n if self.mode == \"lines\":\n v0s = lines[::2]\n v1s = lines[1::2]\n else:\n v0s = lines[:-1]\n v1s = lines[1:]\n\n self.vbo_instance_base.write(v0s.astype(\"f4\").tobytes())\n self.vbo_instance_tip.write(v1s.astype(\"f4\").tobytes())\n\n if len(self._line_colors.shape) > 1:\n self.vbo_instance_color.write(self._line_colors.astype(\"f4\").tobytes())\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n\n prog = self.prog\n prog[\"r_base\"] = self.r_base\n prog[\"r_tip\"] = self.r_tip\n if len(self._line_colors.shape) == 1:\n prog[\"use_uniform_color\"] = True\n prog[\"uniform_color\"] = tuple(self.color)\n else:\n prog[\"use_uniform_color\"] = False\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n prog[\"clip_control\"].value = (0, 0, 0)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_lines)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n prog[\"r_base\"] = self.r_base\n prog[\"r_tip\"] = self.r_tip\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_lines)\n\n def get_mesh(self):\n v0s = np.array([[0, 0, 0]], np.float32)\n v1s = np.array([[0, 0, 1]], np.float32)\n\n # If r_tip is below a certain threshold, we create a proper cone, i.e. 
with just a single vertex at the top.\n if self.r_tip < 1e-5:\n data = _create_cone_from_to(v0s, v1s, radius=1.0)\n else:\n data = _create_cylinder_from_to(v0s, v1s, radius1=1.0, radius2=1.0)\n\n return data[\"vertices\"][0], data[\"faces\"]\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n\n def update_frames(self, lines, frames):\n self.lines[frames] = lines\n self.redraw()\n\n def add_frames(self, lines):\n if len(lines.shape) == 2:\n lines = lines[np.newaxis]\n self.lines = np.append(self.lines, lines, axis=0)\n\n def remove_frames(self, frames):\n self.lines = np.delete(self.lines, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n if self.mode == \"lines\":\n v0s = self.lines[:, ::2]\n v1s = self.lines[:, 1::2]\n else:\n v0s = self.lines[:, :-1]\n v1s = self.lines[:, 1:]\n\n print(self.lines.shape)\n print(v0s.shape)\n\n # Data is in the form of (F, N_LINES, 3), convert it to (F*N_LINES, 3)\n v0s = np.reshape(v0s, (-1, 3))\n v1s = np.reshape(v1s, (-1, 3))\n\n self.r_tip = self.r_base if self.r_tip is None else self.r_tip\n\n # If r_tip is below a certain threshold, we create a proper cone, i.e. with just a single vertex at the top.\n if self.r_tip < 10e-6:\n data = _create_cone_from_to(v0s, v1s, radius=self.r_base)\n else:\n data = _create_cylinder_from_to(v0s, v1s, radius1=self.r_base, radius2=self.r_tip)\n\n L = self.n_lines\n V = data[\"vertices\"].shape[1]\n\n vertices = data[\"vertices\"].reshape((self.n_frames, -1, 3))\n faces = data[\"faces\"]\n\n fs = faces[np.newaxis].repeat(L, 0).reshape((L, -1))\n offsets = (np.arange(L) * V).reshape((L, 1))\n faces = (fs + offsets).reshape((-1, 3))\n\n mesh = usd.add_mesh(stage, usd_path, self.name, vertices, faces, self.get_local_transform())\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Meshes", "path": "aitviewer/renderables/meshes.py", "snippet": "class Meshes(Node):\n \"\"\"A sequence of triangle meshes. This assumes that the mesh topology is fixed over the sequence.\"\"\"\n\n def __init__(\n self,\n vertices,\n faces,\n vertex_normals=None,\n face_normals=None,\n vertex_colors=None,\n face_colors=None,\n uv_coords=None,\n path_to_texture=None,\n cast_shadow=True,\n pickable=True,\n flat_shading=False,\n draw_edges=False,\n draw_outline=False,\n instance_transforms=None,\n icon=\"\\u008d\",\n **kwargs,\n ):\n \"\"\"\n Initializer.\n :param vertices: A np array of shape (N, V, 3) or (V, 3).\n :param faces: A np array of shape (F, 3).\n :param vertex_normals: A np array of shape (N, V, 3). If not provided, the vertex normals will be computed,\n which incurs some overhead.\n :param face_normals: A np array of shape (N, F, 3). 
If not provided, the face normals will be computed, which\n incurs some overhead.\n :param vertex_colors: A np array of shape (N, V, 4) overriding the uniform color.\n :param face_colors: A np array of shape (N, F, 4) overriding the uniform or vertex colors.\n :param uv_coords: A np array of shape (V, 2) if the mesh is to be textured.\n :param path_to_texture: Path to an image file that serves as the texture.\n :param cast_shadow: If True the mesh casts a shadow on other objects.\n :param pickable: If True the mesh can be selected with a mouse click.\n :param flat_shading: If True the each face of the mesh is shaded with a constant normal.\n :param draw_edges: If True the normals the edges of the mesh is drawn on top of the mesh.\n :param draw_outline: If true an outline is drawn around the mesh.\n :instance_transforms: np array of size (N, I, 4, 4) or (I, 4, 4) or None. If not None, 'I' instances of\n the same mesh will be rendered, each with its own transformation matrix.\n \"\"\"\n if len(vertices.shape) == 2 and vertices.shape[-1] == 3:\n vertices = vertices[np.newaxis]\n assert len(vertices.shape) == 3\n assert len(faces.shape) == 2\n n_frames = vertices.shape[0]\n\n # Instancing.\n if instance_transforms is not None:\n # Check shape of transforms.\n if len(instance_transforms.shape) == 3:\n instance_transforms = instance_transforms[np.newaxis]\n assert len(instance_transforms.shape) == 4\n\n # Number of instance frames must match number of frames or be 1.\n assert n_frames == 1 or instance_transforms.shape[0] == 1 or n_frames == instance_transforms.shape[0]\n n_frames = max(n_frames, instance_transforms.shape[0])\n\n self._instance_transforms = instance_transforms\n else:\n self._instance_transforms = None\n\n super(Meshes, self).__init__(n_frames=n_frames, icon=icon, **kwargs)\n\n self._vertices = vertices\n self._faces = faces.astype(np.int32)\n\n # Create these first because other setters can call redraw() which uses this fields.\n self._face_colors = None\n self._vertex_colors = None\n self._has_transparent_vertex_or_face_colors = False\n\n def _maybe_unsqueeze(x):\n return x[np.newaxis] if x is not None and x.ndim == 2 else x\n\n self._vertex_normals = _maybe_unsqueeze(vertex_normals)\n self._face_normals = _maybe_unsqueeze(face_normals)\n self.vertex_colors = _maybe_unsqueeze(vertex_colors)\n self.face_colors = _maybe_unsqueeze(face_colors)\n\n # Texture handling.\n self.has_texture = (uv_coords is not None) and (path_to_texture is not None)\n self.uv_coords = uv_coords\n self.texture_path = path_to_texture\n\n if self.has_texture:\n self.use_pickle_texture = path_to_texture.endswith((\".pickle\", \"pkl\"))\n if self.use_pickle_texture:\n self.texture_image = pickle.load(open(path_to_texture, \"rb\"))\n else:\n self.texture_image = Image.open(path_to_texture).transpose(method=Image.FLIP_TOP_BOTTOM).convert(\"RGB\")\n else:\n self.texture_image = None\n\n # Enable rendering passes\n self.cast_shadow = cast_shadow\n self.fragmap = pickable\n self.depth_prepass = True\n self.outline = True\n\n # Misc.\n self._flat_shading = flat_shading\n self.draw_edges = draw_edges\n self.draw_outline = draw_outline\n self.show_texture = self.has_texture\n self.norm_coloring = False\n self.normals_r = None\n self.need_upload = True\n self._use_uniform_color = self._vertex_colors is None and self._face_colors is None\n self._vertex_faces_sparse = trimesh.geometry.index_sparse(self._vertices.shape[1], self._faces)\n\n self.clip_control = np.array((0, 0, 0), np.int32)\n self.clip_value = 
np.array((0, 0, 0), np.float32)\n\n    @classmethod\n    def instanced(cls, *args, positions=None, rotations=None, scales=None, **kwargs):\n        \"\"\"\n        Creates and returns an instanced sequence of N frames and I instances.\n        Each instance will have its own position, rotation and scale.\n        :param positions: np array of size (N, I, 3) or (I, 3) or None.\n        :param rotations: np array of size (N, I, 3, 3) or (I, 3, 3) or None.\n        :param scales: np array of size (N, I) or (I) or None.\n\n        *args and **kwargs are forwarded to the Meshes constructor.\n        \"\"\"\n        assert positions is not None or rotations is not None or scales is not None\n\n        n_instances = 0\n        n_frames = 0\n\n        def check_array(a, dim):\n            nonlocal n_instances, n_frames\n            if a is not None:\n                if len(a.shape) == dim + 1:\n                    a = a[np.newaxis]\n                n_frames = max(n_frames, a.shape[0])\n                n_instances = max(n_instances, a.shape[1])\n            return a\n\n        positions = check_array(positions, 1)\n        rotations = check_array(rotations, 2)\n        scales = check_array(scales, 0)\n\n        if positions is None:\n            positions = np.zeros((n_frames, n_instances, 3))\n        if rotations is None:\n            rotations = np.zeros((n_frames, n_instances, 3, 3))\n            rotations[:, :] = np.eye(3)\n        if scales is None:\n            scales = np.ones((n_frames, n_instances))\n\n        transforms = np.zeros((n_frames, n_instances, 4, 4))\n        transforms[:, :, :3, :3] = (rotations.reshape((-1, 9)) * scales.reshape((-1, 1))).reshape(\n            (n_frames, n_instances, 3, 3)\n        )\n        transforms[:, :, :3, 3] = positions\n        transforms[:, :, 3, 3] = 1.0\n        return cls(*args, **kwargs, instance_transforms=transforms)\n\n    @classmethod\n    def from_file(cls, file, **kwargs):\n        \"\"\"\n        Loads a mesh from a file that can be loaded by trimesh (e.g. \".obj\", \".ply\", ...)\n        See trimesh.available_formats() for a complete list.\n        \"\"\"\n        mesh = trimesh.load(file)\n\n        uvs = None\n        vertex_colors = None\n        face_colors = None\n        if isinstance(mesh.visual, trimesh.visual.ColorVisuals):\n            if mesh.visual.kind == \"vertex_colors\":\n                vertex_colors = mesh.visual.vertex_colors\n            elif mesh.visual.kind == \"face_colors\":\n                face_colors = mesh.visual.face_colors\n        elif isinstance(mesh.visual, trimesh.visual.TextureVisuals):\n            uvs = mesh.visual.uv\n\n        return Meshes(\n            mesh.vertices,\n            mesh.faces,\n            vertex_normals=mesh.vertex_normals,\n            face_colors=face_colors,\n            vertex_colors=vertex_colors,\n            uv_coords=uvs,\n            **kwargs,\n        )\n\n    @property\n    def vertices(self):\n        return self._vertices\n\n    @vertices.setter\n    def vertices(self, vertices):\n        if len(vertices.shape) == 2:\n            vertices = vertices[np.newaxis]\n\n        # Update vertices and redraw\n        self._vertices = vertices\n        self.n_frames = len(vertices)\n\n        # If vertex or face normals were supplied, they are no longer valid.\n        self._vertex_normals = None\n        self._face_normals = None\n\n        # Must clear all LRU caches where the vertices are used.\n        self.compute_vertex_and_face_normals.cache_clear()\n\n        self.redraw()\n\n    @property\n    def faces(self):\n        return self._faces\n\n    @faces.setter\n    def faces(self, f):\n        self._faces = f.astype(np.int32)\n        self._vertex_faces_sparse = trimesh.geometry.index_sparse(self.vertices.shape[1], self._faces)\n\n    @property\n    def current_vertices(self):\n        idx = self.current_frame_id if self.vertices.shape[0] > 1 else 0\n        return self.vertices[idx]\n\n    @current_vertices.setter\n    def current_vertices(self, vertices):\n        idx = self.current_frame_id if self.vertices.shape[0] > 1 else 0\n        self._vertices[idx] = vertices\n        self.compute_vertex_and_face_normals.cache_clear()\n        self.redraw()\n\n    @property\n    def 
current_transformed_vertices(self):\n return (self.current_vertices @ self.model_matrix[:3, :3].T) + self.model_matrix[:3, 3]\n\n @property\n def transformed_vertices(self):\n return (self.vertices @ self.model_matrix[:3, :3].T) + self.model_matrix[:3, 3]\n\n @property\n def n_faces(self):\n return self.faces.shape[0]\n\n @property\n def n_vertices(self):\n return self.vertices.shape[1]\n\n @property\n def vertex_faces(self):\n # To compute the normals we need to know a mapping from vertex ID to all faces that this vertex is part of.\n # Because we are lazy we abuse trimesh to compute this for us. Not all vertices have the maximum degree, so\n # this array is padded with -1 if necessary.\n return trimesh.Trimesh(self.vertices[0], self.faces, process=False).vertex_faces\n\n @property\n def vertex_normals(self):\n \"\"\"Get or compute all vertex normals (this might take a while for long sequences).\"\"\"\n if self._vertex_normals is None:\n vertex_normals, _ = compute_vertex_and_face_normals_sparse(\n self.vertices, self.faces, self._vertex_faces_sparse, normalize=True\n )\n self._vertex_normals = vertex_normals\n return self._vertex_normals\n\n @property\n def face_normals(self):\n \"\"\"Get or compute all face normals (this might take a while for long sequences).\"\"\"\n if self._face_normals is None:\n _, face_normals = compute_vertex_and_face_normals_sparse(\n self.vertices, self.faces, self._vertex_faces_sparse, normalize=True\n )\n self._face_normals = face_normals\n return self._face_normals\n\n def vertex_normals_at(self, frame_id):\n \"\"\"Get or compute the vertex normals at the given frame.\"\"\"\n if self._vertex_normals is None:\n vn, _ = self.compute_vertex_and_face_normals(frame_id, normalize=True)\n else:\n assert len(self._vertex_normals.shape) == 3, f\"Got shape {self._vertex_normals.shape}\"\n vn = self._vertex_normals[frame_id]\n return vn\n\n def face_normals_at(self, frame_id):\n \"\"\"Get or compute the face normals at the given frame.\"\"\"\n if self._face_normals is None:\n _, fn = self.compute_vertex_and_face_normals(frame_id, normalize=True)\n else:\n assert len(self._face_normals.shape) == 3, f\"Got shape {self._face_normals.shape}\"\n fn = self._face_normals[frame_id]\n return fn\n\n @property\n def vertex_colors(self):\n if self._vertex_colors is None:\n self._vertex_colors = np.full((self.n_frames, self.n_vertices, 4), self.material.color)\n return self._vertex_colors\n\n @vertex_colors.setter\n def vertex_colors(self, vertex_colors):\n # If vertex_colors are None, we resort to the material color.\n if vertex_colors is None:\n self._vertex_colors = None\n self._use_uniform_color = True\n elif isinstance(vertex_colors, tuple) and len(vertex_colors) == 4:\n self.vertex_colors = None\n self._use_uniform_color = True\n self.material.color = vertex_colors\n else:\n if len(vertex_colors.shape) == 2:\n assert vertex_colors.shape[0] == self.n_vertices\n vertex_colors = np.repeat(vertex_colors[np.newaxis], self.n_frames, axis=0)\n assert len(vertex_colors.shape) == 3\n self._vertex_colors = vertex_colors\n self._use_uniform_color = False\n self.redraw()\n\n @property\n def current_vertex_colors(self):\n if self._use_uniform_color:\n return np.full((self.n_vertices, 4), self.material.color)\n else:\n idx = self.current_frame_id if self.vertex_colors.shape[0] > 1 else 0\n return self.vertex_colors[idx]\n\n @property\n def face_colors(self):\n return self._face_colors\n\n @face_colors.setter\n def face_colors(self, face_colors):\n if face_colors is not None:\n if 
len(face_colors.shape) == 2:\n                face_colors = face_colors[np.newaxis]\n            self._face_colors = face_colors\n            self._use_uniform_color = False\n        else:\n            self._face_colors = None\n        self.redraw()\n\n    @property\n    def current_face_colors(self):\n        if self._use_uniform_color:\n            return np.full((self.n_faces, 4), self.material.color)\n        else:\n            idx = self.current_frame_id if self.face_colors.shape[0] > 1 else 0\n            return self.face_colors[idx]\n\n    @Node.color.setter\n    def color(self, color):\n        self.material.color = color\n\n        if self.face_colors is None:\n            self.vertex_colors = color\n\n    @property\n    def flat_shading(self):\n        return self._flat_shading\n\n    @flat_shading.setter\n    def flat_shading(self, flat_shading):\n        if self._flat_shading != flat_shading:\n            self._flat_shading = flat_shading\n            self.redraw()\n\n    def closest_vertex_in_triangle(self, tri_id, point):\n        face_vertex_id = np.linalg.norm((self.current_vertices[self.faces[tri_id]] - point), axis=-1).argmin()\n        return self.faces[tri_id][face_vertex_id]\n\n    def get_bc_coords_from_points(self, tri_id, points):\n        return points_to_barycentric(self.current_vertices[self.faces[[tri_id]]], points)[0]\n\n    @lru_cache(2048)\n    def compute_vertex_and_face_normals(self, frame_id, normalize=False):\n        \"\"\"\n        Compute face and vertex normals for the given frame. We use an LRU cache since this is a potentially\n        expensive operation. This function exists because computing the normals on all frames can increase the\n        startup time of the viewer considerably.\n\n        :param frame_id: On which frame to compute the normals.\n        :param normalize: Whether or not to normalize the normals. Not doing it is faster and the shaders typically\n          enforce unit length of normals anyway.\n        :return: The vertex and face normals as np arrays of shape (V, 3) and (F, 3) respectively.\n        \"\"\"\n        vs = self.vertices[frame_id : frame_id + 1] if self.vertices.shape[0] > 1 else self.vertices\n        vn, fn = compute_vertex_and_face_normals_sparse(vs, self.faces, self._vertex_faces_sparse, normalize)\n        return vn.squeeze(0), fn.squeeze(0)\n\n    @property\n    def bounds(self):\n        if self.instance_transforms is None:\n            return self.get_bounds(self.vertices)\n        else:\n            # Get bounds in local coordinates\n            bounds = self.get_local_bounds(self.vertices)\n\n            # Transform bounds with instance transforms\n            min = np.append(bounds[:, 0], 1.0)\n            max = np.append(bounds[:, 1], 1.0)\n            transforms = self.instance_transforms.reshape((-1, 4, 4))\n            mins = transforms @ min\n            maxs = transforms @ max\n\n            # Return bounds in world coordinates\n            return self.get_bounds(np.vstack((mins[:, :3], maxs[:, :3])))\n\n    @property\n    def current_bounds(self):\n        if self.instance_transforms is None:\n            return self.get_bounds(self.current_vertices)\n        else:\n            # Get bounds in local coordinates\n            bounds = self.get_local_bounds(self.current_vertices)\n\n            # Transform bounds with instance transforms\n            min = np.append(bounds[:, 0], 1.0)\n            max = np.append(bounds[:, 1], 1.0)\n            transforms = self.current_instance_transforms.reshape((-1, 4, 4))\n            mins = transforms @ min\n            maxs = transforms @ max\n\n            # Return bounds in world coordinates\n            return self.get_bounds(np.vstack((mins[:, :3], maxs[:, :3])))\n\n    def is_transparent(self):\n        return self.color[3] < 1.0 or self._has_transparent_vertex_or_face_colors\n\n    def on_frame_update(self):\n        \"\"\"Called whenever a new frame must be displayed.\"\"\"\n        super().on_frame_update()\n        self.redraw()\n\n    @property\n    def current_instance_transforms(self):\n        if self._instance_transforms is None:\n            return None\n        idx = self.current_frame_id if 
self._instance_transforms.shape[0] > 1 else 0\n        return self._instance_transforms[idx]\n\n    @property\n    def instance_transforms(self):\n        return self._instance_transforms\n\n    @instance_transforms.setter\n    def instance_transforms(self, instance_transforms):\n        assert self._instance_transforms.shape == instance_transforms.shape\n        self._instance_transforms = instance_transforms\n\n    @property\n    def n_instances(self):\n        if self._instance_transforms is None:\n            return 1\n        else:\n            return self._instance_transforms.shape[1]\n\n    def _upload_buffers(self):\n        \"\"\"Upload the current frame data to the GPU for rendering.\"\"\"\n        if not self.is_renderable or not self._need_upload:\n            return\n\n        self._need_upload = False\n\n        # Write positions.\n        self.vbo_vertices.write(self.current_vertices.astype(\"f4\").tobytes())\n\n        # Write normals.\n        if not self.flat_shading:\n            vertex_normals = self.vertex_normals_at(self.current_frame_id)\n            self.vbo_normals.write(vertex_normals.astype(\"f4\").tobytes())\n\n        if self.face_colors is None:\n            # Write vertex colors.\n            self.vbo_colors.write(self.current_vertex_colors.astype(\"f4\").tobytes())\n        else:\n            # Write face colors.\n\n            # Compute shape of 2D texture.\n            shape = (min(self.faces.shape[0], 8192), (self.faces.shape[0] + 8191) // 8192)\n\n            # Write texture left justifying the buffer to fill the last row of the texture.\n            self.face_colors_texture.write(\n                self.current_face_colors.astype(\"f4\").tobytes().ljust(shape[0] * shape[1] * 16)\n            )\n\n        # Write uvs.\n        if self.has_texture:\n            self.vbo_uvs.write(self.uv_coords.astype(\"f4\").tobytes())\n\n        # Write instance transforms.\n        if self.instance_transforms is not None:\n            self.vbo_instance_transforms.write(\n                np.transpose(self.current_instance_transforms.astype(\"f4\"), (0, 2, 1)).tobytes()\n            )\n\n    @hooked\n    def redraw(self, **kwargs):\n        self._need_upload = True\n\n        transparent = False\n        if self._vertex_colors is not None:\n            transparent = transparent or np.any(self.vertex_colors[:, :, 3] < 1.0)\n        if self._face_colors is not None:\n            transparent = transparent or np.any(self.face_colors[:, :, 3] < 1.0)\n\n        self._has_transparent_vertex_or_face_colors = transparent\n\n    def _load_programs(self, vs, positions_vs):\n        instanced = 1 if self.instance_transforms is not None else 0\n        self.smooth_prog = get_smooth_lit_with_edges_program(vs, instanced)\n        self.flat_prog = get_flat_lit_with_edges_program(vs, instanced)\n        self.smooth_face_prog = get_smooth_lit_with_edges_face_color_program(vs, instanced)\n        self.flat_face_prog = get_flat_lit_with_edges_face_color_program(vs, instanced)\n\n        self.depth_only_program = get_depth_only_program(positions_vs, instanced)\n        self.outline_program = get_outline_program(positions_vs, instanced)\n        self.fragmap_program = get_fragmap_program(positions_vs, instanced)\n\n    # noinspection PyAttributeOutsideInit\n    @Node.once\n    def make_renderable(self, ctx: moderngl.Context):\n        \"\"\"Prepares this object for rendering. 
This function must be called before `render` is used.\"\"\"\n vs = \"lit_with_edges.glsl\"\n positions_vs = \"mesh_positions.vs.glsl\"\n self._load_programs(vs, positions_vs)\n\n vertices = self.current_vertices\n vertex_normals = self.vertex_normals_at(self.current_frame_id)\n vertex_colors = self.current_vertex_colors\n\n self.vbo_vertices = ctx.buffer(vertices.astype(\"f4\").tobytes())\n self.vbo_normals = ctx.buffer(vertex_normals.astype(\"f4\").tobytes())\n self.vbo_colors = ctx.buffer(vertex_colors.astype(\"f4\").tobytes())\n self.vbo_indices = ctx.buffer(self.faces.tobytes())\n\n self.vao = VAO()\n self.vao.buffer(self.vbo_vertices, \"3f4\", \"in_position\")\n self.vao.buffer(self.vbo_normals, \"3f4\", \"in_normal\")\n self.vao.buffer(self.vbo_colors, \"4f4\", \"in_color\")\n self.vao.index_buffer(self.vbo_indices)\n\n if self.instance_transforms is not None:\n self.vbo_instance_transforms = ctx.buffer(\n np.transpose(self.current_instance_transforms.astype(\"f4\"), (0, 2, 1)).tobytes()\n )\n self.vao.buffer(self.vbo_instance_transforms, \"16f4/i\", \"instance_transform\")\n\n # Compute shape of 2D texture.\n shape = (min(self.faces.shape[0], 8192), (self.faces.shape[0] + 8191) // 8192)\n self.face_colors_texture = ctx.texture(shape, 4, dtype=\"f4\")\n if self.face_colors is not None:\n # Write texture left justifying the buffer to fill the last row of the texture.\n self.face_colors_texture.write(\n self.current_face_colors.astype(\"f4\").tobytes().ljust(shape[0] * shape[1] * 16)\n )\n\n if self.has_texture:\n img = self.texture_image\n if self.use_pickle_texture:\n self.texture = ctx.texture(img.shape[:2], img.shape[2], img.tobytes())\n else:\n self.texture = ctx.texture(img.size, 3, img.tobytes())\n self.texture_prog = get_smooth_lit_texturized_program(vs)\n self.vbo_uvs = ctx.buffer(self.uv_coords.astype(\"f4\").tobytes())\n self.vao.buffer(self.vbo_uvs, \"2f4\", \"in_uv\")\n\n @hooked\n def release(self):\n if self.is_renderable:\n self.vao.release()\n if self.has_texture:\n self.texture.release()\n\n def _use_program(self, camera, **kwargs):\n if self.has_texture and self.show_texture:\n prog = self.texture_prog\n prog[\"diffuse_texture\"] = 0\n self.texture.use(0)\n else:\n if self.face_colors is None:\n if self.flat_shading:\n prog = self.flat_prog\n else:\n prog = self.smooth_prog\n else:\n if self.flat_shading:\n prog = self.flat_face_prog\n else:\n prog = self.smooth_face_prog\n self.face_colors_texture.use(0)\n prog[\"face_colors\"] = 0\n prog[\"norm_coloring\"].value = self.norm_coloring\n\n prog[\"use_uniform_color\"] = self._use_uniform_color\n prog[\"uniform_color\"] = self.material.color\n prog[\"draw_edges\"].value = 1.0 if self.draw_edges else 0.0\n prog[\"win_size\"].value = kwargs[\"window_size\"]\n\n prog[\"clip_control\"].value = tuple(self.clip_control)\n prog[\"clip_value\"].value = tuple(self.clip_value)\n\n self.set_camera_matrices(prog, camera, **kwargs)\n set_lights_in_program(\n prog,\n kwargs[\"lights\"],\n kwargs[\"shadows_enabled\"],\n kwargs[\"ambient_strength\"],\n )\n set_material_properties(prog, self.material)\n self.receive_shadow(prog, **kwargs)\n return prog\n\n def render(self, camera, **kwargs):\n self._upload_buffers()\n prog = self._use_program(camera, **kwargs)\n self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_instances)\n\n def render_positions(self, prog):\n if self.is_renderable:\n self._upload_buffers()\n\n prog[\"clip_control\"].value = tuple(self.clip_control)\n prog[\"clip_value\"].value = tuple(self.clip_value)\n\n 
self.vao.render(prog, moderngl.TRIANGLES, instances=self.n_instances)\n\n def _show_normals(self):\n \"\"\"Create and add normals at runtime\"\"\"\n vn = self.vertex_normals\n\n bounds = self.bounds\n diag = np.linalg.norm(bounds[:, 0] - bounds[:, 1])\n\n length = 0.005 * max(diag, 1) / self.scale\n vn = vn / np.linalg.norm(vn, axis=-1, keepdims=True) * length\n\n # Must import here because if we do it at the top we create a circular dependency.\n from aitviewer.renderables.arrows import Arrows\n\n positions = self.vertices\n self.normals_r = Arrows(\n positions,\n positions + vn,\n r_base=length / 10,\n r_head=2 * length / 10,\n p=0.25,\n name=\"Normals\",\n )\n self.normals_r.current_frame_id = self.current_frame_id\n self.add(self.normals_r)\n\n def gui(self, imgui):\n super(Meshes, self).gui(imgui)\n\n _, self.show_texture = imgui.checkbox(\n \"Render Texture##render_texture{}\".format(self.unique_name),\n self.show_texture,\n )\n _, self.norm_coloring = imgui.checkbox(\n \"Norm Coloring##norm_coloring{}\".format(self.unique_name),\n self.norm_coloring,\n )\n _, self.flat_shading = imgui.checkbox(\n \"Flat shading [F]##flat_shading{}\".format(self.unique_name),\n self.flat_shading,\n )\n _, self.draw_edges = imgui.checkbox(\"Draw edges [E]##draw_edges{}\".format(self.unique_name), self.draw_edges)\n _, self.draw_outline = imgui.checkbox(\n \"Draw outline##draw_outline{}\".format(self.unique_name), self.draw_outline\n )\n\n if self.normals_r is None:\n if imgui.button(\"Show Normals ##show_normals{}\".format(self.unique_name)):\n self._show_normals()\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.flat_shading = imgui.menu_item(\"Flat shading\", \"F\", selected=self.flat_shading, enabled=True)\n _, self.draw_edges = imgui.menu_item(\"Draw edges\", \"E\", selected=self.draw_edges, enabled=True)\n _, self.draw_outline = imgui.menu_item(\"Draw outline\", selected=self.draw_outline)\n\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n super().gui_context_menu(imgui, x, y)\n\n def gui_io(self, imgui):\n if imgui.button(\"Export OBJ##export_{}\".format(self.unique_name)):\n mesh = trimesh.Trimesh(vertices=self.current_vertices, faces=self.faces, process=False)\n mesh.export(\"../export/\" + self.name + \".obj\")\n\n def key_event(self, key, wnd_keys):\n if key == wnd_keys.F:\n self.flat_shading = not self.flat_shading\n elif key == wnd_keys.E:\n self.draw_edges = not self.draw_edges\n\n def update_frames(self, vertices, frames):\n self.vertices[frames] = vertices\n self.redraw()\n\n def add_frames(self, vertices):\n if len(vertices.shape) == 2:\n vertices = vertices[np.newaxis]\n self.vertices = np.append(self.vertices, vertices, axis=0)\n self.n_frames = max(self.n_frames, self.vertices.shape[0])\n\n def remove_frames(self, frames):\n self.vertices = np.delete(self.vertices, frames, axis=0)\n self.redraw()\n\n def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n name = f\"{self.name}_{self.uid:03}\".replace(\" \", \"_\")\n usd_path = f\"{usd_path}/{name}\"\n\n mesh = usd.add_mesh(stage, usd_path, self.name, self.vertices, self.faces, self.get_local_transform())\n if self.has_texture and not self.use_pickle_texture:\n # UVs.\n a_uv = UsdGeom.PrimvarsAPI(mesh).CreatePrimvar(\n \"st\", Sdf.ValueTypeNames.TexCoord2fArray, UsdGeom.Tokens.faceVarying\n )\n a_uv.Set(time=1, value=self.uv_coords[self.faces.flatten()])\n\n if not directory:\n texture_path = os.path.abspath(self.texture_path)\n else:\n texture_path = 
usd.copy_texture(self.texture_path, name, directory)\n usd.add_texture(stage, mesh, usd_path, texture_path)\n else:\n # NOTE: Per vertex and per face colors using usd displayColor are not currently\n # loaded by Blender. This code path can be enabled once support is there.\n if False:\n a_colors = mesh.GetDisplayColorAttr()\n if self._face_colors is not None:\n # Per face colors.\n if self._face_colors.shape[0] == 1:\n a_colors.Set(self._face_colors[0, :, :3].astype(np.float32))\n else:\n for i in range(self.n_frames):\n a_colors.Set(time=i + 1, value=self._face_colors[i, :, :3].astype(np.float32))\n elif self._vertex_colors is not None:\n # Per vertex colors.\n if self._vertex_colors.shape[0] == 1:\n a_colors.Set(self._vertex_colors[0, :, :3].astype(np.float32))\n else:\n for i in range(self.n_frames):\n a_colors.Set(time=i + 1, value=self._vertex_colors[i, :, :3].astype(np.float32))\n else:\n # Uniform color.\n a_colors.Set(np.array(self.color, np.float32)[:3])\n else:\n usd.add_color(stage, mesh, usd_path, self.color[:3])\n\n self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "Node", "path": "aitviewer/scene/node.py", "snippet": "class Node(object):\n \"\"\"Interface for nodes.\"\"\"\n\n def __init__(\n self,\n name=None,\n icon=None,\n position=None,\n rotation=None,\n scale=1.0,\n color=(0.5, 0.5, 0.5, 1.0),\n material=None,\n is_selectable=True,\n gui_affine=True,\n gui_material=True,\n enabled_frames=None,\n n_frames=1,\n ):\n \"\"\"\n :param name: Name of the node\n :param icon: Custom Node Icon using custom Icon font\n :param position: Starting position in the format (X,Y,Z) or np array of positions with shape (F, 3)\n :param rotation: Starting rotation in rotation matrix representation (3,3) or np array of rotations with shape (F, 3, 3)\n :param scale: Starting scale (scalar) or np array of scale values with shape (F)\n :param color: (R,G,B,A) 0-1 formatted color value.\n :param material: Object material properties. 
The color specified in the material will override node color\n :param is_selectable: If True the node is selectable when clicked on, otherwise the parent node will be selected.\n :param gui_affine: If True the node will have transform controls (position, rotation, scale) in the GUI.\n :param gui_material: If True the node will have material controls in the GUI.\n :param enabled_frames: Numpy array of boolean values, the object will be enabled only in frames where the value is True,\n the number of ones in the mask must match the number of frames of the object.\n :param n_frames: How many frames this renderable has.\n \"\"\"\n # Transform & Animation\n position = np.zeros(3, dtype=np.float32) if position is None else np.array(position, dtype=np.float32)\n rotation = np.eye(3, dtype=np.float32) if rotation is None else np.array(rotation, dtype=np.float32)\n\n self._positions = position if len(position.shape) != 1 else position[np.newaxis]\n self._rotations = rotation if len(rotation.shape) != 2 else rotation[np.newaxis]\n self._scales = (scale if isinstance(scale, np.ndarray) else np.array([scale])).astype(np.float32)\n\n n_positions = self._positions.shape[0]\n n_rotations = self._rotations.shape[0]\n n_scales = self._scales.shape[0]\n\n if n_frames > 1:\n assert n_positions == 1 or n_frames == n_positions, (\n f\"Number of position frames\" f\" ({n_positions}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_rotations == 1 or n_frames == n_rotations, (\n f\"Number of rotations frames\" f\" ({n_rotations}) must be 1 or match number of Node frames {n_frames}\"\n )\n assert n_scales == 1 or n_frames == n_scales, (\n f\"Number of scales frames\" f\" ({n_scales}) must be 1 or match number of Node frames {n_frames}\"\n )\n else:\n n_frames = max(n_positions, n_rotations, n_scales)\n assert (\n (n_positions == 1 or n_positions == n_frames)\n and (n_rotations == 1 or n_rotations == n_frames)\n and (n_scales == 1 or n_scales == n_frames)\n ), (\n f\"Number of position\"\n f\"({n_positions}), rotation ({n_rotations}) and scale ({n_scales})\"\n \"frames must be 1 or match.\"\n )\n\n # Frames\n self._n_frames = n_frames\n self._current_frame_id = 0\n self.model_matrix = self.get_local_transform()\n self._enabled_frames = enabled_frames\n if self._enabled_frames is not None:\n assert np.count_nonzero(self._enabled_frames) == n_frames, (\n f\"Number of non-zero elements in enabled_frames\"\n f\" ({np.count_nonzero(self._enabled_frames)}) must match number of frames in sequence ({n_frames})\"\n )\n # Create an array that maps from the true frame id (counting also disabled frames) to the index of the\n # first existing frame in the sequence.\n self._enabled_frame_id = np.cumsum(self._enabled_frames) - 1\n\n # Stores the true frame id (counting also disabled frames) we use this to allow going\n # through both enabled and disabled frames from the GUI.\n self._internal_frame_id = 0\n\n # Material\n self.material = Material(color=color) if material is None else material\n\n # Renderable Attributes\n self.is_renderable = False\n self.backface_culling = True\n self.backface_fragmap = False\n self.draw_outline = False\n\n # Flags to enable rendering passes\n self.cast_shadow = False\n self.depth_prepass = False\n self.fragmap = False\n self.outline = False\n\n # Programs for render passes. 
Subclasses are responsible for setting these.\n self.depth_only_program = None # Required for depth_prepass and cast_shadow passes\n self.fragmap_program = None # Required for fragmap pass\n self.outline_program = None # Required for outline pass\n\n # GUI\n self.name = name if name is not None else type(self).__name__\n self.uid = C.next_gui_id()\n self.unique_name = self.name + \"{}\".format(self.uid)\n self.icon = icon if icon is not None else \"\\u0082\"\n self._enabled = True\n self._expanded = False\n self.gui_controls = {\n \"affine\": {\n \"fn\": self.gui_affine,\n \"icon\": \"\\u009b\",\n \"is_visible\": gui_affine,\n },\n \"material\": {\n \"fn\": self.gui_material,\n \"icon\": \"\\u0088\",\n \"is_visible\": gui_material,\n },\n \"animation\": {\n \"fn\": self.gui_animation,\n \"icon\": \"\\u0098\",\n \"is_visible\": (lambda: self._n_frames > 1)(),\n },\n \"io\": {\n \"fn\": self.gui_io,\n \"icon\": \"\\u009a\",\n \"is_visible\": (lambda: self.gui_io.__func__ is not Node.gui_io)(),\n },\n }\n self.gui_modes = {\"view\": {\"title\": \" View\", \"fn\": self.gui_mode_view, \"icon\": \"\\u0099\"}}\n self._selected_mode = \"view\"\n self._show_in_hierarchy = True\n self.is_selectable = is_selectable\n self.export_usd_enabled = True\n self.export_usd_expanded = True\n\n self.nodes: List[Node] = []\n self.parent: Node = None\n\n # Selected Mode\n @property\n def selected_mode(self):\n return self._selected_mode\n\n @selected_mode.setter\n def selected_mode(self, selected_mode):\n self._selected_mode = selected_mode\n\n # Transform\n @property\n def position(self):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n return self._positions[idx]\n\n @position.setter\n def position(self, position):\n idx = self.current_frame_id if self._positions.shape[0] > 1 else 0\n self._positions[idx] = np.array(position, dtype=np.float32).copy()\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def positions(self):\n return self._positions\n\n @positions.setter\n def positions(self, positions):\n self._positions = positions\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotation(self):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n return self._rotations[idx]\n\n @rotation.setter\n def rotation(self, rotation):\n idx = self.current_frame_id if self._rotations.shape[0] > 1 else 0\n self._rotations[idx] = rotation\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def rotations(self):\n return self._rotations\n\n @rotations.setter\n def rotations(self, rotations):\n self._rotations = rotations\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scale(self):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n return self._scales[idx]\n\n @scale.setter\n def scale(self, scale):\n idx = self.current_frame_id if self._scales.shape[0] > 1 else 0\n self._scales[idx] = scale\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @property\n def scales(self):\n return self._scales\n\n @scales.setter\n def scales(self, scales):\n self._scales = scales\n self.update_transform(None if self.parent is None else self.parent.model_matrix)\n\n @staticmethod\n @lru_cache()\n def _compute_transform(pos, rot, scale):\n rotation = np.eye(4)\n rotation[:3, :3] = np.array(rot)\n\n trans = np.eye(4)\n trans[:3, 3] = np.array(pos)\n\n 
scale = np.diag([scale, scale, scale, 1])\n\n return (trans @ rotation @ scale).astype(\"f4\")\n\n def get_local_transform(self):\n \"\"\"Construct local transform as a 4x4 matrix from this node's position, orientation and scale.\"\"\"\n return self._compute_transform(tuple(self.position), tuple(map(tuple, self.rotation)), self.scale)\n\n def update_transform(self, parent_transform=None):\n \"\"\"Update the model matrix of this node and all of its descendants.\"\"\"\n if parent_transform is None:\n self.model_matrix = self.get_local_transform()\n else:\n self.model_matrix = parent_transform.astype(\"f4\") @ self.get_local_transform()\n\n for n in self.nodes:\n n.update_transform(self.model_matrix)\n\n @property\n def color(self):\n return self.material.color\n\n @color.setter\n def color(self, color):\n self.material.color = color\n\n @property\n def bounds(self):\n \"\"\"The bounds in the format ((x_min, x_max), (y_min, y_max), (z_min, z_max))\"\"\"\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_bounds(self):\n return np.array([[0, 0], [0, 0], [0, 0]])\n\n @property\n def current_center(self):\n return self.current_bounds.mean(-1)\n\n @property\n def center(self):\n return self.bounds.mean(-1)\n\n def get_local_bounds(self, points):\n if len(points.shape) == 2 and points.shape[-1] == 3:\n points = points[np.newaxis]\n assert len(points.shape) == 3\n\n # Compute min and max coordinates of the bounding box ignoring NaNs.\n val = np.array(\n [\n [np.nanmin(points[:, :, 0]), np.nanmax(points[:, :, 0])],\n [np.nanmin(points[:, :, 1]), np.nanmax(points[:, :, 1])],\n [np.nanmin(points[:, :, 2]), np.nanmax(points[:, :, 2])],\n ]\n )\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n def get_bounds(self, points):\n val = self.get_local_bounds(points)\n\n # Transform bounding box with the model matrix.\n val = (self.model_matrix @ np.vstack((val, np.array([1.0, 1.0]))))[:3]\n\n # If any of the elements is NaN return an empty bounding box.\n if np.isnan(val).any():\n return np.array([[0, 0], [0, 0], [0, 0]])\n else:\n return val\n\n @property\n def n_frames(self):\n return self._n_frames\n\n @n_frames.setter\n def n_frames(self, n_frames):\n self._n_frames = n_frames\n\n def __len__(self):\n return self.n_frames\n\n @property\n def current_frame_id(self):\n return self._current_frame_id\n\n @current_frame_id.setter\n def current_frame_id(self, frame_id):\n # Check if the frame changed.\n last_frame_id = self._current_frame_id if self._enabled_frames is None else self._internal_frame_id\n if self.n_frames == 1 or frame_id == last_frame_id:\n return\n\n self.on_before_frame_update()\n if self._enabled_frames is None:\n if frame_id < 0:\n self._current_frame_id = 0\n elif frame_id >= len(self):\n self._current_frame_id = len(self) - 1\n else:\n self._current_frame_id = frame_id\n else:\n # If an enabled_frames is present use it to get the current frame.\n if frame_id < 0:\n self._internal_frame_id = 0\n elif frame_id >= self._enabled_frames.shape[0]:\n self._internal_frame_id = self._enabled_frames.shape[0] - 1\n else:\n self._internal_frame_id = frame_id\n self._current_frame_id = self._enabled_frame_id[self._internal_frame_id]\n # Update enabled using the mask.\n self.enabled = self._enabled_frames[self._internal_frame_id]\n\n # Update frame id of all children nodes.\n for n in self.nodes:\n n.current_frame_id = self._current_frame_id\n\n self.on_frame_update()\n if 
self.parent and (self._positions.shape[0] > 1 or self._rotations.shape[0] > 1 or self._scales.shape[0] > 1):\n            self.update_transform(self.parent.model_matrix)\n\n    def next_frame(self):\n        self.current_frame_id = self.current_frame_id + 1 if self.current_frame_id < len(self) - 1 else 0\n\n    def previous_frame(self):\n        self.current_frame_id = self.current_frame_id - 1 if self.current_frame_id > 0 else len(self) - 1\n\n    def on_before_frame_update(self):\n        \"\"\"Called when the current frame is about to change, 'self.current_frame_id' still has the id of the\n        previous frame.\"\"\"\n        pass\n\n    def on_frame_update(self):\n        \"\"\"Called when the current frame is changed.\"\"\"\n        pass\n\n    def add(self, *nodes, **kwargs):\n        self._add_nodes(*nodes, **kwargs)\n\n    def _add_node(self, n: \"Node\", show_in_hierarchy=True, expanded=False, enabled=True):\n        \"\"\"\n        Add a single node\n        :param show_in_hierarchy: Whether to show the node in the scene hierarchy.\n        :param expanded: Whether the node is initially expanded in the GUI.\n        \"\"\"\n        if n is None:\n            return\n        n._show_in_hierarchy = show_in_hierarchy\n        n._expanded = expanded\n        n._enabled = enabled if n._enabled_frames is None else n._enabled_frames[n.current_frame_id]\n        self.nodes.append(n)\n        n.parent = self\n        n.update_transform(self.model_matrix)\n\n    def _add_nodes(self, *nodes, **kwargs):\n        \"\"\"Add multiple nodes\"\"\"\n        for n in nodes:\n            self._add_node(n, **kwargs)\n\n    def remove(self, *nodes):\n        for n in nodes:\n            n.release()\n            try:\n                self.nodes.remove(n)\n            except:\n                pass\n\n    @property\n    def show_in_hierarchy(self):\n        return self._show_in_hierarchy\n\n    @property\n    def enabled(self):\n        return self._enabled\n\n    @enabled.setter\n    def enabled(self, enabled):\n        self._enabled = enabled\n\n    @property\n    def expanded(self):\n        return self._expanded\n\n    @expanded.setter\n    def expanded(self, expanded):\n        self._expanded = expanded\n\n    def is_transparent(self):\n        \"\"\"\n        Returns true if the object is transparent and should thus be sorted when rendering.\n        Subclasses that use a different color should implement this method to be rendered correctly when transparent.\n        \"\"\"\n        return self.material.color[3] < 1.0\n\n    def gui(self, imgui):\n        \"\"\"\n        Render GUI for custom node properties and controls. 
Implementation optional.\n Elements rendered here will show up in the scene hierarchy\n :param imgui: imgui context.\n See https://pyimgui.readthedocs.io/en/latest/reference/imgui.core.html for available elements to render\n \"\"\"\n pass\n\n def gui_modes(self, imgui):\n \"\"\"Render GUI with toolbar (tools) for this particular node\"\"\"\n\n def gui_animation(self, imgui):\n \"\"\"Render GUI for animation related settings\"\"\"\n\n if self._enabled_frames is None:\n if self.n_frames > 1:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self.current_frame_id,\n min_value=0,\n max_value=self.n_frames - 1,\n )\n if u:\n self.current_frame_id = fid\n else:\n u, fid = imgui.slider_int(\n \"Frame##r_{}\".format(self.unique_name),\n self._internal_frame_id,\n min_value=0,\n max_value=self._enabled_frames.shape[0] - 1,\n )\n if u:\n self.current_frame_id = fid\n\n def gui_affine(self, imgui):\n \"\"\"Render GUI for affine transformations\"\"\"\n # Position controls\n up, pos = imgui.drag_float3(\n \"Position##pos{}\".format(self.unique_name),\n *self.position,\n 1e-2,\n format=\"%.2f\",\n )\n if up:\n self.position = pos\n\n # Rotation controls\n euler_angles = rot2euler_numpy(self.rotation[np.newaxis], degrees=True)[0]\n ur, euler_angles = imgui.drag_float3(\n \"Rotation##pos{}\".format(self.unique_name),\n *euler_angles,\n 1e-2,\n format=\"%.2f\",\n )\n if ur:\n self.rotation = euler2rot_numpy(np.array(euler_angles)[np.newaxis], degrees=True)[0]\n\n # Scale controls\n us, scale = imgui.drag_float(\n \"Scale##scale{}\".format(self.unique_name),\n self.scale,\n 1e-2,\n min_value=0.001,\n max_value=100.0,\n format=\"%.3f\",\n )\n if us:\n self.scale = scale\n\n def gui_material(self, imgui):\n \"\"\"Render GUI with material properties\"\"\"\n\n # Color Control\n uc, color = imgui.color_edit4(\"Color##color{}'\".format(self.unique_name), *self.material.color)\n if uc:\n self.color = color\n\n # Diffuse\n ud, diffuse = imgui.slider_float(\n \"Diffuse##diffuse{}\".format(self.unique_name),\n self.material.diffuse,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ud:\n self.material.diffuse = diffuse\n\n # Ambient\n ua, ambient = imgui.slider_float(\n \"Ambient##ambient{}\".format(self.unique_name),\n self.material.ambient,\n 0.0,\n 1.0,\n \"%.2f\",\n )\n if ua:\n self.material.ambient = ambient\n\n def gui_io(self, imgui):\n \"\"\"Render GUI for import/export\"\"\"\n pass\n\n def gui_mode_view(self, imgui):\n \"\"\"Render custom GUI for view mode\"\"\"\n pass\n\n def gui_context_menu(self, imgui, x: int, y: int):\n _, self.enabled = imgui.checkbox(\"Enabled\", self.enabled)\n if any([n._show_in_hierarchy for n in self.nodes]):\n imgui.spacing()\n imgui.separator()\n imgui.spacing()\n for n in self.nodes:\n if not n._show_in_hierarchy:\n continue\n if imgui.begin_menu(f\"{n.name}##{n.uid}\"):\n n.gui_context_menu(imgui, x, y)\n imgui.end_menu()\n\n # Renderable\n @staticmethod\n def once(func):\n def _decorator(self, *args, **kwargs):\n if self.is_renderable:\n return\n else:\n func(self, *args, **kwargs)\n self.is_renderable = True\n\n return _decorator\n\n def make_renderable(self, ctx):\n \"\"\"\n Prepares this object for rendering. 
This function must be called before `render` is used.\n :param ctx: The moderngl context.\n \"\"\"\n pass\n\n def render(self, camera, position=None, rotation=None, **kwargs):\n \"\"\"Render the current frame in this sequence.\"\"\"\n pass\n\n def render_positions(self, prog):\n \"\"\"\n Render with a VAO with only positions bound, used for shadow mapping, fragmap and depth prepass.\n \"\"\"\n pass\n\n def redraw(self, **kwargs):\n \"\"\"Perform update and redraw operations. Push to the GPU when finished. Recursively redraw child nodes\"\"\"\n for n in self.nodes:\n n.redraw(**kwargs)\n\n def set_camera_matrices(self, prog, camera, **kwargs):\n \"\"\"Set the model view projection matrix in the given program.\"\"\"\n # Transpose because np is row-major but OpenGL expects column-major.\n prog[\"model_matrix\"].write(self.model_matrix.T.astype(\"f4\").tobytes())\n prog[\"view_projection_matrix\"].write(camera.get_view_projection_matrix().T.astype(\"f4\").tobytes())\n\n def receive_shadow(self, program, **kwargs):\n \"\"\"\n Call this function if the renderable is to receive shadows.\n :param program: The shader program that can shade with shadows.\n :param kwargs: The render kwargs.\n \"\"\"\n if kwargs.get(\"shadows_enabled\", False):\n lights = kwargs[\"lights\"]\n\n for i, light in enumerate(lights):\n if light.shadow_enabled and light.shadow_map:\n light_matrix = light.mvp() @ self.model_matrix\n program[f\"dirLights[{i}].matrix\"].write(light_matrix.T.tobytes())\n\n # Bind shadowmap to slot i + 1, we reserve slot 0 for the mesh texture\n # and use slots 1 to (#lights + 1) for shadow maps\n light.shadow_map.use(location=i + 1)\n\n # Set sampler uniforms\n uniform = program[f\"shadow_maps\"]\n uniform.value = 1 if uniform.array_length == 1 else [*range(1, len(lights) + 1)]\n\n def render_shadowmap(self, light_matrix):\n if not self.cast_shadow or self.depth_only_program is None or self.color[3] == 0.0:\n return\n\n prog = self.depth_only_program\n prog[\"model_matrix\"].write(self.model_matrix.T.tobytes())\n prog[\"view_projection_matrix\"].write(light_matrix.T.tobytes())\n\n self.render_positions(prog)\n\n def render_fragmap(self, ctx, camera, uid=None):\n if not self.fragmap or self.fragmap_program is None:\n return\n\n # Transpose because np is row-major but OpenGL expects column-major.\n prog = self.fragmap_program\n self.set_camera_matrices(prog, camera)\n\n # Render with the specified object uid, if None use the node uid instead.\n prog[\"obj_id\"] = uid or self.uid\n\n if self.backface_culling or self.backface_fragmap:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n\n # If backface_fragmap is enabled for this node only render backfaces\n if self.backface_fragmap:\n ctx.cull_face = \"front\"\n\n self.render_positions(prog)\n\n # Restore cull face to back\n if self.backface_fragmap:\n ctx.cull_face = \"back\"\n\n def render_depth_prepass(self, camera, **kwargs):\n if not self.depth_prepass or self.depth_only_program is None:\n return\n\n prog = self.depth_only_program\n self.set_camera_matrices(prog, camera)\n self.render_positions(prog)\n\n def render_outline(self, ctx, camera):\n if self.outline and self.outline_program is not None:\n prog = self.outline_program\n self.set_camera_matrices(prog, camera)\n\n if self.backface_culling:\n ctx.enable(moderngl.CULL_FACE)\n else:\n ctx.disable(moderngl.CULL_FACE)\n self.render_positions(prog)\n\n # Render children node recursively.\n for n in self.nodes:\n n.render_outline(ctx, camera)\n\n def release(self):\n 
\"\"\"\n        Release all OpenGL resources used by this node and any of its children. Subclasses that instantiate OpenGL\n        objects should implement this method with '@hooked' to avoid leaking resources.\n        \"\"\"\n        for n in self.nodes:\n            n.release()\n\n    def on_selection(self, node, instance_id, tri_id):\n        \"\"\"\n        Called when the node is selected\n\n        :param node: the node which was clicked (can be None if the selection wasn't a mouse event)\n        :param instance_id: the id of the instance that was clicked, 0 if the object is not instanced\n            (can be None if the selection wasn't a mouse event)\n        :param tri_id: the id of the triangle that was clicked from the 'node' mesh\n            (can be None if the selection wasn't a mouse event)\n        \"\"\"\n        pass\n\n    def key_event(self, key, wnd_keys):\n        \"\"\"\n        Handle shortcut key presses (if you are the selected object)\n        \"\"\"\n        pass\n\n    def update_frames(self, *args, **kwargs):\n        pass\n\n    def add_frames(self, *args, **kwargs):\n        pass\n\n    def remove_frames(self, *args, **kwargs):\n        pass\n\n    def _export_usd_recursively(self, stage, usd_path, directory, verbose):\n        if verbose:\n            print(usd_path)\n        for n in self.nodes:\n            if n.export_usd_enabled:\n                n.export_usd(stage, usd_path, directory, verbose)\n\n    def export_usd(self, stage, usd_path: str, directory: str = None, verbose=False):\n        \"\"\"\n        Export the node into a USD file. Nodes that implement this method should\n        recursively call this for every child that should also be exported.\n\n        :param stage: an object of type Usd.Stage into which to export the node\n        :param usd_path: the path of the parent object in the USD file scene hierarchy.\n        \"\"\"\n        from pxr import Gf, UsdGeom\n\n        usd_path = f\"{usd_path}/{self.name.replace(' ', '_')}_{self.uid:03}\"\n\n        # Transform.\n        xform = UsdGeom.Xform.Define(stage, usd_path)\n        a_xform = xform.AddTransformOp()\n        a_xform.Set(Gf.Matrix4d(self.get_local_transform().astype(np.float64).T))\n\n        self._export_usd_recursively(stage, usd_path, directory, verbose)" }, { "identifier": "hooked", "path": "aitviewer/utils/decorators.py", "snippet": "class hooked:\n    def __init__(self, fn):\n        self.fn = fn\n\n    def __set_name__(self, owner, name):\n        func = self.fn\n\n        def _decorator(self, *args, **kwargs):\n            super_obj = super(owner, self)\n            super_fn = getattr(super_obj, func.__name__)\n            super_fn(*args, **kwargs)\n            return func(self, *args, **kwargs)\n\n        setattr(owner, name, _decorator)\n\n    def __call__(self):\n        assert (\n            False\n        ), \"@hooked decorator object should never be called directly. This can happen if you apply this decorator to a function that is not a method.\"" } ]
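The `hooked` decorator quoted above relies on `__set_name__` to rebind a subclass method so that the superclass implementation of the same name always runs first; this is how the renderables chain their `release()` and `redraw()` overrides. A minimal runnable sketch of that behavior (the `Base` and `MeshLike` names are illustrative, not from the dataset):

class Base:
    def release(self):
        print("Base.release: free shared resources")


class hooked:
    # Same descriptor as aitviewer.utils.decorators.hooked quoted above.
    def __init__(self, fn):
        self.fn = fn

    def __set_name__(self, owner, name):
        func = self.fn

        def _decorator(self, *args, **kwargs):
            # Run the superclass method first, then the decorated body.
            getattr(super(owner, self), func.__name__)(*args, **kwargs)
            return func(self, *args, **kwargs)

        setattr(owner, name, _decorator)


class MeshLike(Base):
    @hooked
    def release(self):
        print("MeshLike.release: free my GPU buffers")


MeshLike().release()
# Prints "Base.release: ..." followed by "MeshLike.release: ..."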
import numpy as np from skimage import measure from aitviewer.renderables.bounding_boxes import BoundingBoxes from aitviewer.renderables.lines import Lines from aitviewer.renderables.meshes import Meshes from aitviewer.scene.node import Node from aitviewer.utils.decorators import hooked
19,602
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box. self.bounding_box = BoundingBoxes.from_min_max_diagonal( np.array([[0.0, 0.0, 0.0]]), np.array([self.size], dtype=np.float32), color=(0, 0, 0, 1), name="Bounding Box", gui_affine=False, ) # Clip plane lines. self.clip_lines = [] for i, axis in enumerate(["X", "Y", "Z"]): s0 = self.size[(i + 0) % 3] s1 = self.size[(i + 1) % 3] s2 = self.size[(i + 2) % 3] lines = np.array( ( [s0, 0, 0], [s0, s1, 0], [s0, s1, s2], [s0, 0, s2], [s0, 0, 0], ), dtype=np.float32, ) lines = np.roll(lines, axis=1, shift=(0, i)) color = np.array([0, 0, 0, 1]) color[i] = 1
# Copyright (C) 2023 ETH Zurich, Manuel Kaufmann, Velko Vechev, Dario Mylonopoulos class SDF(Node): """ Renderable that can be used to draw level sets of a dense SDF volume meshed using marching cubes. This renderable internally uses the marching cubes algorithm from skimage. For a faster marching cubes implementation see the Volume renderable. """ def __init__( self, volume, size=(1, 1, 1), level=0.0, color=(0.7, 0.7, 0.7, 1.0), level_sets=None, level_set_colors=None, mc_step_size=1, **kwargs, ): """Initializer. :param volume: np array of shape (X, Y, Z) of signed distance values :param size: size of the volume in local units. :param level: the level set used for the main mesh. :param color: color of the main mesh. :param level_sets: a list or array of additional level set values to display. :param level_set_colors: a list or array of shape (L, 4) of the same length as the level_set parameter with colors to use for the additional level sets. :param mc_step_size: step size used for marching cubes. :param **kwargs: arguments forwarded to the Node constructor. """ assert len(volume.shape) == 3 and len(size) == 3 kwargs["gui_material"] = False super().__init__(**kwargs) self.volume = volume self.size = np.array((size), np.float32) # Mesh. verts, faces, normals, _ = measure.marching_cubes( volume, level, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) self.mesh = Meshes(verts, faces, vertex_normals=-normals, color=color, name="Mesh") # Level sets. self.level_sets: list[Meshes] = [] if level_sets is not None: if level_set_colors is not None: assert len(level_sets) == len(level_set_colors) for i, s in enumerate(level_sets): verts, faces, normals, _ = measure.marching_cubes( volume, s, spacing=self.size / (np.array(self.volume.shape) - 1.0), step_size=mc_step_size ) shell = Meshes(verts, faces, vertex_normals=-normals, name=f"Level {s:.03f}", cast_shadow=False) if level_set_colors is not None: shell.color = tuple(level_set_colors[i]) shell.clip_control = np.array((1, 1, 1)) shell.clip_value = self.size.copy() shell.backface_culling = False self.level_sets.append(shell) # Bounding box. self.bounding_box = BoundingBoxes.from_min_max_diagonal( np.array([[0.0, 0.0, 0.0]]), np.array([self.size], dtype=np.float32), color=(0, 0, 0, 1), name="Bounding Box", gui_affine=False, ) # Clip plane lines. self.clip_lines = [] for i, axis in enumerate(["X", "Y", "Z"]): s0 = self.size[(i + 0) % 3] s1 = self.size[(i + 1) % 3] s2 = self.size[(i + 2) % 3] lines = np.array( ( [s0, 0, 0], [s0, s1, 0], [s0, s1, s2], [s0, 0, s2], [s0, 0, 0], ), dtype=np.float32, ) lines = np.roll(lines, axis=1, shift=(0, i)) color = np.array([0, 0, 0, 1]) color[i] = 1
self.clip_lines.append(Lines(lines, cast_shadow=False, color=color, name=f"Clip {axis}", gui_affine=False))
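The SDF renderable in the code above is built around `skimage.measure.marching_cubes`, with `spacing` mapping integer voxel indices to the volume's local size so the extracted vertices land in `[0, size]` coordinates. A self-contained sketch of that call pattern, using a synthetic sphere SDF as an illustrative volume (none of this data comes from the dataset):

import numpy as np
from skimage import measure

# Synthetic signed distance volume: distance to a sphere of radius 0.3
# centered in a unit cube, sampled on a 64^3 grid.
n = 64
size = np.array([1.0, 1.0, 1.0], np.float32)
axes = [np.linspace(0.0, s, n) for s in size]
grid = np.stack(np.meshgrid(*axes, indexing="ij"), axis=-1)
volume = np.linalg.norm(grid - size / 2, axis=-1) - 0.3

# Same call pattern as SDF.__init__ above: spacing rescales the voxel grid.
verts, faces, normals, _ = measure.marching_cubes(
    volume, level=0.0, spacing=size / (np.array(volume.shape) - 1.0), step_size=1
)
print(verts.shape, faces.shape, normals.shape)  # (V, 3) (F, 3) (V, 3)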
1
2023-12-07 16:13:50+00:00
24k
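Both `Meshes.instanced` and `Node._compute_transform` in the record above compose 4x4 affine matrices from rotation, uniform scale, and translation. A compact sketch of the same composition for per-instance transforms (the function name and input shapes are illustrative assumptions):

import numpy as np

def trs_matrices(positions, rotations, scales):
    """Compose (N, I, 4, 4) transforms from positions (N, I, 3),
    rotations (N, I, 3, 3) and uniform scales (N, I), mirroring the
    layout built by Meshes.instanced above."""
    n, i = positions.shape[:2]
    transforms = np.zeros((n, i, 4, 4))
    # Upper-left 3x3 block: rotation scaled uniformly per instance.
    transforms[:, :, :3, :3] = rotations * scales[..., np.newaxis, np.newaxis]
    transforms[:, :, :3, 3] = positions
    transforms[:, :, 3, 3] = 1.0
    return transforms

# One frame, two instances: identity rotations, unit scales, offset positions.
pos = np.array([[[0.0, 0.0, 0.0], [1.0, 0.0, 0.0]]])
rot = np.broadcast_to(np.eye(3), (1, 2, 3, 3))
scl = np.ones((1, 2))
print(trs_matrices(pos, rot, scl).shape)  # (1, 2, 4, 4)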
nexB/dejacode
dje/admin.py
[ { "identifier": "IS_FILTER_LOOKUP_VAR", "path": "dje/filters.py", "snippet": "IS_FILTER_LOOKUP_VAR = \"_filter_lookup\"" }, { "identifier": "CreatedByListFilter", "path": "dje/filters.py", "snippet": "class CreatedByListFilter(filters.SimpleListFilter):\n \"\"\"\n Filter by the user who created an object according to the objects's\n associated History. The value from LimitToDataspaceListFilter is\n used for the Dataspace if it exists.\n \"\"\"\n\n title = _(\"created by\")\n parameter_name = \"created_by\"\n\n def __init__(self, request, params, model, model_admin):\n self.model_content_type = ContentType.objects.get_for_model(model)\n super().__init__(request, params, model, model_admin)\n\n def lookups(self, request, model_admin):\n dataspace_id = request.GET.get(DataspaceFilter.parameter_name, request.user.dataspace.id)\n # Find users of the selected Dataspace\n users = get_user_model().objects.scope_by_id(dataspace_id)\n\n # Find users who have created at least 1 object\n users = (\n users.filter(\n history__action_flag=History.ADDITION,\n history__content_type=self.model_content_type,\n history__object_dataspace__id=dataspace_id,\n )\n .distinct()\n .order_by(\"last_name\")\n )\n return [(user.pk, user.get_full_name()) for user in users]\n\n def queryset(self, request, queryset):\n if self.value() is not None:\n user_pk = self.value()\n history_entries = History.objects.filter(\n content_type=self.model_content_type, action_flag=History.ADDITION, user__pk=user_pk\n )\n pks = list(history_entries.values_list(\"object_id\", flat=True))\n return queryset.filter(pk__in=pks)" }, { "identifier": "DataspaceFilter", "path": "dje/filters.py", "snippet": "class DataspaceFilter(ChoicesOnlyListFilterMixin, BaseDataspaceLookupsFilter):\n \"\"\"\n Scope the ChangeList results by a Dataspace.\n Default is the current User Dataspace.\n Anyone can look into reference Dataspace.\n Only Reference User can look into other Dataspaces.\n \"\"\"\n\n title = _(\"dataspace\")\n parameter_name = \"dataspace__id__exact\"\n\n def lookups(self, request, model_admin):\n \"\"\"Set the lookup value for the current user dataspace choice to None.\"\"\"\n lookups = super().lookups(request, model_admin)\n return [(None if name == request.user.dataspace.name else pk, name) for pk, name in lookups]\n\n def queryset(self, request, queryset):\n if self.value():\n return queryset.scope_by_id(self.value())\n return queryset.scope(request.user.dataspace)" }, { "identifier": "HistoryCreatedActionTimeListFilter", "path": "dje/filters.py", "snippet": "class HistoryCreatedActionTimeListFilter(HistoryActionTimeListFilter):\n title = _(\"created date\")\n parameter_name = \"created_date\"\n action_flag = History.ADDITION" }, { "identifier": "HistoryModifiedActionTimeListFilter", "path": "dje/filters.py", "snippet": "class HistoryModifiedActionTimeListFilter(HistoryActionTimeListFilter):\n title = _(\"modified date\")\n parameter_name = \"modified_date\"\n action_flag = History.CHANGE" }, { "identifier": "LimitToDataspaceListFilter", "path": "dje/filters.py", "snippet": "class LimitToDataspaceListFilter(filters.RelatedFieldListFilter):\n \"\"\"\n Limit the choices of a filter on a FK to the currently \"filtered\" Dataspace.\n The limit_choices_to declared on the model field will be applied too.\n \"\"\"\n\n def __init__(self, field, request, params, model, model_admin, field_path):\n super().__init__(field, request, params, model, model_admin, field_path)\n\n # The get_limit_choices_to_from_path is broken in 1.7, see code in 1.6\n 
# limit_choices_to = get_limit_choices_to_from_path(model, field_path)\n limit_choices_to = models.Q(**field.get_limit_choices_to())\n\n dataspace_id = request.GET.get(DataspaceFilter.parameter_name, request.user.dataspace_id)\n queryset = field.related_model.objects.scope_by_id(dataspace_id).filter(limit_choices_to)\n\n if field.related_model.__name__ == \"UsagePolicy\":\n content_type = ContentType.objects.get_for_model(model)\n queryset = queryset.filter(content_type=content_type)\n\n self.lookup_choices = [(x._get_pk_val(), str(x)) for x in queryset]" }, { "identifier": "MissingInFilter", "path": "dje/filters.py", "snippet": "class MissingInFilter(BaseDataspaceLookupsFilter):\n \"\"\"\n Filter by objects missing in the given dataspace, compared with the\n current `DataspaceFilter.parameter_name` or user dataspace.\n Both values for reference and target Dataspace are validated against the\n self.lookup_choices to make sure the user has the proper access permissions.\n This filter is only available to superusers, this is enforced in\n DataspacedAdmin.get_list_filter()\n \"\"\"\n\n title = _(\"missing in\")\n parameter_name = \"missing_in\"\n\n def queryset(self, request, queryset):\n if not self.value():\n return\n\n valid_choices = [str(choice) for choice, _ in self.lookup_choices]\n if str(self.value()) not in valid_choices:\n raise IncorrectLookupParameters()\n\n return queryset.exclude(uuid__in=get_uuids_list_sorted(self.value(), queryset.model))" }, { "identifier": "DataspaceAdminForm", "path": "dje/forms.py", "snippet": "class DataspaceAdminForm(forms.ModelForm):\n def clean(self):\n \"\"\"Add validation for `update_packages_from_scan` field.\"\"\"\n cleaned_data = super().clean()\n enable_package_scanning = cleaned_data.get(\"enable_package_scanning\")\n update_packages_from_scan = cleaned_data.get(\"update_packages_from_scan\")\n\n if update_packages_from_scan and not enable_package_scanning:\n msg = \"Package scanning needs to be enabled to use the automatic updates.\"\n self.add_error(\"update_packages_from_scan\", msg)" }, { "identifier": "DataspacedAdminForm", "path": "dje/forms.py", "snippet": "class DataspacedAdminForm(forms.ModelForm):\n \"\"\"\n Use as the base ModelForm for every Model declared as DataspacedModel.\n This is usually not required for inline Forms.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Injects the dataspace in the instance right after the form\n initialization as a value for dataspace is required for the\n built-in unique_together validation.\n \"\"\"\n super().__init__(*args, **kwargs)\n # request is not injected in inline forms\n request = getattr(self, \"request\", None)\n\n add = not kwargs.get(\"instance\")\n if add and request:\n self.instance.dataspace = request.user.dataspace\n\n def _get_validation_exclusions(self):\n \"\"\"\n Remove the `dataspace` and `uuid` fields from the exclusion (ie: validate on those)\n so the unique_together validation within dataspace is enforced.\n \"\"\"\n exclude = super()._get_validation_exclusions()\n return {field for field in exclude if field not in (\"dataspace\", \"uuid\")}" }, { "identifier": "DejaCodeAuthenticationForm", "path": "dje/forms.py", "snippet": "class DejaCodeAuthenticationForm(AuthenticationForm):\n \"\"\"Login form.\"\"\"\n\n use_required_attribute = False\n\n @property\n def helper(self):\n helper = FormHelper()\n helper.form_id = \"sign-in\"\n helper.form_action = \"login\"\n helper.form_method = \"post\"\n helper.form_tag = False\n\n fields = [\n Field(\"username\", 
css_class=\"input-block-level mb-3\", placeholder=_(\"Username\")),\n Field(\"password\", css_class=\"input-block-level mb-3\", placeholder=_(\"Password\")),\n Div(\n StrictSubmit(\"submit\", _(\"Sign in\"), css_class=\"btn-warning\"),\n css_class=\"d-grid\",\n ),\n ]\n\n helper.add_layout(Layout(Fieldset(\"\", *fields)))\n return helper\n\n def get_invalid_login_error(self):\n username = self.cleaned_data.get(\"username\")\n if \"@\" in username:\n return ValidationError(\n \"Be sure to enter your DejaCode username rather than your email \"\n \"address to sign in to DejaCode.\"\n )\n return super().get_invalid_login_error()" }, { "identifier": "import_view", "path": "dje/importers.py", "snippet": "@login_required()\ndef import_view(request, importer_class):\n user = request.user\n importer = importer_class(user)\n upload_form_class = ImportableUploadFileForm\n if importer_class.__name__ == \"PackageImporter\":\n upload_form_class = PackageImportableUploadFileForm\n\n opts = importer.model_form._meta.model._meta\n perm_codename = get_permission_codename(\"add\", opts)\n if not user.has_perm(f\"{opts.app_label}.{perm_codename}\"):\n return HttpResponseRedirect(resolve_url(settings.LOGIN_REDIRECT_URL))\n\n if request.GET.get(\"get_template\"): # ?get_template=1\n header = \",\".join(importer.required_fields + importer.supported_fields)\n filename = \"{}_import_template.csv\".format(importer.verbose_name.replace(\" \", \"_\"))\n response = HttpResponse(header, content_type=\"application/csv\")\n response[\"Content-Disposition\"] = f'attachment; filename=\"{filename}\"'\n return response\n\n file_form = None\n if request.method == \"POST\":\n if \"form-TOTAL_FORMS\" in request.POST:\n importer = importer_class(user, formset_data=request.POST)\n importer.save_all()\n else:\n # Every uploaded file are stored in a temp location as we removed\n # the MemoryFileUploadHandler in the FILE_UPLOAD_HANDLERS settings.\n # We do not keep track of this location once the file has been\n # processed.\n file_form = upload_form_class(request.POST, request.FILES)\n if file_form.is_valid():\n uploaded_file = request.FILES[\"file\"]\n file_location = uploaded_file.temporary_file_path()\n importer = importer_class(user, file_location=file_location)\n else:\n file_form = upload_form_class()\n\n return render(\n request,\n \"admin/object_import.html\",\n {\n \"file_form\": file_form,\n \"importer\": importer,\n \"add_to_product_form\": importer.get_add_to_product_form(request),\n },\n )" }, { "identifier": "AsURL", "path": "dje/list_display.py", "snippet": "class AsURL(ListDisplayItem):\n def to_representation(self, value):\n return urlize_target_blank(value)" }, { "identifier": "mass_update_action", "path": "dje/mass_update.py", "snippet": "def mass_update_action(modeladmin, request, queryset):\n if not queryset:\n return\n\n # Dataspace is required for scoping, we trust the queryset for security purpose\n # over the values provided in the request data.\n dataspace = queryset.first().dataspace\n opts = modeladmin.model._meta\n preserved_filters = modeladmin.get_preserved_filters(request)\n\n # Allows to specified a custom mass update Form in the ModelAdmin\n mass_update_form = getattr(modeladmin, \"mass_update_form\", BaseMassUpdateForm)\n MassUpdateForm = modelform_factory(\n modeladmin.model,\n form=mass_update_form,\n exclude=get_protected_fields(modeladmin.model, request.user),\n formfield_callback=not_required,\n )\n MassUpdateForm.admin_site = modeladmin.admin_site # required by the ForeignKeyRawIdWidget\n\n 
if \"apply\" in request.POST:\n form = MassUpdateForm(request.POST, dataspace=dataspace)\n if form.is_valid():\n changelist_url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_changelist\")\n redirect_url = add_preserved_filters(\n {\"preserved_filters\": preserved_filters, \"opts\": opts}, changelist_url\n )\n\n updated = 0\n errors = []\n for record in queryset:\n for field_name, value in form.cleaned_data.items():\n try:\n field_object = record._meta.get_field(field_name)\n except FieldDoesNotExist:\n continue # field_name is not part of the model\n\n # auto_created is only True on implicit m2m model\n if (\n field_object.many_to_many\n and not field_object.remote_field.through._meta.auto_created\n ):\n set_intermediate_explicit_m2m(record, field_object, value)\n else:\n setattr(record, field_name, value)\n # This have no impact if the model does not declare this field.\n record.last_modified_by = request.user\n # Some validation errors cannot be caught during the form validation as only\n # the \"new\" value of the selected fields are available.\n # We cannot validate those changes against the existing value of others fields on\n # the instance so the error can be raised at save() time.\n try:\n record.save()\n except ValidationError as e:\n errors.append(e.message)\n else:\n updated += 1\n\n if updated:\n messages.info(request, _(f\"Updated {updated} records\"))\n\n if errors:\n messages.error(request, _(f'{len(errors)} error(s): {\", \".join(errors)}'))\n\n action_end.send(\n sender=modeladmin.model,\n action=\"mass_update\",\n request=request,\n queryset=queryset,\n modeladmin=modeladmin,\n form=form,\n )\n return redirect(redirect_url)\n else:\n initial = {\n ACTION_CHECKBOX_NAME: request.POST.getlist(ACTION_CHECKBOX_NAME),\n \"select_across\": request.POST.get(\"select_across\") == \"1\",\n }\n form = MassUpdateForm(initial=initial, dataspace=dataspace)\n\n adminform = AdminForm(form, modeladmin.get_fieldsets(request), {}, [], model_admin=modeladmin)\n\n with suppress(AttributeError):\n form.extra_init(request, modeladmin)\n\n context = {\n \"adminform\": adminform,\n \"form\": form,\n \"opts\": opts,\n \"queryset\": queryset,\n \"preserved_filters\": preserved_filters,\n \"media\": modeladmin.media,\n }\n return render(request, \"admin/mass_update.html\", context)" }, { "identifier": "Dataspace", "path": "dje/models.py", "snippet": "class Dataspace(models.Model):\n \"\"\"\n The Dataspace is a way to keep data for each organization data\n separated and still store them in the same database, schema or table.\n Therefore the Dataspace is part of the primary key of most models\n and it part of a unicity constraint for these models.\n For a given installation there can be several Owner Org defined, but only\n one reference.\n\n This is an important concept used throughout DejaCode to\n separate the reference data provided by nexB from the data used in a given\n installation of DJE.\n\n It is essentially a notion of tenant in a DJE installation and is used to\n segregate org-specific and/or org-private records enabling both\n multi-tenancy as well as nexB-provided reference data and org-specific or\n customized data.\n\n This separation has several purposes such as allowing:\n * orderly and simpler data update from the nexB reference data and inter\n Dataspace data exchange\n * Dataspace specific data customizations (for instance license\n tags configurations or some preferences)\n * multi-tenancy where different organizations can share the same DJE\n instance\n \"\"\"\n\n uuid = 
models.UUIDField(\n _(\"UUID\"),\n default=uuid.uuid4,\n editable=False,\n unique=True,\n )\n\n name = models.SlugField(\n unique=True,\n max_length=20,\n help_text=_(\n 'Unique name of a Dataspace. The name \"nexB\" is reserved for '\n \"the creators/maintainers of the system software. Dataspace name \"\n \"only allows letters, numbers, underscores and hyphens.\"\n ),\n )\n\n homepage_url = models.URLField(\n _(\"Homepage URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"The homepage URL of the Dataspace owner.\"),\n )\n\n contact_info = models.CharField(\n _(\"Contact information\"),\n max_length=500,\n blank=True,\n help_text=_(\n \"A dedicated email address or URL for contacting the owner of \"\n \"the Dataspace. Can be used for Attribution Package generation.\"\n ),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=_(\"Extended Notes about a Dataspace.\"),\n )\n\n show_license_profile_in_license_list_view = models.BooleanField(\n default=False,\n verbose_name=format_lazy(\n \"Show {license_profile} in license list view\",\n license_profile=_(\"license profile\"),\n ),\n help_text=format_lazy(\n \"When true (checked), include the {license_profile} column in the license list view.\",\n license_profile=_(\"license profile\"),\n ),\n )\n\n show_license_type_in_license_list_view = models.BooleanField(\n default=True,\n help_text=_(\n \"When true (checked), include the license type column in the license list view.\",\n ),\n )\n\n show_spdx_short_identifier_in_license_list_view = models.BooleanField(\n verbose_name=_(\"show SPDX short identifier in license list view\"),\n default=False,\n help_text=_(\n \"When true (checked), include the SPDX short identifier in the license list view.\",\n ),\n )\n\n show_usage_policy_in_user_views = models.BooleanField(\n default=True,\n help_text=_(\n \"When true (checked), include the usage policy in user views that \"\n \"show licenses or components.\",\n ),\n )\n\n show_type_in_component_list_view = models.BooleanField(\n default=False,\n help_text=_(\n \"When true (checked), include the type column in the component list view.\",\n ),\n )\n\n hide_empty_fields_in_component_details_view = models.BooleanField(\n default=False,\n help_text=_(\"When true (checked), hide empty fields in the component details view.\"),\n )\n\n set_usage_policy_on_new_component_from_licenses = models.BooleanField(\n _(\"set usage policy on component or package from license policy\"),\n default=False,\n help_text=_(\n \"When true (checked), the application will automatically assign a usage \"\n \"policy to a component or package when its license expression is set or \"\n \"updated when you create, import, edit, or copy that component or package, \"\n \"based on the associated policies that you have defined on the license policy.\"\n ),\n )\n\n logo_url = models.URLField(\n _(\"Logo URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\"URL to a Dataspace Logo. If set, it will be included in reports.\"),\n )\n\n full_name = models.CharField(\n max_length=100,\n blank=True,\n help_text=_(\n \"The full name of the Dataspace organization. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n address = models.TextField(\n blank=True,\n help_text=(\n \"The address of the Dataspace organization. 
\"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n open_source_information_url = models.URLField(\n _(\"Open Source Information URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A public URL where you publish information about the Dataspace \"\n \"organization's Open Source policies and procedures. \"\n \"Can be used for Attribution Package generation.\"\n ),\n )\n\n open_source_download_url = models.URLField(\n _(\"Open Source Download URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"A public URL where you provide copies of Open Source software that \"\n \"require Redistribution when you use them in your products. Can be \"\n \"used for Attribution Package generation.\"\n ),\n )\n\n home_page_announcements = models.TextField(\n blank=True,\n help_text=_(\n \"Use this field to enter text to appear on the DejaCode home page, \"\n \"normally for the purpose of providing your user community with \"\n \"general-purpose announcements about using DejaCode. \"\n \"Note that you can include URL's in the text if you want to direct \"\n \"users to detailed instructions and announcements.\"\n ),\n )\n\n enable_package_scanning = models.BooleanField(\n default=False,\n help_text=_(\n 'When true (checked), allows a user to click the \"Scan Package\" button when viewing '\n \"a Package, initiating a call to ScanCode.io to scan the Package based on its URL. \"\n \"This setting also activates a DejaCode feature to submit any Package created using \"\n 'the \"Add Package\" button to ScanCode.io for scanning, and it activates the Scans '\n \"choice from the DejaCode Tools dropdown menu.\"\n ),\n )\n\n update_packages_from_scan = models.BooleanField(\n _(\"Update packages automatically from scan\"),\n default=False,\n help_text=_(\n \"When true (checked), enables an automatic DejaCode process to update \"\n \"selected Package fields (such as license expression, primary language, \"\n \"copyright, etc.) when a package scan is completed, depending on the \"\n \"quality of the scan results.\"\n ),\n )\n\n enable_purldb_access = models.BooleanField(\n _(\"Enable PurlDB access\"),\n default=False,\n help_text=_(\n \"When true (checked), enables user access to the PurlDB option from the Tools menu, \"\n \"which presents a list of PurlDB data mined and scanned automatically from multiple \"\n \"public sources. Users can view PurlDB details and can create DejaCode Package \"\n \"definitions using those details, and DejaCode also presents a new PurlDB tab when \"\n \"viewing the details of a Package with matching key values. 
This option also enhances \"\n \"the Global Search feature to extend the search scope beyond the standard DejaCode \"\n \"objects (Packages, Components, Licenses, Owners) and perform an asynchronous query of \"\n \"the PurlDB to find relevant data.\"\n ),\n )\n\n enable_vulnerablecodedb_access = models.BooleanField(\n _(\"Enable VulnerableCodeDB access\"),\n default=False,\n help_text=_(\n \"When true (checked), authorizes DejaCode to access the VulnerableCodeDB \"\n \"using a Package URL (purl) to determine if there are any reported \"\n \"vulnerabilities for a specific Package and return the Vulnerability ID \"\n \"and related URLs to a Vulnerabilities tab in the Package details user \"\n \"view.\"\n ),\n )\n\n objects = DataspaceManager()\n\n class Meta:\n ordering = [\"name\"]\n\n def __str__(self):\n return self.name\n\n def get_admin_url(self):\n opts = self._meta\n viewname = f\"admin:{opts.app_label}_{opts.model_name}_change\"\n return reverse(viewname, args=[self.pk])\n\n def natural_key(self):\n return (self.name,)\n\n @cached_property\n def is_reference(self):\n \"\"\"Return True if this Dataspace is the reference.\"\"\"\n reference = self.__class__._default_manager.get_reference()\n return True if reference and self == reference else False\n\n def get_configuration(self, field_name=None):\n \"\"\"\n Return the associated DataspaceConfiguration.\n If a `field_name` is provided, Return the value for that field from\n the `DataspaceConfiguration`.\n \"\"\"\n try:\n configuration = self.configuration\n except ObjectDoesNotExist:\n return\n\n if field_name:\n return getattr(configuration, field_name, None)\n return configuration\n\n @property\n def has_configuration(self):\n \"\"\"Return True if an associated DataspaceConfiguration instance exists.\"\"\"\n return bool(self.get_configuration())\n\n @property\n def tab_permissions_enabled(self):\n return bool(self.get_configuration(\"tab_permissions\"))" }, { "identifier": "DataspaceConfiguration", "path": "dje/models.py", "snippet": "class DataspaceConfiguration(models.Model):\n dataspace = models.OneToOneField(\n to=\"dje.Dataspace\",\n on_delete=models.CASCADE,\n related_name=\"configuration\",\n )\n\n tab_permissions = models.JSONField(\n blank=True,\n default=dict,\n )\n\n copy_defaults = models.JSONField(\n blank=True,\n null=True,\n )\n\n homepage_layout = models.ForeignKey(\n to=\"reporting.CardLayout\",\n null=True,\n blank=True,\n related_name=\"+\",\n on_delete=models.CASCADE,\n serialize=False,\n help_text=_(\n \"Select a general purpose Card layout that provides timely query \"\n \"results on the DejaCode homepage to your application users.\"\n ),\n )\n\n scancodeio_url = models.URLField(\n _(\"ScanCode.io URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"Enter the URL of your organization's private ScanCode.io instance, \"\n \"if available. If not, DejaCode will use the public ScanCode.io instance \"\n \"to scan your Packages.\"\n ),\n )\n\n scancodeio_api_key = models.CharField(\n _(\"ScanCode.io API key\"),\n max_length=40,\n blank=True,\n help_text=_(\n \"If your organization's private ScanCode.io instance requires an API key \"\n \"for access, provide it here. Otherwise, you can leave this field empty.\"\n ),\n )\n\n vulnerablecode_url = models.URLField(\n _(\"VulnerableCode URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"If your organization has a private VulnerableCode instance, enter its URL \"\n \"here. 
Otherwise, DejaCode will use the public VulnerableCode to check for \"\n \"vulnerabilities\"\n ),\n )\n\n vulnerablecode_api_key = models.CharField(\n _(\"VulnerableCode API key\"),\n max_length=40,\n blank=True,\n help_text=_(\n \"If your private VulnerableCode instance requires an API key for access, \"\n \"input it here. If not, you can leave this field blank.\"\n ),\n )\n\n purldb_url = models.URLField(\n _(\"PurlDB URL\"),\n max_length=1024,\n blank=True,\n help_text=_(\n \"Enter the URL of your organization's private PurlDB instance, if \"\n \"applicable. If not, DejaCode will utilize the public PurlDB to offer a \"\n \"database of Packages collected from public sources.\"\n ),\n )\n\n purldb_api_key = models.CharField(\n _(\"PurlDB API key\"),\n max_length=40,\n blank=True,\n help_text=_(\n \"If your organization's private PurlDB instance requires an API key for \"\n \"access, provide it here. If not, you can leave this field empty.\"\n ),\n )\n\n def __str__(self):\n return f\"Configuration for {self.dataspace}\"" }, { "identifier": "DejacodeUser", "path": "dje/models.py", "snippet": "class DejacodeUser(AbstractUser):\n uuid = models.UUIDField(\n _(\"UUID\"),\n default=uuid.uuid4,\n editable=False,\n unique=True,\n )\n\n dataspace = models.ForeignKey(\n to=\"dje.Dataspace\",\n on_delete=models.PROTECT,\n help_text=DATASPACE_FIELD_HELP_TEXT,\n )\n\n data_email_notification = models.BooleanField(\n default=False,\n help_text=_(\n \"Check this to send email notifications to the user regarding DejaCode data updates; \"\n \"note that the volume could be very large, so this option is normally enabled only \"\n \"during system setup and rollout to monitor application activity.\"\n ),\n )\n\n workflow_email_notification = models.BooleanField(\n default=False,\n help_text=_(\n \"Check this to send email notifications to the user for associated workflow requests; \"\n \"otherwise, request notifications alerts will appear in the DejaCode notifications \"\n \"form.\"\n ),\n )\n\n updates_email_notification = models.BooleanField(\n default=False,\n help_text=_(\n \"Check this to receive email notifications with updates on DejaCode \"\n \"features and news.\"\n ),\n )\n\n company = models.CharField(\n max_length=30,\n blank=True,\n help_text=_(\n \"The company the user is associated with. 
\"\n \"This can be submitted during the signup process.\"\n ),\n )\n\n last_api_access = models.DateTimeField(\n verbose_name=_(\"last API access\"),\n blank=True,\n null=True,\n )\n\n homepage_layout = models.ForeignKey(\n to=\"reporting.CardLayout\",\n null=True,\n blank=True,\n related_name=\"+\",\n on_delete=models.CASCADE,\n serialize=False,\n help_text=_(\n \"Select a Card layout that provides the query results on the \"\n \"DejaCode homepage that are useful and interesting to you.\"\n ),\n )\n\n objects = DejacodeUserManager()\n\n class Meta:\n ordering = [\"username\"]\n\n def save(self, *args, **kwargs):\n \"\"\"\n Send an email to the user when his password has been changed.\n\n The password can be changed from those locations:\n - Change password user view : /account/password_change/\n - Password reset user view: /account/password_reset/\n - Management command: ./manage.py changepassword\n - Model instance: set_password() + save()\n \"\"\"\n from dje.notification import send_password_changed_email\n\n # self._password will be set to None during a hasher upgrade as we do not\n # want to notify in that case, as it's not considered a password changes.\n # See AbstractBaseUser.check_password.setter\n password_changed = self._password is not None\n user_exists = bool(self.pk)\n\n super().save(*args, **kwargs)\n\n # Do not notify users that are setting their initial password during registration\n if password_changed and user_exists and self.last_login:\n send_password_changed_email(self)\n\n @property\n def last_active(self):\n activity_date_fields = [\n self.date_joined,\n self.last_login,\n self.last_api_access,\n ]\n return max([field for field in activity_date_fields if field])\n\n def get_group_names(self):\n \"\"\"\n Return the group names assigned to the User through the DB and LDAP\n authentication (when enabled).\n \"\"\"\n group_names_from_db = list(self.groups.values_list(\"name\", flat=True))\n\n ldap_user = getattr(self, \"ldap_user\", None)\n if ldap_user:\n return list(set(group_names_from_db).union(ldap_user.group_names))\n\n return group_names_from_db\n\n def get_homepage_layout(self):\n \"\"\"\n Return the User `homepage_layout`, from the this instance first,\n or fallback on the Dataspace layout, if set.\n \"\"\"\n return self.homepage_layout or self.dataspace.get_configuration(\"homepage_layout\")\n\n def email_user(self, subject, message, from_email=None, **kwargs):\n \"\"\"Wrap the method in a task.\"\"\"\n send_mail_task.delay(subject, message, from_email, [self.email], **kwargs)\n\n def regenerate_api_key(self):\n \"\"\"\n Regenerate the user API key.\n Since the `key` value is the primary key on the Token `model`,\n the old key needs to be deleted first, a new one is then created.\n \"\"\"\n self.auth_token.delete()\n Token.objects.create(user=self)\n\n def serialize_user_data(self):\n fields = [\n \"email\",\n \"first_name\",\n \"last_name\",\n \"username\",\n \"company\",\n \"last_login\",\n \"date_joined\",\n \"last_api_access\",\n \"last_active\",\n \"is_superuser\",\n \"is_staff\",\n \"is_active\",\n \"updates_email_notification\",\n \"dataspace\",\n ]\n\n return {\n field: str(value) for field in fields if (value := getattr(self, field)) is not None\n }\n\n def serialize_hook(self, hook):\n return {\n \"hook\": hook.dict(),\n **self.serialize_user_data(),\n }" }, { "identifier": "ExternalReference", "path": "dje/models.py", "snippet": "class ExternalReference(HistoryFieldsMixin, DataspacedModel):\n \"\"\"\n Maps DJE objects to external resources.\n One 
DJE object may have several ExternalReference when it's referenced on\n multiple sources.\n Also, there is no unicity collision possible as we use the object_id.\n\n The copy for GenericForeignKey field is not supported yet.\n \"\"\"\n\n # The following models should always inherit from ExternalReferenceMixin\n # for the proper deletion in CASCADE behavior.\n CT_LIMIT = (\n models.Q(app_label=\"organization\", model=\"owner\")\n | models.Q(app_label=\"license_library\", model=\"license\")\n | models.Q(app_label=\"component_catalog\", model=\"component\")\n | models.Q(app_label=\"component_catalog\", model=\"package\")\n )\n\n content_type = models.ForeignKey(\n to=ContentType,\n limit_choices_to=CT_LIMIT,\n on_delete=models.PROTECT,\n )\n\n object_id = models.PositiveIntegerField()\n\n content_object = GenericForeignKey(\"content_type\", \"object_id\")\n\n external_source = models.ForeignKey(\n to=\"dje.ExternalSource\",\n on_delete=models.PROTECT,\n )\n\n external_id = models.CharField(\n max_length=500,\n blank=True,\n help_text=_(\"Value of the identifier used on the source to reference the object.\"),\n )\n\n external_url = models.URLField(\n max_length=1024,\n blank=True,\n help_text=_(\"A URL to the component, or component metadata, in the external source.\"),\n )\n\n objects = ExternalReferenceManager()\n\n class Meta:\n unique_together = (\"dataspace\", \"uuid\")\n ordering = [\"external_source\", \"external_id\"]\n\n def __str__(self):\n return f\"{self.external_source}: {self.external_id}\"\n\n def save(self, *args, **kwargs):\n self.dataspace = self.content_object.dataspace\n super().save(*args, **kwargs)" }, { "identifier": "ExternalSource", "path": "dje/models.py", "snippet": "class ExternalSource(DataspacedModel):\n label = models.CharField(\n max_length=50,\n help_text=_(\"A Label is a concise name of the external source as it \" \"is commonly known.\"),\n )\n\n notes = models.TextField(\n blank=True,\n help_text=_(\n \"Notes describe the purpose and special characteristics \" \"of the external source.\"\n ),\n )\n\n homepage_url = models.URLField(\n max_length=1024,\n blank=True,\n help_text=_(\"Main homepage URL of the external source.\"),\n )\n\n class Meta:\n ordering = [\"label\"]\n unique_together = ((\"dataspace\", \"label\"), (\"dataspace\", \"uuid\"))\n\n def __str__(self):\n return self.label" }, { "identifier": "History", "path": "dje/models.py", "snippet": "class History(models.Model):\n ADDITION = ADDITION\n CHANGE = CHANGE\n DELETION = DELETION\n\n ACTION_FLAG_CHOICES = (\n (ADDITION, _(\"Addition\")),\n (CHANGE, _(\"Change\")),\n (DELETION, _(\"Deletion\")),\n )\n\n object_dataspace = models.ForeignKey(\n to=\"dje.Dataspace\",\n on_delete=models.CASCADE,\n null=True,\n blank=True,\n editable=False,\n )\n\n serialized_data = models.TextField(\n null=True,\n blank=True,\n editable=False,\n help_text=_(\"Serialized data of the instance just before this change.\"),\n )\n\n # The following fields are directly taken from django.contrib.admin.models.LogEntry\n # Since the LogEntry is not abstract we cannot properly inherit from it.\n\n action_time = models.DateTimeField(\n _(\"action time\"),\n default=timezone.now,\n editable=False,\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n models.CASCADE,\n verbose_name=_(\"user\"),\n )\n\n content_type = models.ForeignKey(\n ContentType,\n models.SET_NULL,\n verbose_name=_(\"content type\"),\n blank=True,\n null=True,\n )\n\n object_id = models.TextField(\n _(\"object id\"),\n blank=True,\n null=True,\n )\n\n 
object_repr = models.CharField(\n _(\"object repr\"),\n max_length=200,\n )\n\n action_flag = models.PositiveSmallIntegerField(\n _(\"action flag\"),\n choices=ACTION_FLAG_CHOICES,\n )\n\n # change_message is either a string or a JSON structure\n change_message = models.TextField(\n _(\"change message\"),\n blank=True,\n )\n\n objects = HistoryManager()\n\n class Meta:\n verbose_name = _(\"history entry\")\n verbose_name_plural = _(\"history entries\")\n ordering = (\"-action_time\",)\n\n # Clone the method from Django's LogEntry model.\n __repr__ = LogEntry.__repr__\n __str__ = LogEntry.__str__\n is_addition = LogEntry.is_addition\n is_change = LogEntry.is_change\n is_deletion = LogEntry.is_deletion\n get_change_message = LogEntry.get_change_message\n get_edited_object = LogEntry.get_edited_object\n get_admin_url = LogEntry.get_edited_object\n\n @classmethod\n def log_addition(cls, user, obj, message=None):\n \"\"\"Create History entry on Addition with the proper `change_message`.\"\"\"\n if not message:\n message = [{\"added\": {}}]\n\n return cls.objects.log_action(user, obj, cls.ADDITION, message)\n\n @classmethod\n def log_change(cls, user, obj, message, serialized_data=None):\n \"\"\"Create History entry on Change.\"\"\"\n return cls.objects.log_action(user, obj, cls.CHANGE, message, serialized_data)\n\n @classmethod\n def log_deletion(cls, user, obj):\n \"\"\"\n Create History entry on Deletion.\n Include the serialized_data if `as_json()` is available on the model class.\n \"\"\"\n serialized_data = None\n with suppress(AttributeError):\n serialized_data = obj.as_json()\n\n return cls.objects.log_action(user, obj, cls.DELETION, serialized_data=serialized_data)" }, { "identifier": "HistoryFieldsMixin", "path": "dje/models.py", "snippet": "class HistoryFieldsMixin(HistoryUserFieldsMixin, HistoryDateFieldsMixin):\n \"\"\"Add the created_date, last_modified_date, created_by, last_modified_by fields.\"\"\"\n\n class Meta:\n abstract = True" }, { "identifier": "is_dataspace_related", "path": "dje/models.py", "snippet": "def is_dataspace_related(model_class):\n \"\"\"\n Return True if the given model_class has a ForeignKey field related to\n the Dataspace model.\n \"\"\"\n return any(\n 1\n for f in model_class._meta.get_fields()\n if f.many_to_one and (f.related_model == Dataspace or f.related_model == \"dje.Dataspace\")\n )" }, { "identifier": "send_notification_email", "path": "dje/notification.py", "snippet": "def send_notification_email(user, instance, action, message=\"\"):\n if not has_email_settings():\n return\n\n if not hasattr(instance, \"dataspace\"):\n return\n\n recipients = get_user_model().objects.get_data_update_recipients(instance.dataspace)\n if not recipients:\n return\n\n verbose_name = instance._meta.verbose_name.capitalize()\n verbose_action = VERBOSE_ACTION[action]\n subject = f'{verbose_action} {verbose_name}: \"{instance}\"'\n body = (\n f'{verbose_name} \"{instance}\" in dataspace \"{instance.dataspace.name}\" '\n f\"{verbose_action.lower()} by: {user.first_name} {user.last_name} ({user.username})\"\n )\n\n if action is History.CHANGE and message:\n if message == \"No fields changed.\":\n return\n body += f\"\\n\\n{message}\"\n\n if action is not History.DELETION and settings.SITE_URL:\n site_url = settings.SITE_URL.rstrip(\"/\")\n body += f\"\\n\\n{site_url}{instance.get_admin_url()}\"\n\n send_mail_task.delay(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)" }, { "identifier": "send_notification_email_on_queryset", "path": "dje/notification.py", 
"snippet": "def send_notification_email_on_queryset(user, queryset, action, message=\"\"):\n if not has_email_settings():\n return\n\n if not queryset:\n return\n\n if len(queryset) == 1:\n return send_notification_email(user, queryset[0], action, message)\n\n first = queryset[0]\n if not hasattr(first, \"dataspace\"):\n return\n\n recipients = get_user_model().objects.get_data_update_recipients(first.dataspace)\n if not recipients:\n return\n\n verbose_name_plural = first._meta.verbose_name_plural.capitalize()\n verbose_action = VERBOSE_ACTION[action]\n\n subject = f\"Multiple {verbose_name_plural} {verbose_action.lower()}\"\n body = (\n f'{verbose_name_plural} in dataspace \"{first.dataspace.name}\" '\n f\"{verbose_action.lower()} by {user.first_name} {user.last_name} ({user.username}):\"\n )\n\n for instance in queryset:\n body += f\"\\n- {instance}\"\n\n if action is not History.DELETION and settings.SITE_URL:\n site_url = settings.SITE_URL.rstrip(\"/\")\n body += f\" {site_url}{instance.get_admin_url()}\"\n\n if message:\n body += f\"\\n\\n{message}\"\n\n send_mail_task.delay(subject, body, settings.DEFAULT_FROM_EMAIL, recipients)" }, { "identifier": "get_protected_fields", "path": "dje/permissions.py", "snippet": "def get_protected_fields(model_class, user):\n \"\"\"Return the list of protected fields names for the given `user`.\"\"\"\n protected_fields = getattr(model_class(), \"permission_protected_fields\", {})\n\n return [\n field_name\n for field_name, perm_codename in protected_fields.items()\n if not user.has_perm(f\"{model_class._meta.app_label}.{perm_codename}\")\n ]" }, { "identifier": "advanced_search", "path": "dje/search.py", "snippet": "def advanced_search(search_terms, search_fields):\n lookup_types = {\n \":\": \"icontains\",\n \"=\": \"iexact\",\n \"^\": \"istartswith\",\n }\n or_queries = []\n\n for term in split_search_terms(search_terms):\n lookup_type = \"icontains\" # default\n lookup_fields = search_fields\n lookup_operators = [force_str(key) for key in lookup_types.keys()]\n\n # 'apache', '^apache', '=apache', , ':apache'\n if term.startswith(tuple(lookup_operators)):\n term, lookup_type = term[1:], lookup_types.get(term[0])\n\n # 'name:apache', 'name^apache', 'name=apache'\n else:\n for field_name in lookup_fields:\n missing_operator = term == field_name\n if not term.startswith(field_name) or missing_operator:\n continue\n\n operator = term[len(field_name)]\n if operator in lookup_operators:\n lookup_type = lookup_types.get(operator)\n lookup_fields = [field_name]\n _, term = term.split(f\"{field_name}{operator}\", 1)\n break\n\n if not term:\n continue\n\n orm_lookups = [f\"{field}__{lookup_type}\" for field in lookup_fields]\n or_queries.extend([models.Q(**{orm_lookup: term}) for orm_lookup in orm_lookups])\n\n if or_queries:\n return reduce(or_, or_queries)" }, { "identifier": "CHANGELIST_LINK_TEMPLATE", "path": "dje/utils.py", "snippet": "CHANGELIST_LINK_TEMPLATE = (\n '<strong>See the <a href=\"{}\" target=\"_blank\">{} {}</a> in changelist</strong>'\n)" }, { "identifier": "class_wrap", "path": "dje/utils.py", "snippet": "def class_wrap(value, class_):\n \"\"\"Return the given HTML wrapped in a div with the given class set.\"\"\"\n return format_html('<div class=\"{}\">{}</div>', class_, mark_safe(value))" }, { "identifier": "construct_changes_details_message", "path": "dje/utils.py", "snippet": "def construct_changes_details_message(changes_details):\n msg = []\n header = '\\n\\n\\nChanges details for {model_class} \"{instance}\"'\n change_line = 
\"\\n\\n* {field}\\nOld value: {old}\\nNew value: {new}\"\n\n for instance, data in changes_details.items():\n msg.append(header.format(model_class=instance.__class__.__name__, instance=instance))\n for field, old, new in data:\n msg.append(change_line.format(field=field, old=old, new=new))\n return \"\".join(msg)" }, { "identifier": "get_previous_next", "path": "dje/utils.py", "snippet": "def get_previous_next(ids, current_id):\n \"\"\"\n Return the previous and next entries from a given `ids` list\n and the current id value.\n\n >>> get_previous_next([1, 2, 3, 4], 3)\n (2, 4)\n >>> get_previous_next(['a', 'b', 'c'], 'b')\n ('a', 'c')\n \"\"\"\n previous, next = None, None\n\n try:\n index = ids.index(current_id)\n except ValueError:\n return None, None\n\n if index > 0:\n previous = ids[index - 1]\n if index < len(ids) - 1:\n next = ids[index + 1]\n\n return previous, next" }, { "identifier": "group_by", "path": "dje/utils.py", "snippet": "def group_by(queryset, field_name, values=None, count_on=None, distinct=False):\n from django.db.models import Count\n\n values = values or [field_name]\n count_on = count_on or field_name\n\n return queryset.values(*values).order_by().annotate(count=Count(count_on, distinct=distinct))" }, { "identifier": "has_permission", "path": "dje/utils.py", "snippet": "def has_permission(model, user, action):\n \"\"\"Return True is the `user` has the Permission for the given action of the model.\"\"\"\n opts = model._meta\n codename = get_permission_codename(action, opts)\n return user.has_perm(f\"{opts.app_label}.{codename}\")" }, { "identifier": "queryset_to_changelist_href", "path": "dje/utils.py", "snippet": "def queryset_to_changelist_href(queryset, params=None):\n \"\"\"Return an URL to a changelist based on the given queryset.\"\"\"\n if not queryset:\n return\n\n if params is None:\n params = {}\n\n opts = queryset.model._meta\n url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_changelist\")\n\n ids = queryset.values_list(\"id\", flat=True)\n params.update({\"id__in\": \",\".join(str(id_) for id_ in ids)})\n\n return f\"{url}?{urlencode(params)}\"" }, { "identifier": "ActivityLog", "path": "dje/views.py", "snippet": "class ActivityLog(\n LoginRequiredMixin,\n BootstrapCSSMixin,\n DownloadableMixin,\n TemplateView,\n):\n template_name = \"activity_log.html\"\n model = None\n action_flag_map = {\n History.ADDITION: \"Addition\",\n History.CHANGE: \"Change\",\n History.DELETION: \"Deletion\",\n }\n\n def get_days(self):\n days = self.request.GET.get(\"days\", None)\n try:\n return int(days)\n except (TypeError, ValueError):\n return 90 # Default value\n\n def get_history_entries(self, days):\n \"\"\"\n Return all the History entries, filtered by the number of days and\n scoped using the current user Dataspace.\n \"\"\"\n user_dataspace_id = self.request.user.dataspace_id\n content_type = ContentType.objects.get_for_model(self.model)\n start = datetime.datetime.now() - datetime.timedelta(days=days)\n\n history_entries = History.objects.filter(\n object_dataspace_id=user_dataspace_id,\n content_type=content_type,\n action_time__gt=start,\n )\n\n has_history_fields = issubclass(self.model, HistoryFieldsMixin)\n if has_history_fields:\n # Use the history fields from the model for the Addition entries\n history_entries = history_entries.exclude(action_flag=History.ADDITION)\n\n objects = self.model.objects.scope_by_id(user_dataspace_id).filter(\n created_date__gt=start\n )\n\n addition_entries = []\n for obj in objects:\n addition_entries.append(\n 
History(\n action_time=obj.created_date,\n user=obj.created_by,\n object_dataspace=obj.dataspace,\n content_type=content_type,\n object_id=obj.id,\n action_flag=History.ADDITION,\n change_message=\"Added.\",\n )\n )\n\n history_entries = list(history_entries) + addition_entries\n\n return history_entries\n\n @staticmethod\n def get_object_or_repr(history_entry):\n try:\n return history_entry.get_edited_object()\n except ObjectDoesNotExist:\n return history_entry.object_repr\n\n def get_objects(self, days):\n history_entries = self.get_history_entries(days)\n\n objects = []\n for history_entry in history_entries:\n objects.append(\n {\n \"history\": history_entry,\n \"obj\": self.get_object_or_repr(history_entry),\n \"action\": self.action_flag_map[history_entry.action_flag],\n }\n )\n\n return objects\n\n def get_format(self):\n return \"html\"\n\n def get_root_filename(self):\n model_name = self.model._meta.model_name\n return f\"{model_name}_activity_log\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n days = self.get_days()\n context.update(\n {\n \"days\": days,\n \"objects\": self.get_objects(days),\n \"verbose_name\": self.model._meta.verbose_name,\n \"dataspace\": self.request.user.dataspace,\n \"now\": datetime.datetime.now(),\n }\n )\n return context" }, { "identifier": "clone_dataset_view", "path": "dje/views.py", "snippet": "@login_required\ndef clone_dataset_view(request, pk):\n \"\"\"Call the clonedataset management command as a celery task.\"\"\"\n changelist_url = reverse(\"admin:dje_dataspace_changelist\")\n user = request.user\n template_dataspace = settings.TEMPLATE_DATASPACE\n\n if not all(\n [\n template_dataspace,\n user.is_superuser,\n user.dataspace.is_reference,\n ]\n ):\n return redirect(changelist_url)\n\n try:\n reference = Dataspace.objects.get(name=template_dataspace)\n target = Dataspace.objects.get(pk=pk)\n except Dataspace.DoesNotExist:\n return redirect(changelist_url)\n\n call_management_command.delay(\n \"clonedataset\",\n reference.name,\n target.name,\n user.username,\n user_id=user.id,\n product_portfolio=True,\n )\n\n msg = \"Cloning task in progress.\"\n if user.email:\n msg += f' An email will be sent to \"{user.email}\" on completion.'\n messages.success(request, msg)\n\n return redirect(changelist_url)" }, { "identifier": "docs_models_view", "path": "dje/views.py", "snippet": "@login_required\ndef docs_models_view(request):\n from dje.admin import dejacode_site\n\n apps_white_list = [\n \"organization\",\n \"license_library\",\n \"component_catalog\",\n \"product_portfolio\",\n \"workflow\",\n \"reporting\",\n \"policy\",\n ]\n\n model_classes = [\n model for model in apps.get_models() if model._meta.app_label in apps_white_list\n ]\n\n def get_limited_fields(model):\n return [\n f\n for f in model._meta.get_fields()\n if not f.auto_created and f.name not in [\"dataspace\", \"uuid\"]\n ]\n\n help_data = defaultdict(list)\n for model in model_classes:\n opts = model._meta\n app_verbose_name = apps.app_configs.get(opts.app_label).verbose_name\n model_admin = dejacode_site._registry.get(model)\n help_data[app_verbose_name].append(\n {\n \"verbose_name\": opts.verbose_name,\n \"short_description\": getattr(model_admin, \"short_description\", \"\"),\n \"long_description\": getattr(model_admin, \"long_description\", \"\"),\n \"fields\": get_limited_fields(model),\n }\n )\n\n context = {\n \"help_data\": dict(help_data),\n \"document\": {\"title\": \"Models documentation\"},\n }\n\n return render(request, 
\"admin/docs/models.html\", context)" }, { "identifier": "manage_copy_defaults_view", "path": "dje/views.py", "snippet": "@login_required\ndef manage_copy_defaults_view(request, pk):\n changelist_url = reverse(\"admin:dje_dataspace_changelist\")\n\n supported_apps = [\n \"dje\",\n \"organization\",\n \"license_library\",\n \"component_catalog\",\n \"product_portfolio\",\n \"workflow\",\n \"reporting\",\n \"policy\",\n \"notification\",\n ]\n\n try:\n dataspace = Dataspace.objects.get(pk=unquote_plus(pk))\n except Dataspace.DoesNotExist:\n return redirect(changelist_url)\n\n if not request.user.is_superuser or dataspace != request.user.dataspace:\n return redirect(changelist_url)\n\n if request.method == \"POST\":\n formset = CopyDefaultsFormSet(request.POST)\n if formset.is_valid():\n formset.save(dataspace)\n formset.log_change(request, dataspace, \"Changed copy defaults configuration.\")\n messages.success(request, _(\"Copy defaults updated.\"))\n else:\n messages.error(request, _(\"Error, please refresh the page and try again.\"))\n else:\n initial = [\n {\"app_name\": str(apps.get_app_config(app_label).verbose_name)}\n for app_label in supported_apps\n ]\n formset = CopyDefaultsFormSet(initial=initial)\n formset.load(dataspace, default=COPY_DEFAULT_EXCLUDE)\n\n context = {\n \"formset\": formset,\n \"object\": dataspace,\n \"opts\": dataspace._meta,\n }\n\n return render(request, \"admin/dje/dataspace/copy_defaults_form.html\", context)" }, { "identifier": "manage_tab_permissions_view", "path": "dje/views.py", "snippet": "@login_required\ndef manage_tab_permissions_view(request, pk):\n changelist_url = reverse(\"admin:dje_dataspace_changelist\")\n\n try:\n dataspace = Dataspace.objects.get(pk=unquote_plus(pk))\n except Dataspace.DoesNotExist:\n return redirect(changelist_url)\n\n if not request.user.is_superuser or dataspace != request.user.dataspace:\n return redirect(changelist_url)\n\n if request.method == \"POST\":\n formset = TabPermissionsFormSet(request.POST)\n if formset.is_valid():\n formset.save(dataspace)\n formset.log_change(request, dataspace, \"Changed tab permissions configuration.\")\n messages.success(request, _(\"Tab permissions updated.\"))\n else:\n messages.error(request, _(\"Error, please refresh the page and try again.\"))\n else:\n initial = [{\"group_name\": group.name} for group in Group.objects.all()]\n formset = TabPermissionsFormSet(initial=initial)\n formset.load(dataspace)\n\n context = {\n \"formset\": formset,\n \"object\": dataspace,\n \"opts\": dataspace._meta,\n }\n\n return render(request, \"admin/dje/dataspace/tab_permissions_form.html\", context)" }, { "identifier": "object_compare_view", "path": "dje/views.py", "snippet": "@login_required\ndef object_compare_view(request):\n target_dataspace_id = request.GET.get(\"target\", \"\")\n ids = request.GET.get(\"ids\", \"\").split(\",\")\n\n model_class = get_model_class_from_path(request.path)\n opts = model_class._meta\n preserved_filters = get_preserved_filters(\n request, model_class, parameter_name=\"_changelist_filters\"\n )\n changelist_url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_changelist\")\n redirect_url = add_preserved_filters(\n {\"preserved_filters\": preserved_filters, \"opts\": opts}, changelist_url\n )\n\n if len(ids) != 1:\n messages.warning(\n request, \"Compare allows 1 object only. 
Please select 1 object to compare.\"\n )\n return redirect(redirect_url)\n\n if request.user.dataspace.is_reference:\n if not target_dataspace_id:\n return dataspace_choice_for_compare_view(request)\n try:\n target_dataspace = Dataspace.objects.get(pk=target_dataspace_id)\n except ObjectDoesNotExist:\n return redirect(redirect_url)\n else:\n target_dataspace = request.user.dataspace\n\n try:\n source_object = model_class.objects.get(id=ids[0])\n except ObjectDoesNotExist:\n return redirect(redirect_url)\n\n target_object = get_object_in(source_object, target_dataspace)\n if not target_object:\n error = f'No related object in the Dataspace \"{target_dataspace}\"'\n messages.warning(request, error)\n return redirect(redirect_url)\n\n # First section of the view, presenting the per filed diff.\n if request.method == \"GET\":\n excluded = [\n \"last_modified_date\",\n \"created_date\",\n \"completion_level\",\n \"usage_policy\",\n \"guidance\",\n \"guidance_url\",\n ]\n\n compare_diff, compare_diff_m2m = get_object_compare_diff(\n source_object, target_object, excluded\n )\n\n return render(\n request,\n \"admin/object_compare.html\",\n {\n \"source_object\": source_object,\n \"target_object\": target_object,\n \"compare_diff\": compare_diff,\n \"compare_diff_m2m\": compare_diff_m2m,\n \"opts\": opts,\n \"preserved_filters\": preserved_filters,\n },\n )\n\n # POST section of the view\n updated_fields = []\n\n for field_name in request.POST.getlist(\"checkbox_select\"):\n field = source_object._meta.get_field(field_name)\n field_value = getattr(source_object, field_name)\n\n if isinstance(field, models.ForeignKey):\n if field_value is not None:\n fk_in_target = get_or_create_in(field_value, target_dataspace, request.user)\n if fk_in_target:\n setattr(target_object, field_name, fk_in_target)\n updated_fields.append(field_name)\n else:\n setattr(target_object, field_name, None)\n updated_fields.append(field_name)\n else:\n setattr(target_object, field_name, field_value)\n updated_fields.append(field_name)\n\n # Saving only at the end of the process, if at least one field was modified\n if updated_fields:\n target_object.save()\n message = 'Changed {}, values updated from \"{}\".'.format(\n \", \".join(updated_fields), source_object.dataspace.name\n )\n History.log_addition(request.user, target_object, message)\n messages.success(request, message)\n\n return redirect(redirect_url)" }, { "identifier": "object_copy_view", "path": "dje/views.py", "snippet": "@login_required\ndef object_copy_view(request):\n \"\"\"\n Copy objects across Dataspaces.\n This is the view called by the \"copy objects\" admin action.\n The first step is to present to the user the list of Object to be copied\n into his Dataspace and the list of Object that already exists\n in the target and give the choice to update those.\n If the User is a member of the Reference Dataspace, he's allowed\n to copy from any source Dataspace and to select any destination.\n This result as an extra step of presenting the target Dataspace list of\n choices.\n \"\"\"\n user_dataspace = request.user.dataspace\n # Declared here as it required in GET and POST cases.\n M2MConfigurationFormSet = formset_factory(\n wraps(M2MCopyConfigurationForm)(partial(M2MCopyConfigurationForm, user=request.user)),\n extra=0,\n )\n\n model_class = get_model_class_from_path(request.path)\n\n # Default entry point of the view, requested using a GET\n # At that stage, we are only looking at what the User requested,\n # making sure everything is in order, present him 
what is going to\n # happens and ask for his confirmation.\n if request.method == \"GET\":\n requested_ids = request.GET.get(\"ids\", \"\")\n\n # In case the view is not requested with the proper parameters\n if not requested_ids:\n raise Http404\n\n opts = model_class._meta\n preserved_filters = get_preserved_filters(\n request, model_class, parameter_name=\"_changelist_filters\"\n )\n changelist_url = reverse(f\"admin:{opts.app_label}_{opts.model_name}_changelist\")\n redirect_url = add_preserved_filters(\n {\"preserved_filters\": preserved_filters, \"opts\": opts}, changelist_url\n )\n\n # Ids of objects to be copied\n ids = requested_ids.split(\",\")\n\n # Limit the copy to 100 Objects at the time, as it's the number of\n # Objects we display per page, default value for the list_per_page\n # of the ModelAdmin\n COPY_NB_OBJECT_LIMIT = 100\n if len(ids) > COPY_NB_OBJECT_LIMIT:\n msg = (\n f\"Maximum of objects that can be copied at once is \"\n f\"limited to {COPY_NB_OBJECT_LIMIT} (by system-wide settings)\"\n )\n messages.warning(request, msg)\n return redirect(redirect_url)\n\n # Let's find the Source Dataspace using the first id\n # This block will redirect the user to the list if the\n # first id of the list do not exist\n try:\n source_object = model_class.objects.get(id=ids[0])\n except ObjectDoesNotExist:\n return redirect(redirect_url)\n\n # No custom permission for 'copy', we use the 'add' one\n if not has_permission(source_object, request.user, \"add\"):\n messages.error(request, _(\"Sorry you do not have rights to execute this action\"))\n return redirect(redirect_url)\n\n source = source_object.dataspace\n # As a non-Reference Dataspace User, I can only use the Reference\n # data as the source and my Dataspace as the target\n # As a Reference User, I can choose both, source and target.\n if user_dataspace.is_reference:\n # The following is only used when the User is in the Reference\n targets_from_request = request.GET.getlist(\"target\")\n # If the target has been set, then we can continue\n if targets_from_request:\n data = {\"target\": targets_from_request}\n choice_form = MultiDataspaceChoiceForm(source, request.user, data=data)\n if not choice_form.is_valid():\n return redirect(redirect_url)\n targets = choice_form.cleaned_data[\"target\"]\n # else, we build a form to offer the choice to the user,\n # choices do not include the current source\n else:\n initial = {\n \"ids\": requested_ids,\n \"_changelist_filters\": dict(parse_qsl(preserved_filters)).get(\n \"_changelist_filters\"\n ),\n }\n is_popup = request.GET.get(IS_POPUP_VAR, False)\n if is_popup:\n initial[\"_popup\"] = is_popup\n choice_form = MultiDataspaceChoiceForm(source, request.user, initial=initial)\n return render(\n request,\n \"admin/object_copy_dataspace_form.html\",\n {\n \"form\": choice_form,\n \"opts\": opts,\n \"is_popup\": is_popup,\n \"preserved_filters\": preserved_filters,\n },\n )\n elif not source.is_reference:\n # As a non-Reference User my only \"external\" source of data allowed\n # is the Reference Dataspace\n return redirect(redirect_url)\n else:\n targets = [user_dataspace]\n\n # At this stage, we have the Source and Target Dataspaces\n # Let's see which objects are eligible for copy, or offer the update\n copy_candidates = []\n update_candidates = []\n\n # Building a QuerySet based on the given ids, if an non-authorized or\n # non-existing id was injected it will be ignored thanks to the\n # id__in and the dataspace scoping.\n queryset = 
model_class.objects.scope(source).filter(id__in=ids)\n\n for target in targets:\n for source_instance in queryset:\n matched_object = get_object_in(source_instance, target)\n if matched_object:\n # Inject the source_instance for future usage in the template\n update_candidates.append((matched_object, source_instance))\n else:\n copy_candidates.append((source_instance, target))\n\n initial = {\n \"source\": source,\n \"targets\": targets,\n \"ct\": ContentType.objects.get_for_model(model_class).id,\n }\n form = CopyConfigurationForm(request.user, initial=initial)\n\n # Many2Many exclude on copy/update\n m2m_initial = [\n {\"ct\": ContentType.objects.get_for_model(m2m_field.remote_field.through).id}\n for m2m_field in model_class._meta.many_to_many\n ]\n\n # Also handle relational fields if explicitly declared on the Model using the\n # get_extra_relational_fields method.\n for field_name in model_class.get_extra_relational_fields():\n related_model = model_class._meta.get_field(field_name).related_model\n if related_model().get_exclude_candidates_fields():\n ct = ContentType.objects.get_for_model(related_model)\n m2m_initial.append({\"ct\": ct.id})\n\n m2m_formset = M2MConfigurationFormSet(initial=m2m_initial)\n\n return render(\n request,\n \"admin/object_copy.html\",\n {\n \"copy_candidates\": copy_candidates,\n \"update_candidates\": update_candidates,\n \"form\": form,\n \"m2m_formset\": m2m_formset,\n \"opts\": source_object._meta,\n \"preserved_filters\": preserved_filters,\n },\n )\n\n # Second section of the view, following the POST\n if request.method == \"POST\":\n config_form = CopyConfigurationForm(request.user, request.POST)\n\n if not config_form.is_valid():\n raise Http404\n\n model_class = config_form.model_class\n opts = model_class._meta\n preserved_filters = get_preserved_filters(\n request, model_class, parameter_name=\"_changelist_filters\"\n )\n\n # We use False rather than empty list to keep track of the non-selection\n # vs unknown in lower level copy method.\n exclude_copy = {model_class: config_form.cleaned_data.get(\"exclude_copy\")}\n exclude_update = {model_class: config_form.cleaned_data.get(\"exclude_update\")}\n\n # Append the m2m copy configuration\n for m2m_form in M2MConfigurationFormSet(request.POST):\n if not m2m_form.is_valid():\n continue\n m2m_model_class = m2m_form.model_class\n cleaned_data = m2m_form.cleaned_data\n skip_on_copy = cleaned_data.get(\"skip_on_copy\")\n skip_on_update = cleaned_data.get(\"skip_on_update\")\n exclude_copy.update(\n {m2m_model_class: SKIP if skip_on_copy else cleaned_data.get(\"exclude_copy\")}\n )\n exclude_update.update(\n {m2m_model_class: SKIP if skip_on_update else cleaned_data.get(\"exclude_update\")}\n )\n\n copy_candidates = request.POST.get(\"copy_candidates\", \"\")\n selected_for_update = request.POST.getlist(\"select_for_update\")\n\n source, copied, updated, errors = config_form.submit(\n copy_candidates, selected_for_update, exclude_copy, exclude_update\n )\n\n if copied or updated:\n msg = \"Copied/updated from {} dataspace.\".format(\n source.name if source.is_reference else \"another\"\n )\n\n if errors:\n errors_count = len(errors)\n msg = \"{} object w{} not copied/updated.\".format(\n errors_count, pluralize(errors_count, \"as,ere\")\n )\n messages.error(request, msg)\n\n object_for_raw_id_lookup = None\n if request.GET.get(IS_POPUP_VAR, 0):\n object_for_raw_id_lookup = copied + updated\n if len(object_for_raw_id_lookup) == 1:\n object_for_raw_id_lookup = object_for_raw_id_lookup[0][1]\n else:\n 
object_for_raw_id_lookup = None\n\n return render(\n request,\n \"admin/object_copy_results.html\",\n {\n \"copied\": copied,\n \"updated\": updated,\n \"errors\": errors,\n \"opts\": opts,\n \"preserved_filters\": preserved_filters,\n \"object_for_raw_id_lookup\": object_for_raw_id_lookup,\n },\n )" } ]
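The context list above ends here; before the record's import block, a few of its snippets deserve a concrete illustration. The `advanced_search` snippet maps a leading `:`, `=`, or `^` on a search term to the `icontains`, `iexact`, or `istartswith` ORM lookup. Below is a minimal, dependency-free sketch of that operator grammar; it returns plain lookup dicts instead of combining `django.db.models.Q` objects, and `term_to_lookups` is an illustrative name, not part of dje.

# Simplified re-creation of the operator grammar used by dje.search.advanced_search.
# Illustrative sketch only: plain dicts stand in for django.db.models.Q objects
# and the reduce(or_, ...) combination step is omitted.
LOOKUP_TYPES = {":": "icontains", "=": "iexact", "^": "istartswith"}

def term_to_lookups(term, search_fields):
    lookup_type = "icontains"  # default lookup, as in the original snippet
    if term and term[0] in LOOKUP_TYPES:  # 'apache', '^apache', '=apache', ':apache'
        lookup_type = LOOKUP_TYPES[term[0]]
        term = term[1:]
    return [{f"{field}__{lookup_type}": term} for field in search_fields]

print(term_to_lookups("^apache", ["name", "label"]))
# [{'name__istartswith': 'apache'}, {'label__istartswith': 'apache'}]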
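`get_previous_next` from dje/utils.py is pure Python and drives the previous/next navigation between admin change views. A short usage example, assuming the dje package is importable; the expected tuples match the doctests included in the snippet above.

from dje.utils import get_previous_next  # assumes dje is on the Python path

# Neighbors of the current id inside an ordered id list; None when absent.
assert get_previous_next([1, 2, 3, 4], 3) == (2, 4)
assert get_previous_next(["a", "b", "c"], "a") == (None, "b")
assert get_previous_next([1, 2, 3], 99) == (None, None)  # unknown id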
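`DataspaceFilter` scopes every changelist: an explicit `dataspace__id__exact` request parameter wins, otherwise results fall back to the requesting user's Dataspace. A plain-Python sketch of that fallback, with record dicts standing in for the queryset (`scope_records` is a hypothetical helper, not dje API):

def scope_records(records, user_dataspace_id, requested_dataspace_id=None):
    # Mirrors DataspaceFilter.queryset(): use the explicit filter value when
    # present, otherwise scope to the current user's Dataspace.
    dataspace_id = requested_dataspace_id or user_dataspace_id
    return [record for record in records if record["dataspace_id"] == dataspace_id]

records = [{"id": 1, "dataspace_id": 1}, {"id": 2, "dataspace_id": 2}]
assert scope_records(records, user_dataspace_id=1) == [{"id": 1, "dataspace_id": 1}]
assert scope_records(records, 1, requested_dataspace_id=2) == [{"id": 2, "dataspace_id": 2}]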
import csv
import operator
from collections import OrderedDict
from copy import copy
from functools import reduce

from django import forms
from django.conf import settings
from django.contrib import admin
from django.contrib import messages
from django.contrib.admin.helpers import ACTION_CHECKBOX_NAME
from django.contrib.admin.options import IS_POPUP_VAR
from django.contrib.admin.sites import AdminSite
from django.contrib.admin.templatetags.admin_urls import add_preserved_filters
from django.contrib.admin.utils import lookup_spawns_duplicates
from django.contrib.admin.utils import unquote
from django.contrib.admin.views.main import ChangeList
from django.contrib.admin.widgets import AdminDateWidget
from django.contrib.auth.admin import GroupAdmin
from django.contrib.auth.admin import UserAdmin
from django.contrib.auth.models import Group
from django.contrib.auth.models import Permission
from django.contrib.auth.views import LogoutView
from django.contrib.contenttypes.admin import GenericTabularInline
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist
from django.core.exceptions import FieldError
from django.core.exceptions import PermissionDenied
from django.db import models
from django.forms.formsets import DELETION_FIELD_NAME
from django.http import HttpResponse
from django.http.request import QueryDict
from django.shortcuts import get_object_or_404
from django.shortcuts import redirect
from django.template.loader import render_to_string
from django.template.response import TemplateResponse
from django.urls import path
from django.urls import reverse
from django.utils.encoding import force_str
from django.utils.html import format_html
from django.utils.html import format_html_join
from django.utils.http import urlencode
from django.utils.translation import gettext as _
from django.views.generic import RedirectView

from django_registration.backends.activation.views import RegistrationView

from dje.filters import IS_FILTER_LOOKUP_VAR
from dje.filters import CreatedByListFilter
from dje.filters import DataspaceFilter
from dje.filters import HistoryCreatedActionTimeListFilter
from dje.filters import HistoryModifiedActionTimeListFilter
from dje.filters import LimitToDataspaceListFilter
from dje.filters import MissingInFilter
from dje.forms import DataspaceAdminForm
from dje.forms import DataspacedAdminForm
from dje.forms import DejaCodeAuthenticationForm
from dje.importers import import_view
from dje.list_display import AsURL
from dje.mass_update import mass_update_action
from dje.models import Dataspace
from dje.models import DataspaceConfiguration
from dje.models import DejacodeUser
from dje.models import ExternalReference
from dje.models import ExternalSource
from dje.models import History
from dje.models import HistoryFieldsMixin
from dje.models import is_dataspace_related
from dje.notification import send_notification_email
from dje.notification import send_notification_email_on_queryset
from dje.permissions import get_protected_fields
from dje.search import advanced_search
from dje.utils import CHANGELIST_LINK_TEMPLATE
from dje.utils import class_wrap
from dje.utils import construct_changes_details_message
from dje.utils import get_previous_next
from dje.utils import group_by
from dje.utils import has_permission
from dje.utils import queryset_to_changelist_href
from dje.views import ActivityLog
from dje.views import clone_dataset_view
from dje.views import docs_models_view
from dje.views import manage_copy_defaults_view
from dje.views import manage_tab_permissions_view
from dje.views import object_compare_view
from dje.views import object_copy_view

from axes.admin import AccessAttemptAdmin
from axes.models import AccessAttempt
19188
( "Application Process Settings", { "fields": ( "set_usage_policy_on_new_component_from_licenses", "enable_package_scanning", "update_packages_from_scan", "enable_purldb_access", "enable_vulnerablecodedb_access", ) }, ), ) search_fields = ("name",) inlines = [DataspaceConfigurationInline] form = DataspaceAdminForm change_form_template = "admin/dje/dataspace/change_form.html" change_list_template = "admin/change_list_extended.html" def has_change_permission(self, request, obj=None): """ Bypass the ReferenceOnlyPermissions to allow regular Dataspace admins, with the right permission, to edit their own Dataspace. """ return super(admin.ModelAdmin, self).has_change_permission(request, obj) def get_readonly_fields(self, request, obj=None): """Make Dataspace.name field readonly on edit except for reference Dataspace superusers.""" readonly_fields = super().get_readonly_fields(request, obj) user = request.user if obj and not (user.dataspace.is_reference and user.is_superuser): readonly_fields += ("name",) return readonly_fields def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name urls = [ path( "<pk>/clonedataset/", self.admin_site.admin_view(clone_dataset_view), name="{}_{}_clonedataset".format(*info), ), path( "<pk>/tab_permissions/", self.admin_site.admin_view(manage_tab_permissions_view), name="{}_{}_tab_permissions".format(*info), ), path( "<pk>/copy_defaults/", self.admin_site.admin_view(manage_copy_defaults_view), name="{}_{}_copy_defaults".format(*info), ), ] return urls + super().get_urls() def get_queryset(self, request): """ Limit the QuerySet to the current user Dataspace. + the Reference one. If the user Dataspace is the Reference then show all. """ qs = super().get_queryset(request) if not request.user.dataspace.is_reference: qs = qs.filter(id=request.user.dataspace_id) return qs def get_actions(self, request): """Remove the bulk delete action, it does not make sense for Dataspace.""" actions = super().get_actions(request) if "delete_selected" in actions: del actions["delete_selected"] return actions def changeform_view(self, request, object_id=None, form_url="", extra_context=None): extra_context = extra_context or {} extra_context["template_dataspace"] = settings.TEMPLATE_DATASPACE return super().changeform_view(request, object_id, form_url, extra_context) class ChildRelationshipInline(DataspacedFKMixin, admin.TabularInline): fk_name = "parent" extra = 0 classes = ("grp-collapse grp-open",) raw_id_fields = ("child",) autocomplete_lookup_fields = {"fk": ["child"]} verbose_name = _("Child") class ExternalReferenceInline(DataspacedFKMixin, GenericTabularInline): model = ExternalReference extra = 0 classes = ("grp-collapse grp-open",) @admin.register(ExternalSource, site=dejacode_site) class ExternalSourceAdmin(DataspacedAdmin): def references(self, obj): """ Return links to the content_object changelist of ExternalReference instances, for the given ExternalSource instance, grouped per ContentType. 
""" changelist_links = [] queryset = obj.externalreference_set grouped = group_by(queryset, "content_type", count_on="object_id", distinct=True) for value in grouped: model_class = ContentType.objects.get(id=value["content_type"]).model_class() opts = model_class._meta url = reverse(f"admin:{opts.app_label}_{opts.model_name}_changelist") params = {EXTERNAL_SOURCE_LOOKUP: obj.id} href = f"{url}?{urlencode(params)}" changelist_link = format_html( CHANGELIST_LINK_TEMPLATE, href, value["count"], opts.verbose_name_plural ) changelist_links.append([changelist_link]) html_list = "<ul>{}</ul>".format(format_html_join("", "<li>{}</li>", changelist_links))
# # Copyright (c) nexB Inc. and others. All rights reserved. # DejaCode is a trademark of nexB Inc. # SPDX-License-Identifier: AGPL-3.0-only # See https://github.com/nexB/dejacode for support or download. # See https://aboutcode.org for more information about AboutCode FOSS projects. # EXTERNAL_SOURCE_LOOKUP = "external_references__external_source_id" ADDITION = History.ADDITION CHANGE = History.CHANGE DELETION = History.DELETION class DejaCodeAdminSite(AdminSite): login_template = "registration/login.html" login_form = DejaCodeAuthenticationForm site_title = _("DejaCode Administration") site_header = _("DejaCode Administration") index_title = _("DejaCode Administration") empty_value_display = "" def get_urls(self): """Override the admin:logout and admin:password_change views to the default ones.""" urls = [ path("logout/", LogoutView.as_view(next_page="login"), name="logout"), path( "password_change/", RedirectView.as_view(url="/account/password_change/", permanent=True), name="password_change", ), path("docs/models/", docs_models_view, name="docs_models"), ] return urls + super().get_urls() dejacode_site = DejaCodeAdminSite() @admin.display(description="") def get_hierarchy_link(obj): """Return a link to the Hierarchy view if the obj has at least 1 parent or 1 child.""" if obj.has_parent_or_child(): return format_html( '<a href="{}#hierarchy" target="_blank" class="hierarchy-icon"' ' title="Hierarchy">&nbsp;</a>', obj.get_absolute_url(), ) def get_additional_information_fieldset(pre_fields=None): fields = ( "dataspace", "uuid", "created_date", "created_by", "last_modified_date", "last_modified_by", ) if pre_fields: fields = pre_fields + fields return ("Additional Information", {"classes": ("grp-collapse grp-closed",), "fields": fields}) class ReferenceOnlyPermissions: def has_add_permission(self, request): """Limits the addition to Reference dataspace users.""" perm = super().has_add_permission(request) return perm and request.user.dataspace.is_reference def has_change_permission(self, request, obj=None): """Limits the change to Reference dataspace users.""" perm = super().has_change_permission(request, obj) return perm and request.user.dataspace.is_reference def has_delete_permission(self, request, obj=None): """Limits the deletion to Reference dataspace users.""" perm = super().has_delete_permission(request, obj) return perm and request.user.dataspace.is_reference def has_view_permission(self, request, obj=None): perm = super().has_view_permission(request, obj) return perm and request.user.dataspace.is_reference class DataspacedFKMixin: """ Limit the QuerySet of ForeignKeys to the current Dataspace, or to the parent object in case of Inlines. On ADDITION, the Dataspace is taken from the User On MODIFICATION, it's taken on the current object instance or parent instance in case of Inlines. This class can be applied to ModelAdmins and Inlines. The declared limit_choices_to on the model field will be respected. """ # The QuerySet for the fields in this list will be scoped by the Model content_type content_type_scope_fields = [] def formfield_for_foreignkey(self, db_field, request=None, **kwargs): # If a QuerySet was given in the kwargs of the calling method, we then # assume that the filtering was done and we skip further processing. 
qs = kwargs.get("queryset", None) if qs is not None: return super().formfield_for_foreignkey(db_field, request, **kwargs) related_model = db_field.related_model if is_dataspace_related(related_model): # No instance, ADDITION, get dataspace from user if not getattr(request, "_object", None): dataspace = request.user.dataspace # Parent instance, MODIFICATION, dataspace from instance else: dataspace = request._object.dataspace kwargs["queryset"] = db_field.related_model.objects.scope(dataspace).complex_filter( db_field.remote_field.limit_choices_to ) if db_field.name in self.content_type_scope_fields: kwargs["queryset"] = kwargs["queryset"].filter( content_type=ContentType.objects.get_for_model(self.model) ) return super().formfield_for_foreignkey(db_field, request, **kwargs) class ProtectedFieldsMixin: def get_readonly_fields(self, request, obj=None): """Add field level permissions.""" readonly_fields = super().get_readonly_fields(request, obj) protected_fields = get_protected_fields(self.model, request.user) if protected_fields: readonly_fields += tuple(protected_fields) return readonly_fields class ChangelistPopupPermissionMixin: """ Allow the changelist view access in popup mode for users without change permission. In the case of raw_id_fields feature, this view need be be available to select the related object. This mixin bypass the limitation in Django: https://code.djangoproject.com/ticket/11561 Only the changelist is available, the form is never accessible. """ def has_change_permission(self, request, obj=None): if obj is None and IS_POPUP_VAR in request.GET: return True return super().has_change_permission(request, obj) class ProhibitDataspaceLookupMixin: """ Prohibit all `dataspace` related lookups. Remove the possibility to look into other Dataspaces. """ def lookup_allowed(self, lookup, value): if lookup.startswith("dataspace"): return False return super().lookup_allowed(lookup, value) def check(self, **kwargs): errors = super().check(**kwargs) has_dataspace_filter = DataspaceFilter in self.list_filter if has_dataspace_filter: errors.append( checks.Error(f"Remove {DataspaceFilter} from {self}.list_filter", obj=self) ) return errors def get_queryset(self, request): return super().get_queryset(request).scope_for_user(request.user) class AdvancedSearchAdminMixin: def get_search_results(self, request, queryset, search_term): """Replace default search with advanced system.""" use_distinct = False search_fields = self.get_search_fields(request) if search_fields and search_term: filters = [] try: filters = advanced_search(search_term, search_fields) except FieldError as e: messages.error(request, e) except ValueError as e: messages.error(request, f"Search terms error: {e}") if filters: queryset = queryset.filter(filters) if not use_distinct: for search_spec, __ in filters.children: if lookup_spawns_duplicates(self.opts, search_spec): use_distinct = True break return queryset, use_distinct class HistoryAdminMixin: def log_addition(self, request, object, change_message=None): history_entry = History.log_addition(request.user, object) if ADDITION in getattr(self, "email_notification_on", []): send_notification_email(request.user, object, ADDITION) return history_entry def log_change(self, request, object, message): """ Add notification on object update. The notification system can be disabled by setting _disable_notification to True on the request. 
""" serialized_data = getattr(request, "_serialized_data", None) history_entry = History.log_change(request.user, object, message, serialized_data) message = history_entry.get_change_message() disable_notification = getattr(request, "_disable_notification", False) if CHANGE in getattr(self, "email_notification_on", []) and not disable_notification: # Expending the base message with details changes_details = getattr(request, "changes_details", {}) message += construct_changes_details_message(changes_details) send_notification_email(request.user, object, CHANGE, message) return history_entry def log_deletion(self, request, object, object_repr): """ Log that an object will be deleted. Note that this method must be called before the deletion. """ return History.log_deletion(request.user, object) def history_view(self, request, object_id, extra_context=None): response = super().history_view(request, object_id, extra_context) context_data = getattr(response, "context_data", None) if context_data: # In case response is a HttpResponseRedirect obj = context_data["object"] history_qs = History.objects if is_dataspace_related(self.model): history_qs = history_qs.filter(object_dataspace__id=obj.dataspace_id) history_entries = ( history_qs.filter( object_id=unquote(object_id), content_type=ContentType.objects.get_for_model(self.model), ) .select_related() .order_by("-action_time") ) obj_has_history_fields = isinstance(obj, HistoryFieldsMixin) if obj_has_history_fields: # Use the history fields from the model for the Addition entry. addition_entry = History( action_time=obj.created_date, user=obj.created_by, change_message="Added.", ) history_entries = history_entries.exclude(action_flag=History.ADDITION) history_entries = list(history_entries) + [addition_entry] response.context_data["action_list"] = history_entries return response class ColoredIconAdminMixin: class Media: js = [ "fontawesomefree/js/all.min.js", ] def colored_icon(self, obj): if obj.icon and obj.color_code: return obj.get_icon_as_html() class DataspacedChangeList(ChangeList): def get_results(self, request): """ Store the result_list ids in the session for the Previous and Next navigation button and "Save and go to next" feature. The session values are then used in the change_view() method of the ModelAdmin. Injects the preserved_filters on each object of the result_list to be used in list_display callable, as the request is not available there. Hierarchy links on ComponentAdmin and OwnerAdmin, as well as the annotation link on the LicenseAdmin requires this. 
This workaround could be removed once the following gets solved in Django: https://code.djangoproject.com/ticket/13659 """ super().get_results(request) for obj in self.result_list: obj._preserved_filters = self.preserved_filters self.set_reference_link(request) @property def has_filters_activated(self): return bool(self.get_filters_params()) def get_filters_params(self, params=None): lookup_params = super().get_filters_params(params) if IS_FILTER_LOOKUP_VAR in lookup_params: del lookup_params[IS_FILTER_LOOKUP_VAR] return lookup_params def set_reference_link(self, request): """Add a 'View Reference Data' or 'View My Data' link in the changelist header.""" do_set_link = all( [ DataspaceFilter in self.model_admin.list_filter, self.model_admin.lookup_allowed(DataspaceFilter.parameter_name, None), not self.is_popup, ] ) if not do_set_link: return reference_dataspace = Dataspace.objects.get_reference() if reference_dataspace and reference_dataspace != request.user.dataspace: dataspace_id = request.GET.get(DataspaceFilter.parameter_name) if dataspace_id and dataspace_id != request.user.dataspace_id: self.my_dataspace_link = True else: params = f"?{DataspaceFilter.parameter_name}={reference_dataspace.id}" self.reference_params = params class DataspacedAdmin( DataspacedFKMixin, ProtectedFieldsMixin, AdvancedSearchAdminMixin, HistoryAdminMixin, admin.ModelAdmin, ): formfield_overrides = { models.DateField: {"widget": AdminDateWidget(attrs={"placeholder": "YYYY-MM-DD"})}, } list_filter = (DataspaceFilter,) readonly_fields = ( "dataspace", "uuid", ) actions = ["copy_to", "compare_with"] actions_to_remove = [] email_notification_on = [ADDITION, CHANGE, DELETION] save_as = True # Display only the current count show_full_result_count = False # Display a warning if any of the identifier_fields has changed identifier_fields_warning = True # Default form; customized forms should always extend DataspacedAdminForm form = DataspacedAdminForm # Using extended version of base templates to avoid code duplication change_form_template = "admin/change_form_extended.html" change_list_template = "admin/change_list_extended.html" # Set this to a BaseImporter extension of the Model to enable the import importer_class = None # Set this to a DejacodeMassUpdateForm to enable the mass update action mass_update_form = None # Set this to False to disable the Activity Log feature activity_log = True # Set this to True to enable the Previous and Next buttons in change view navigation_buttons = False preserve_filters = True # Do not display the View on site links by default # Set: view_on_site = DataspacedAdmin.changeform_view_on_site # for the default obj.get_absolute_url() view_on_site = False def __init__(self, model, admin_site): self.form.admin_site = admin_site super().__init__(model, admin_site) def check(self, **kwargs): errors = super().check(**kwargs) has_wrong_form_subclass = all( [ not issubclass(self.form, DataspacedAdminForm), self.model._meta.unique_together != (("dataspace", "uuid"),), ] ) if has_wrong_form_subclass: errors.extend( [checks.Error(f"{self.form} is not a subclass of {DataspacedAdminForm}", obj=self)] ) return errors def changeform_view_on_site(self, obj): return obj.get_absolute_url() @admin.display(description=_("View")) def changelist_view_on_site(self, obj): return format_html('<a href="{}" target="_blank">View</a>', obj.get_absolute_url()) @admin.display(description=_("URN")) def urn_link(self, instance): """Attach the URN link if URN is supported on the Model.""" if instance.pk: return
instance.urn_link return f"URN will be available once the {instance._meta.verbose_name} is saved." def get_queryset(self, request): qs = super().get_queryset(request) return qs.scope_for_user_in_admin(request.user) def get_changelist(self, request, **kwargs): return DataspacedChangeList def get_list_filter(self, request): """Limit the availability of `MissingInFilter` to superusers.""" list_filter = list(super().get_list_filter(request)) if not request.user.is_superuser and MissingInFilter in list_filter: del list_filter[list_filter.index(MissingInFilter)] # Custom LogEntry-based filters when field not available on the model history_filters = { "created_by": CreatedByListFilter, "last_modified_by": None, "created_date": HistoryCreatedActionTimeListFilter, "last_modified_date": HistoryModifiedActionTimeListFilter, } for field_name, default_filter in history_filters.items(): try: field = self.model._meta.get_field(field_name) except FieldDoesNotExist: if default_filter: list_filter.append(default_filter) continue filtr = field_name if isinstance(field, models.ForeignKey): filtr = (field_name, LimitToDataspaceListFilter) list_filter.append(filtr) return list_filter def get_readonly_fields(self, request, obj=None): readonly_fields = super().get_readonly_fields(request, obj) if issubclass(self.model, HistoryFieldsMixin): readonly_fields += ( "created_date", "created_by", "last_modified_date", "last_modified_by", ) return readonly_fields def change_view(self, request, object_id, form_url="", extra_context=None): """ Render the changelist using the current preserved filters to gather the previous and next id. """ context = extra_context or {} # WARNING: request.GET is important as a condition since we do not want to run this # expensive operation in case no given filter/search/sort is applied. # For example when the admin form is reached directly from the user details view. if self.navigation_buttons and request.method == "GET" and request.GET: fake_request = copy(request) query_dict = fake_request.GET.copy() preserved_filters = query_dict.pop("_changelist_filters", "") if preserved_filters: preserved_filters = force_str(preserved_filters[0]) fake_request.GET = QueryDict(preserved_filters) changelist_view = self.changelist_view(fake_request) # Sanity check, if the _changelist_filters were manually changed for example. if hasattr(changelist_view, "context_data"): # Do not use ".values_list('id', flat=True)" to avoid an extra query ids_list = [str(obj.id) for obj in changelist_view.context_data["cl"].result_list] previous_id, next_id = get_previous_next(ids_list, str(object_id)) context.update( { "previous_id": previous_id, "next_id": next_id, } ) if self.save_as and self.identifier_fields_warning: identifier_fields = self.model.get_identifier_fields() context["identifier_fields"] = identifier_fields return super().change_view(request, object_id, form_url, context) def render_change_form(self, request, context, add=False, change=False, form_url="", obj=None): response = super().render_change_form(request, context, add, change, form_url, obj) # De-activating the 'Save as' option if the user is editing an Object # belonging to another Dataspace. # We are able to update the context_data at that point as the # TemplateResponse as not been rendered yet. 
if obj and obj.dataspace != request.user.dataspace: response.context_data["save_as"] = False return response @staticmethod def get_selected_ids_from_request(request, queryset): select_across = request.POST.get("select_across", 0) if int(select_across): # This is the "Selected all" case, we are using the all queryset object_ids = list(queryset.values_list("id", flat=True)) else: object_ids = request.POST.getlist(ACTION_CHECKBOX_NAME) # Converting the ids list in a comma separated string, to be used # in the GET parameters return ",".join(str(id_) for id_ in object_ids) def base_action_with_redirect(self, request, queryset, viewname): ids = self.get_selected_ids_from_request(request, queryset) opts = self.model._meta preserved_filters = self.get_preserved_filters(request) view_url = reverse(f"admin:{opts.app_label}_{opts.model_name}_{viewname}") url_with_params = "{}?{}".format(view_url, urlencode({"ids": ids})) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, url_with_params ) return redirect(redirect_url) @admin.display(description=_("Copy the selected objects")) def copy_to(self, request, queryset): """Copy the selected objects to another Dataspace.""" return self.base_action_with_redirect(request, queryset, "copy") @admin.display(description=_("Compare the selected object")) def compare_with(self, request, queryset): """Compare one selected object with a matching object in another Dataspace.""" return self.base_action_with_redirect(request, queryset, "compare") @admin.display(description=_("Check for updates in reference data")) def check_updates_in_reference(self, request, queryset): values = queryset.values_list("uuid", "last_modified_date") orm_lookups = [ models.Q(**{"uuid": uuid, "last_modified_date__gt": last_modified_date}) for uuid, last_modified_date in values ] return self.base_check_in_reference_action(request, self.model, orm_lookups) @admin.display(description=_("Check for newer versions in reference data")) def check_newer_version_in_reference(self, request, queryset): values = queryset.values_list("name", "version") orm_lookups = [ models.Q(**{"name": name, "version__gt": version}) for name, version in values ] return self.base_check_in_reference_action(request, self.model, orm_lookups) @staticmethod def base_check_in_reference_action(request, model_class, orm_lookups): reference_dataspace = Dataspace.objects.get_reference() if not reference_dataspace or not orm_lookups: return updated_qs = model_class.objects.scope(reference_dataspace).filter( reduce(operator.or_, orm_lookups) ) params = {DataspaceFilter.parameter_name: reference_dataspace.pk} changelist_href = queryset_to_changelist_href(updated_qs, params) if changelist_href: return redirect(changelist_href) messages.warning(request, "No updates available in the reference dataspace.") @staticmethod def get_changes_details(form): """ Introspect a given form to collect the changes details. 
Original values are collected on the DB instance (pre-save value) and new values are collected on the form (post-save value) """ if not form.instance.pk: return {} model_class = form.instance.__class__ original_instance = model_class.objects.get(pk=form.instance.pk) changes_details = [] # Using form.changed_data to only iterate on updated fields for field in form.changed_data: original_value = getattr(original_instance, field) new_value = getattr(form.instance, field) changes_details.append((field, original_value, new_value)) return {form.instance: changes_details} def save_model(self, request, obj, form, change): """Set the created_by and last_modified_by fields at save time.""" # This has no impact on save() if the model does not declare those fields. obj.last_modified_by = request.user if not change: obj.created_by = request.user # Injecting the results in the request for future use in the # log_change() method. This content will be used to add the changes # details into the notification message. # Using an OrderedDict to keep the main instance details first # The related objects changes (inlines) are gathered in # self.save_formset() request.changes_details = OrderedDict() if change and CHANGE in self.email_notification_on: request.changes_details.update(self.get_changes_details(form)) super().save_model(request, obj, form, change) def save_formset(self, request, form, formset, change): """ Set the Dataspace on the Inline instance before it's saved. Using the Dataspace of the Model instance of this ModelAdmin. Also craft the change details of the Inlines. """ for f in formset.forms: # Skip if nothing has changed in the current inline form if not f.changed_data: continue # Set the Dataspace on the Inline instance in case of addition of # the current inline. # The Dataspace is taken from the main form instance. if not f.instance.dataspace_id: f.instance.dataspace = form.instance.dataspace # Only in case of a 'change' on the main instance if change and CHANGE in self.email_notification_on: # As the `change` param is only about the main instance, we use # the pk of the inline instance to make sure we are in a # MODIFICATION case. # # If the pk of the inline instance is None, this is an ADDITION, # so skip the details creation. Also, if DELETION_FIELD_NAME is # in changed_data, we are in an inline deletion case, skipping # too. if f.instance.pk and DELETION_FIELD_NAME not in f.changed_data: # request.changes_details is created in self.save_model() request.changes_details.update(self.get_changes_details(f)) super().save_formset(request, form, formset, change) def delete_model(self, request, obj): # We are using this rather than self.log_deletion because it's not called here; # History.log_deletion is called for each object in the bulk. History.log_deletion(request.user, obj) super().delete_model(request, obj) if DELETION in self.email_notification_on: send_notification_email(request.user, obj, DELETION) def delete_queryset(self, request, queryset): """ Add the email notification on bulk deletion through the default Django 'delete_selected' action.
""" send_notification_email_on_queryset(request.user, queryset, DELETION) super().delete_queryset(request, queryset) def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name urls = [] if self.activity_log: urls += [ path( "activity_log/", self.admin_site.admin_view(ActivityLog.as_view(model=self.model)), name="{}_{}_activity_log".format(*info), ) ] actions = getattr(self, "actions", []) actions_to_remove = getattr(self, "actions_to_remove", []) if "copy_to" in actions and "copy_to" not in actions_to_remove: urls += [ path( "copy/", self.admin_site.admin_view(object_copy_view), name="{}_{}_copy".format(*info), ), ] if "compare_with" in actions and "compare_with" not in actions_to_remove: urls += [ path( "compare/", self.admin_site.admin_view(object_compare_view), name="{}_{}_compare".format(*info), ), ] if self.importer_class: urls += [ path( "import/", self.admin_site.admin_view(import_view), {"importer_class": self.importer_class}, name="{}_{}_import".format(*info), ), ] return urls + super().get_urls() def get_form(self, request, obj=None, **kwargs): """ Set the `obj` instance on the `request` for future processing. Set the serialized_data of the object on the `request` to be used in the `log_change` method. Set the `request` on the `form_class`. """ if obj: request._object = obj request._serialized_data = obj.as_json() form_class = super().get_form(request, obj, **kwargs) form_class.request = request return form_class def get_fieldsets(self, request, obj=None): """Exclude form fields from the ADMIN_FORMS_CONFIGURATION settings.""" fieldsets = super().get_fieldsets(request, obj) forms_config = settings.ADMIN_FORMS_CONFIGURATION if not forms_config: return fieldsets model_config = forms_config.get(self.model._meta.model_name, {}) exclude = model_config.get("exclude", []) if not exclude: return fieldsets fieldsets_with_exclude = [] for label, entry in fieldsets: fields = entry.get("fields") if fields: entry["fields"] = [field for field in fields if field not in exclude] fieldsets_with_exclude.append((label, entry)) return fieldsets_with_exclude def get_inline_instances(self, request, obj=None): """Injects the ``request`` in each inline form to be used in validation.""" base_instances = super().get_inline_instances(request, obj) instances = [] request._object = obj for inline in base_instances: inline.form.request = request instances.append(inline) return instances def get_actions(self, request): """Limit the available actions based on who you are and what you are looking at.""" if IS_POPUP_VAR in request.GET: return OrderedDict() actions = super().get_actions(request) is_user_dataspace = DataspaceFilter.parameter_name not in request.GET can_mass_update = all( [ self.mass_update_form, has_permission(self.model, request.user, "change"), is_user_dataspace or request.user.dataspace.is_reference, ] ) if can_mass_update: actions["mass_update"] = (mass_update_action, "mass_update", "Mass update") if not has_permission(self.model, request.user, "add") and "copy_to" in actions: del actions["copy_to"] if not request.user.dataspace.is_reference: if is_user_dataspace: # The user is looking at his own Dataspace if "copy_to" in actions: del actions["copy_to"] if "compare_with" in actions: del actions["compare_with"] else: # The user is looking at another Dataspace if "delete_selected" in actions: del actions["delete_selected"] if request.user.dataspace.is_reference or not is_user_dataspace: if "check_updates_in_reference" in actions: del actions["check_updates_in_reference"] if 
"check_newer_version_in_reference" in actions: del actions["check_newer_version_in_reference"] for action in self.actions_to_remove: if action in actions: del actions[action] return actions @admin.display(description=_("Copy to my Dataspace")) def copy_link(self, obj): return format_html( '<strong><a href="{}&{}=1">{}</a></strong>', obj.get_copy_url(), IS_POPUP_VAR, _("Copy to my Dataspace"), ) @staticmethod def hide_display_links(request): return all( [ DataspaceFilter.parameter_name in request.GET, request.GET.get(DataspaceFilter.parameter_name) != str(request.user.dataspace_id), ] ) def get_list_display(self, request): """ Remove the view_on_site and hierarchy links in popup mode. Also insert the copy link when looking at another dataspace. """ list_display = super().get_list_display(request) if IS_POPUP_VAR in request.GET: list_display = list(list_display) if "changelist_view_on_site" in list_display: list_display.remove("changelist_view_on_site") if get_hierarchy_link in list_display: list_display.remove(get_hierarchy_link) if self.hide_display_links(request): list_display = list(list_display) if "copy_to" not in self.actions_to_remove: list_display.insert(0, "copy_link") if get_hierarchy_link in list_display: list_display.remove(get_hierarchy_link) return list_display def get_list_display_links(self, request, list_display): """Remove all the display_links when looking at another dataspace.""" if not self.hide_display_links(request): return super().get_list_display_links(request, list_display) def response_change(self, request, obj): """Add the logic for the "Save and go to next" feature.""" next_id = request.POST.get("next_id") if "_next" in request.POST and next_id: opts = self.model._meta preserved_filters = self.get_preserved_filters(request) msg_dict = { "name": str(opts.verbose_name), "obj": str(obj), } msg = 'The {name} "{obj}" was changed successfully.'.format(**msg_dict) self.message_user(request, msg, messages.SUCCESS) viewname = f"admin:{opts.app_label}_{opts.model_name}_change" next_url = reverse(viewname, args=[next_id], current_app=self.admin_site.name) redirect_url = add_preserved_filters( {"preserved_filters": preserved_filters, "opts": opts}, next_url ) return redirect(redirect_url) return super().response_change(request, obj) def lookup_allowed(self, lookup, value): if lookup in [EXTERNAL_SOURCE_LOOKUP]: return True return super().lookup_allowed(lookup, value) @staticmethod def _limited_permission(request, obj, has_perm): # Model permission if not has_perm: return False # Object instance permission if obj and obj.dataspace_id != request.user.dataspace_id: return request.user.dataspace.is_reference return True def has_add_permission(self, request): has_perm = super().has_add_permission(request) # Do not display the "Add" link in filter lookup popup mode if IS_FILTER_LOOKUP_VAR in request.GET: return False return has_perm def has_change_permission(self, request, obj=None): has_perm = super().has_change_permission(request, obj) return self._limited_permission(request, obj, has_perm) def has_delete_permission(self, request, obj=None): has_perm = super().has_delete_permission(request, obj) return self._limited_permission(request, obj, has_perm) def has_view_permission(self, request, obj=None): has_perm = super().has_view_permission(request, obj) return self._limited_permission(request, obj, has_perm) def has_importer(self): """Return True if the importer_class has been set.""" if self.importer_class: return True def has_activity_log(self): """Return True if the 
activity_log has been set.""" if self.activity_log: return True class HiddenValueWidget(forms.TextInput): """Render a hidden value in the UI.""" HIDDEN_VALUE = "*******" def render(self, name, value, attrs=None, renderer=None): value = self.HIDDEN_VALUE if value else None return super().render(name, value, attrs, renderer) class DataspaceConfigurationForm(forms.ModelForm): """ Configure Dataspace settings. This form includes fields for various API keys, with sensitive values hidden in the UI using the HiddenValueWidget. """ hidden_value_fields = [ "scancodeio_api_key", "vulnerablecode_api_key", "purldb_api_key", ] def __init__(self, *args, **kwargs): """Initialize the form and set HiddenValueWidget for specified fields.""" super().__init__(*args, **kwargs) for field_name in self.hidden_value_fields: self.fields[field_name].widget = HiddenValueWidget() def clean(self): """Clean the form data, excluding hidden values from cleaned_data.""" for field_name in self.hidden_value_fields: value = self.cleaned_data.get(field_name) if value == HiddenValueWidget.HIDDEN_VALUE: del self.cleaned_data[field_name] class DataspaceConfigurationInline(DataspacedFKMixin, admin.StackedInline): model = DataspaceConfiguration form = DataspaceConfigurationForm verbose_name_plural = _("Configuration") verbose_name = _("Dataspace configuration") fields = [ "homepage_layout", "scancodeio_url", "scancodeio_api_key", "vulnerablecode_url", "vulnerablecode_api_key", "purldb_url", "purldb_api_key", ] can_delete = False @admin.register(Dataspace, site=dejacode_site) class DataspaceAdmin( ReferenceOnlyPermissions, HistoryAdminMixin, admin.ModelAdmin, ): short_description = ( "A Dataspace is an independent, exclusive set of DejaCode data, " "which can be either nexB reference data or installation-specific data." ) long_description = ( "Each DJE application User is associated with exactly one Dataspace, " "and the data owned by that Dataspace is presented to the user when " "accessing the application. 
" "An installation of DejaCode typically contains the following Dataspaces:" "nexB: Reference reference data from nexB" "{{mySite}}: Production data for a specific DejaCode installation" "{{sandbox}}: Data for testing, training, or staging activities" ) list_display = ( "name", "full_name", AsURL("homepage_url", short_description="Homepage URL"), AsURL("contact_info", short_description="Contact information"), ) fieldsets = ( ( "", { "fields": ( "name", "homepage_url", "notes", "home_page_announcements", "logo_url", ) }, ), ( "Attribution Package Information", { "fields": ( "full_name", "address", "contact_info", "open_source_information_url", "open_source_download_url", ) }, ), ( "User Interface Settings", { "fields": ( "show_license_profile_in_license_list_view", "show_license_type_in_license_list_view", "show_spdx_short_identifier_in_license_list_view", "show_usage_policy_in_user_views", "show_type_in_component_list_view", "hide_empty_fields_in_component_details_view", ) }, ), ( "Application Process Settings", { "fields": ( "set_usage_policy_on_new_component_from_licenses", "enable_package_scanning", "update_packages_from_scan", "enable_purldb_access", "enable_vulnerablecodedb_access", ) }, ), ) search_fields = ("name",) inlines = [DataspaceConfigurationInline] form = DataspaceAdminForm change_form_template = "admin/dje/dataspace/change_form.html" change_list_template = "admin/change_list_extended.html" def has_change_permission(self, request, obj=None): """ Bypass the ReferenceOnlyPermissions to allow regular Dataspace admins, with the right permission, to edit their own Dataspace. """ return super(admin.ModelAdmin, self).has_change_permission(request, obj) def get_readonly_fields(self, request, obj=None): """Make Dataspace.name field readonly on edit except for reference Dataspace superusers.""" readonly_fields = super().get_readonly_fields(request, obj) user = request.user if obj and not (user.dataspace.is_reference and user.is_superuser): readonly_fields += ("name",) return readonly_fields def get_urls(self): info = self.model._meta.app_label, self.model._meta.model_name urls = [ path( "<pk>/clonedataset/", self.admin_site.admin_view(clone_dataset_view), name="{}_{}_clonedataset".format(*info), ), path( "<pk>/tab_permissions/", self.admin_site.admin_view(manage_tab_permissions_view), name="{}_{}_tab_permissions".format(*info), ), path( "<pk>/copy_defaults/", self.admin_site.admin_view(manage_copy_defaults_view), name="{}_{}_copy_defaults".format(*info), ), ] return urls + super().get_urls() def get_queryset(self, request): """ Limit the QuerySet to the current user Dataspace. + the Reference one. If the user Dataspace is the Reference then show all. 
""" qs = super().get_queryset(request) if not request.user.dataspace.is_reference: qs = qs.filter(id=request.user.dataspace_id) return qs def get_actions(self, request): """Remove the bulk delete action, it does not make sense for Dataspace.""" actions = super().get_actions(request) if "delete_selected" in actions: del actions["delete_selected"] return actions def changeform_view(self, request, object_id=None, form_url="", extra_context=None): extra_context = extra_context or {} extra_context["template_dataspace"] = settings.TEMPLATE_DATASPACE return super().changeform_view(request, object_id, form_url, extra_context) class ChildRelationshipInline(DataspacedFKMixin, admin.TabularInline): fk_name = "parent" extra = 0 classes = ("grp-collapse grp-open",) raw_id_fields = ("child",) autocomplete_lookup_fields = {"fk": ["child"]} verbose_name = _("Child") class ExternalReferenceInline(DataspacedFKMixin, GenericTabularInline): model = ExternalReference extra = 0 classes = ("grp-collapse grp-open",) @admin.register(ExternalSource, site=dejacode_site) class ExternalSourceAdmin(DataspacedAdmin): def references(self, obj): """ Return links to the content_object changelist of ExternalReference instances, for the given ExternalSource instance, grouped per ContentType. """ changelist_links = [] queryset = obj.externalreference_set grouped = group_by(queryset, "content_type", count_on="object_id", distinct=True) for value in grouped: model_class = ContentType.objects.get(id=value["content_type"]).model_class() opts = model_class._meta url = reverse(f"admin:{opts.app_label}_{opts.model_name}_changelist") params = {EXTERNAL_SOURCE_LOOKUP: obj.id} href = f"{url}?{urlencode(params)}" changelist_link = format_html( CHANGELIST_LINK_TEMPLATE, href, value["count"], opts.verbose_name_plural ) changelist_links.append([changelist_link]) html_list = "<ul>{}</ul>".format(format_html_join("", "<li>{}</li>", changelist_links))
return class_wrap(html_list, "width200")
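The gold next_line above completes references() by wrapping the assembled list in a width-constrained container via class_wrap, a dje.utils helper that is also absent from this record's context. A plausible minimal sketch, assuming it only wraps already-escaped markup in a classed <div> (this is a hypothetical reconstruction, not the actual DejaCode implementation):

from django.utils.html import format_html
from django.utils.safestring import mark_safe

def class_wrap(content, css_class):
    # Hypothetical helper: the caller built `content` from fragments that were
    # already escaped with format_html/format_html_join, so mark it safe here.
    return format_html('<div class="{}">{}</div>', css_class, mark_safe(content))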
26
2023-12-07 16:57:42+00:00
24k
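End of this record. Its fields fit together as one completion example: the context snippets, import_statement, and truncated cropped_code form the model input, next_line is the target, and gold_snippet_index marks the context entry expected to help. A minimal, hypothetical sketch of consuming such a record for evaluation (build_prompt and exact_match are illustrative names, not part of the dataset):

def build_prompt(record):
    # Concatenate retrieved context snippets, the import block, and the
    # truncated file body; the model is asked for the next source line.
    context = "\n\n".join(entry["snippet"] for entry in record["context"])
    return f"{context}\n\n{record['import_statement']}\n\n{record['cropped_code']}"

def exact_match(record, completion):
    # Compare the first generated line against the gold next_line.
    stripped = completion.strip()
    first_line = stripped.splitlines()[0] if stripped else ""
    return first_line.strip() == record["next_line"].strip()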
wusize/CLIM
src/open_clip/model.py
[ { "identifier": "HFTextEncoder", "path": "src/open_clip/hf_model.py", "snippet": "class HFTextEncoder(nn.Module):\n \"\"\"HuggingFace model adapter\"\"\"\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n model_name_or_path: str,\n output_dim: int,\n config: PretrainedConfig = None,\n pooler_type: str = None,\n proj: str = None,\n pretrained: bool = True,\n output_tokens: bool = False,\n ):\n super().__init__()\n self.output_tokens = output_tokens\n self.output_dim = output_dim\n\n # TODO: find better way to get this information\n uses_transformer_pooler = (pooler_type == \"cls_pooler\")\n\n if transformers is None:\n raise RuntimeError(\"Please `pip install transformers` to use pre-trained HuggingFace models\")\n if config is None:\n self.config = AutoConfig.from_pretrained(model_name_or_path)\n create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (\n AutoModel.from_config, self.config)\n # TODO: do all model configs have this attribute? PretrainedConfig does so yes??\n if hasattr(self.config, \"is_encoder_decoder\") and self.config.is_encoder_decoder:\n self.transformer = create_func(model_args)\n self.transformer = self.transformer.encoder\n else:\n self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)\n else:\n self.config = config\n self.transformer = AutoModel.from_config(config)\n if pooler_type is None: # get default arch pooler\n pooler_type = (arch_dict[self.config.model_type][\"pooler\"])\n \n self.pooler = _POOLERS[pooler_type]()\n\n d_model = getattr(self.config, arch_dict[self.config.model_type][\"config_names\"][\"width\"])\n if (d_model == output_dim) and (proj is None): # do we always need a proj?\n self.proj = nn.Identity()\n elif proj == 'linear':\n self.proj = nn.Linear(d_model, output_dim, bias=False)\n elif proj == 'mlp':\n hidden_size = (d_model + output_dim) // 2\n self.proj = nn.Sequential(\n nn.Linear(d_model, hidden_size, bias=False),\n nn.GELU(),\n nn.Linear(hidden_size, output_dim, bias=False),\n )\n\n def forward(self, x: TensorType):\n attn_mask = (x != self.config.pad_token_id).long()\n out = self.transformer(input_ids=x, attention_mask=attn_mask)\n pooled_out = self.pooler(out, attn_mask)\n projected = self.proj(pooled_out)\n\n seq_len = out.last_hidden_state.shape[1]\n tokens = (\n out.last_hidden_state[:, torch.arange(seq_len) != self.pooler.cls_token_position, :] \n if type(self.pooler) == ClsPooler \n else out.last_hidden_state\n )\n \n if self.output_tokens:\n return projected, tokens\n return projected\n\n def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n if not unlocked_layers: # full freezing\n for n, p in self.transformer.named_parameters():\n p.requires_grad = (not freeze_layer_norm) if \"LayerNorm\" in n.split(\".\") else False\n return\n\n encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer\n layer_list = getattr(encoder, arch_dict[self.config.model_type][\"config_names\"][\"layer_attr\"])\n print(f\"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model\")\n embeddings = getattr(\n self.transformer, arch_dict[self.config.model_type][\"config_names\"][\"token_embeddings_attr\"])\n modules = [embeddings, *layer_list][:-unlocked_layers]\n # freeze layers\n for module in modules:\n for n, p in module.named_parameters():\n p.requires_grad = (not freeze_layer_norm) if \"LayerNorm\" in n.split(\".\") else False\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, 
enable=True):\n self.transformer.gradient_checkpointing_enable()\n\n def init_parameters(self):\n pass" }, { "identifier": "ModifiedResNet", "path": "src/open_clip/modified_resnet.py", "snippet": "class ModifiedResNet(nn.Module):\n \"\"\"\n A ResNet class that is similar to torchvision's but contains the following changes:\n - There are now 3 \"stem\" convolutions as opposed to 1, with an average pool instead of a max pool.\n - Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1\n - The final pooling layer is a QKV attention instead of an average pool\n \"\"\"\n\n def __init__(self, layers, output_dim, heads, image_size=224, width=64,\n freeze_output=True,\n freeze_all_bns=True):\n super().__init__()\n self.output_dim = output_dim\n self.image_size = image_size\n self.freeze_output = freeze_output\n self.freeze_all_bns = freeze_all_bns\n # the 3-layer stem\n self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)\n self.bn1 = nn.BatchNorm2d(width // 2)\n self.act1 = nn.ReLU(inplace=True)\n self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(width // 2)\n self.act2 = nn.ReLU(inplace=True)\n self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)\n self.bn3 = nn.BatchNorm2d(width)\n self.act3 = nn.ReLU(inplace=True)\n self.avgpool = nn.AvgPool2d(2)\n\n # residual layers\n self._inplanes = width # this is a *mutable* variable used during construction\n self.layer1 = self._make_layer(width, layers[0])\n self.layer2 = self._make_layer(width * 2, layers[1], stride=2)\n self.layer3 = self._make_layer(width * 4, layers[2], stride=2)\n self.layer4 = self._make_layer(width * 8, layers[3], stride=2)\n\n embed_dim = width * 32 # the ResNet feature dimension\n self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim, freeze_output)\n self.attnpool_input_size = image_size // 32\n\n def _make_layer(self, planes, blocks, stride=1):\n layers = [Bottleneck(self._inplanes, planes, stride)]\n\n self._inplanes = planes * Bottleneck.expansion\n for _ in range(1, blocks):\n layers.append(Bottleneck(self._inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=True):\n assert freeze_bn_stats\n def _lock(module):\n for param in module.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(module)\n module.eval()\n\n freeze_at = 5 - unlocked_groups\n print(f'Freeze the resnet at {freeze_at}', flush=True)\n\n if freeze_at >= 1: # stem\n _lock(self.conv1)\n _lock(self.bn1)\n _lock(self.conv2)\n _lock(self.bn2)\n _lock(self.conv3)\n _lock(self.bn3)\n # each stage is a torch.nn.modules.container.Sequential\n for idx, stage in enumerate([self.layer1, self.layer2, self.layer3, self.layer4], start=2):\n if freeze_at >= idx:\n for block in stage.children(): # each block is a Bottleneck\n _lock(block)\n if self.freeze_all_bns:\n print(f'Freeze all bn layers', flush=True) # TODO: study if this is necessary\n freeze_batch_norm_2d(self)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n # FIXME support for non-transformer\n pass\n\n def stem(self, x):\n x = self.act1(self.bn1(self.conv1(x)))\n x = self.act2(self.bn2(self.conv2(x)))\n x = self.act3(self.bn3(self.conv3(x)))\n x = self.avgpool(x)\n return x\n\n def forward(self, x):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = 
self.layer3(x)\n x = self.layer4(x)\n x = self.attnpool(x)\n\n return x\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v2'):\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n else:\n assert extract_type == 'v2'\n return self._extract_roi_features_v2(x, normed_boxes)\n\n def mask_attn_pool(self, image, masks):\n return self.mask_pool(image, masks)\n\n def mask_pool(self, image, masks):\n x = self.stem(image)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n feature_map = self.attnpool.forward_dense(x)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n\n feature_map = feature_map.flatten(-2, -1) # bs, c, h*w\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks[:, None]).sum(-1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def _extract_roi_features_v1(self, x, normed_boxes, **kwargs):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.attnpool.forward_dense(x)\n x = F.normalize(x, dim=1) # remember to normalize!\n # TODO: debug\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (1, 1), 1.0, -1, True)[:, :, 0, 0]\n return roi_feats\n\n def _extract_roi_features_v2(self, x, normed_boxes, **kwargs):\n with torch.no_grad():\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x) # only the last layer is finetuned in our implementation\n\n tar_size = self.attnpool_input_size\n # TODO: debug\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x),\n (tar_size, tar_size), 1.0, -1, True)\n\n roi_feats = self.attnpool(roi_feats)\n\n return roi_feats\n\n def encode_dense(self, x, keep_shape=True):\n x = self.stem(x)\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n feature_map = self.attnpool.forward_dense(x)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n\n return feature_map" }, { "identifier": "TimmModel", "path": "src/open_clip/timm_model.py", "snippet": "class TimmModel(nn.Module):\n \"\"\" timm model adapter\n \"\"\"\n\n def __init__(\n self,\n model_name,\n embed_dim,\n image_size=224,\n pool='avg',\n proj='linear',\n proj_bias=False,\n drop=0.,\n drop_path=None,\n patch_drop=None,\n pretrained=False,\n ):\n super().__init__()\n if timm is None:\n raise RuntimeError(\"Please `pip install timm` to use timm models.\")\n self.image_size = to_2tuple(image_size)\n\n # setup kwargs that may not be common across all models\n timm_kwargs = {}\n if drop_path is not None:\n timm_kwargs['drop_path_rate'] = drop_path\n if patch_drop is not None:\n timm_kwargs['patch_drop_rate'] = patch_drop\n\n custom_pool = pool in ('abs_attn', 'rot_attn')\n if not proj and not custom_pool:\n # use network classifier head as projection if no proj specified and no custom pooling used\n self.trunk = 
timm.create_model(\n model_name,\n num_classes=embed_dim,\n global_pool=pool,\n pretrained=pretrained,\n **timm_kwargs,\n )\n prev_chs = embed_dim\n else:\n self.trunk = timm.create_model(\n model_name,\n pretrained=pretrained,\n **timm_kwargs,\n )\n feat_size = self.trunk.default_cfg.get('pool_size', None)\n feature_ndim = 1 if not feat_size else 2\n if custom_pool:\n assert feature_ndim == 2\n # if attn pooling used, remove both classifier and default pool\n self.trunk.reset_classifier(0, global_pool='')\n else:\n # reset global pool if pool config set, otherwise leave as network default\n reset_kwargs = dict(global_pool=pool) if pool else {}\n self.trunk.reset_classifier(0, **reset_kwargs)\n prev_chs = self.trunk.num_features\n\n head_layers = OrderedDict()\n\n # Add custom pooling to head\n if pool == 'abs_attn':\n head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)\n prev_chs = embed_dim\n elif pool == 'rot_attn':\n head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)\n prev_chs = embed_dim\n\n # NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used\n if proj == 'linear':\n head_layers['drop'] = nn.Dropout(drop)\n head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)\n elif proj == 'mlp':\n head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=(drop, 0), bias=(True, proj_bias))\n else:\n assert not proj, f'Unknown projection type {proj}.'\n\n self.head = nn.Sequential(head_layers)\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n \"\"\" lock modules\n Args:\n unlocked_groups (int): leave last n layer groups unlocked (default: 0)\n \"\"\"\n if not unlocked_groups:\n # lock full model\n for param in self.trunk.parameters():\n param.requires_grad = False\n if freeze_bn_stats:\n freeze_batch_norm_2d(self.trunk)\n else:\n # NOTE: partial freeze requires latest timm (master) branch and is subject to change\n try:\n # FIXME import here until API stable and in an official release\n from timm.models.helpers import group_parameters, group_modules\n except ImportError:\n raise RuntimeError(\n 'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')\n matcher = self.trunk.group_matcher()\n gparams = group_parameters(self.trunk, matcher)\n max_layer_id = max(gparams.keys())\n max_layer_id = max_layer_id - unlocked_groups\n for group_idx in range(max_layer_id + 1):\n group = gparams[group_idx]\n for param in group:\n self.trunk.get_parameter(param).requires_grad = False\n if freeze_bn_stats:\n gmodules = group_modules(self.trunk, matcher, reverse=True)\n gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}\n freeze_batch_norm_2d(self.trunk, gmodules)\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n try:\n self.trunk.set_grad_checkpointing(enable)\n except Exception as e:\n logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')\n\n def forward(self, x):\n x = self.trunk(x)\n x = self.head(x)\n return x\n\n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def _extract_roi_features_v1(self, x, normed_boxes, **kwargs):\n h, w = x.shape[-2:]\n x = 
self.trunk.forward_features(x)\n h_f, w_f = x.shape[-2:]\n tar_h = (self.image_size[0] * h_f) // h\n tar_w = (self.image_size[1] * w_f) // w\n x = roi_align(x, self._denormalize_boxes(normed_boxes, x), (tar_h, tar_w),\n 1.0, -1, True)\n\n x = self.trunk.forward_head(x)\n x = self.head(x)\n\n return x\n\n def encode_dense(self, x, **kwargs):\n x = self.trunk.forward_features(x)\n x = self.dense_trunk_head(x)\n x = self.head(x)\n x = x.permute(0, 3, 1, 2)\n\n return x\n\n def dense_trunk_head(self, x):\n x = self.trunk.head.norm(x)\n x = x.permute(0, 2, 3, 1)\n x = self.trunk.head.drop(x)\n # x = x.permute(0, 3, 1, 2)\n\n return x\n\n def mask_pool(self, image, masks):\n feature_map = self.encode_dense(image)\n feature_map = F.normalize(feature_map, dim=1) # remember to normalize!\n feature_map = feature_map.flatten(-2, -1) # bs, c, h*w\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks[:, None]).sum(-1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v1'):\n assert extract_type == \"v1\"\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n else:\n assert extract_type == 'v2'\n return self._extract_roi_features_v2(x, normed_boxes)\n\n def _extract_roi_features_v2(self, x, normed_boxes, **kwargs):\n x = self.encode_dense(x)\n x = F.normalize(x, dim=1) # remember to normalize!\n\n roi_feats = roi_align(x, self._denormalize_boxes(normed_boxes, x), (1, 1),\n 1.0, -1, True)[..., 0, 0]\n return roi_feats\n\n def encode_rois_and_image(self, x, normed_boxes, **kwargs):\n h, w = x.shape[-2:]\n x = self.trunk.forward_features(x)\n h_f, w_f = x.shape[-2:]\n tar_h = (self.image_size[0] * h_f) // h\n tar_w = (self.image_size[1] * w_f) // w\n x_image = x\n x_rois = roi_align(x, self._denormalize_boxes(normed_boxes, x), (tar_h, tar_w),\n 1.0, -1, True)\n\n x_rois = self.trunk.forward_head(x_rois)\n x_rois = self.head(x_rois)\n x_rois = F.normalize(x_rois, dim=-1)\n\n x_image = self.trunk.forward_head(x_image)\n x_image = self.head(x_image)\n x_image = F.normalize(x_image, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "LayerNormFp32", "path": "src/open_clip/transformer.py", "snippet": "class LayerNormFp32(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "LayerNorm", "path": "src/open_clip/transformer.py", "snippet": "class LayerNorm(nn.LayerNorm):\n \"\"\"Subclass torch's LayerNorm (with cast back to input dtype).\"\"\"\n\n def forward(self, x: torch.Tensor):\n orig_type = x.dtype\n x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n return x.to(orig_type)" }, { "identifier": "QuickGELU", "path": "src/open_clip/transformer.py", "snippet": "class QuickGELU(nn.Module):\n # NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory\n def forward(self, x: torch.Tensor):\n return x * torch.sigmoid(1.702 * x)" }, { "identifier": "Attention", "path": "src/open_clip/transformer.py", "snippet": "class Attention(nn.Module):\n def __init__(\n self,\n dim,\n 
num_heads=8,\n qkv_bias=True,\n scaled_cosine=False,\n scale_heads=False,\n logit_scale_max=math.log(1. / 0.01),\n attn_drop=0.,\n proj_drop=0.\n ):\n super().__init__()\n self.scaled_cosine = scaled_cosine\n self.scale_heads = scale_heads\n assert dim % num_heads == 0, 'dim should be divisible by num_heads'\n self.num_heads = num_heads\n self.head_dim = dim // num_heads\n self.scale = self.head_dim ** -0.5\n self.logit_scale_max = logit_scale_max\n\n # keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original\n self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)\n if qkv_bias:\n self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))\n else:\n self.in_proj_bias = None\n\n if self.scaled_cosine:\n self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))\n else:\n self.logit_scale = None\n self.attn_drop = nn.Dropout(attn_drop)\n if self.scale_heads:\n self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))\n else:\n self.head_scale = None\n self.out_proj = nn.Linear(dim, dim)\n self.out_drop = nn.Dropout(proj_drop)\n\n def forward(self, x, attn_mask: Optional[torch.Tensor] = None):\n L, N, C = x.shape\n q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)\n q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)\n\n if self.logit_scale is not None:\n attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))\n logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()\n attn = attn.view(N, self.num_heads, L, L) * logit_scale\n attn = attn.view(-1, L, L)\n else:\n q = q * self.scale\n attn = torch.bmm(q, k.transpose(-1, -2))\n\n if attn_mask is not None:\n if attn_mask.dtype == torch.bool:\n new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)\n new_attn_mask.masked_fill_(attn_mask, float(\"-inf\"))\n attn_mask = new_attn_mask\n attn += attn_mask\n\n attn = attn.softmax(dim=-1)\n attn = self.attn_drop(attn)\n\n x = torch.bmm(attn, v)\n if self.head_scale is not None:\n x = x.view(N, self.num_heads, L, C) * self.head_scale\n x = x.view(-1, L, C)\n x = x.transpose(0, 1).reshape(L, N, C)\n x = self.out_proj(x)\n x = self.out_drop(x)\n return x" }, { "identifier": "VisionTransformer", "path": "src/open_clip/transformer.py", "snippet": "class VisionTransformer(nn.Module):\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n image_size: int,\n patch_size: int,\n width: int,\n layers: int,\n heads: int,\n mlp_ratio: float,\n ls_init_value: float = None,\n global_average_pool: bool = False,\n attentional_pool: bool = False,\n n_queries: int = 256,\n attn_pooler_heads: int = 8,\n output_dim: int = 512,\n patch_dropout: float = 0.,\n input_patchnorm: bool = False,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n output_tokens: bool = False\n ):\n super().__init__()\n self.output_tokens = output_tokens\n image_height, image_width = self.image_size = to_2tuple(image_size)\n patch_height, patch_width = self.patch_size = to_2tuple(patch_size)\n self.grid_size = (image_height // patch_height, image_width // patch_width)\n self.output_dim = output_dim\n\n # whether to layernorm each patch, as done in dual patchnorm paper - https://arxiv.org/abs/2302.01327v1\n self.input_patchnorm = input_patchnorm\n assert not input_patchnorm\n if input_patchnorm:\n patch_input_dim = 
patch_height * patch_width * 3\n self.patchnorm_pre_ln = LayerNorm(patch_input_dim)\n self.conv1 = nn.Linear(patch_input_dim, width)\n else:\n self.patchnorm_pre_ln = nn.Identity()\n self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)\n\n # class embeddings and positional embeddings\n scale = width ** -0.5\n self.class_embedding = nn.Parameter(scale * torch.randn(width))\n self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))\n\n # setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn\n self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()\n\n self.ln_pre = norm_layer(width)\n self.transformer = Transformer(\n width,\n layers,\n heads,\n mlp_ratio,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.num_heads = heads\n\n self.global_average_pool = global_average_pool\n if attentional_pool:\n self.attn_pool = AttentionalPooler(output_dim, width, n_head=attn_pooler_heads, n_queries=n_queries)\n self.ln_post = norm_layer(output_dim)\n self.proj = nn.Parameter(scale * torch.randn(output_dim, output_dim))\n else:\n self.attn_pool = None\n self.ln_post = norm_layer(width)\n self.proj = nn.Parameter(scale * torch.randn(width, output_dim))\n\n self.init_parameters()\n\n def lock(self, unlocked_groups=0, freeze_bn_stats=False):\n for param in self.parameters():\n param.requires_grad = False\n\n if unlocked_groups != 0:\n groups = [\n [\n self.conv1,\n self.class_embedding,\n self.ln_pre,\n ],\n self.positional_embedding,\n *self.transformer.resblocks[:-1],\n [\n self.transformer.resblocks[-1],\n # self.ln_post, # fix layer norm\n ],\n # self.proj, # fix output layers\n ]\n\n def _unlock(x):\n if isinstance(x, Sequence):\n for g in x:\n _unlock(g)\n else:\n if isinstance(x, torch.nn.Parameter):\n x.requires_grad = True\n else:\n for p in x.parameters():\n p.requires_grad = True\n\n _unlock(groups[-unlocked_groups:])\n\n def attention_lock(self, **kwargs):\n for name, params in self.named_parameters():\n params.requires_grad = True if \"attn\" in name or \"position\" in name else False\n\n def init_parameters(self):\n # FIXME OpenAI CLIP did not define an init for the VisualTransformer\n # TODO experiment if default PyTorch init, below, or alternate init is best.\n pass\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:\n if self.global_average_pool:\n return x.mean(dim=1), x\n else:\n return x[:, 0], x[:, 1:]\n\n def forward(self, x: torch.Tensor):\n\n # to patches - whether to use dual patchnorm - https://arxiv.org/abs/2302.01327v1\n # if self.input_patchnorm:\n # # einops - rearrange(x, 'b c (h p1) (w p2) -> b (h w) (c p1 p2)')\n # x = x.reshape(x.shape[0], x.shape[1], self.grid_size[0], self.patch_size[0], self.grid_size[1], self.patch_size[1])\n # x = x.permute(0, 2, 4, 1, 3, 5)\n # x = x.reshape(x.shape[0], self.grid_size[0] * self.grid_size[1], -1)\n # x = self.patchnorm_pre_ln(x)\n # x = self.conv1(x)\n # else:\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n 
[self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n # TODO: Allow interpolating the positional embeddings\n\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n if self.output_tokens:\n return pooled, tokens\n \n return pooled\n\n def post_attention(self, x):\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n if self.output_tokens:\n return pooled, tokens\n\n return pooled\n\n def extract_roi_features(self, x, normed_boxes, extract_type='v2'):\n if extract_type == 'v1':\n return self._extract_roi_features_v1(x, normed_boxes)\n elif extract_type == 'v2':\n return self._extract_roi_features_v2(x, normed_boxes)\n else:\n raise NotImplementedError\n # assert extract_type == 'v3'\n # return self._extract_roi_features_v3(x, normed_boxes)\n\n def mask_pool(self, x, masks):\n feature_map = self.encode_dense(x)\n feature_map = F.normalize(feature_map, dim=-1)\n\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n features = (feature_map * masks.unsqueeze(-1)).sum(1) / (masks.sum(1, keepdim=True) + 1e-12)\n\n return features\n\n def mask_features(self, x, masks):\n feature_map = self.encode_dense(x)\n feature_map = F.normalize(feature_map, dim=-1)\n\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).flatten(-2, -1) > 0 # bs, h*w\n feature_map = torch.repeat_interleave(\n feature_map, torch.tensor(num_masks_per_image, device=feature_map.device), dim=0)\n\n mask_features = [f[m] for m, f in zip(masks, feature_map)]\n\n return mask_features\n\n def encode_dense(self, x, keep_shape=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer.extract_feature_map(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n\n feature_map = tokens.view(bs, h * w, -1) # .permute(0, 3, 1, 2)\n feature_map = F.normalize(feature_map, dim=-1) # normalize at the last dimension\n if keep_shape:\n feature_map = feature_map.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return feature_map\n\n def mask_crop(self, x, masks):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n num_masks_per_image = [len(masks_per_image) for masks_per_image in masks]\n masks = torch.cat(masks).to(x) # bs, h, w\n x = torch.repeat_interleave(\n x, torch.tensor(num_masks_per_image, device=x.device), dim=0)\n x = x * masks[:, None]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n\n # class embeddings and positional embeddings\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n # TODO: Allow interpolating the positional embeddings\n\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n pooled, tokens = self._global_pool(x)\n else:\n pooled, tokens = self._global_pool(x)\n pooled = self.ln_post(pooled)\n\n if self.proj is not None:\n pooled = pooled @ self.proj\n\n return pooled\n\n @staticmethod\n def _generate_masks_per_image(normed_boxes, mask_h, mask_w):\n num_boxes = len(normed_boxes)\n boxes = normed_boxes * torch.tensor(\n [[mask_w, mask_h, mask_w, mask_h]], device=normed_boxes.device)\n masks = torch.zeros(num_boxes, mask_h, mask_w,\n dtype=torch.bool, device=normed_boxes.device)\n for i, box in enumerate(boxes):\n x0, y0, x1, y1 = box.long().tolist()\n masks[i, y0:y1, x0:x1] = True\n\n return masks\n \n @staticmethod\n def _denormalize_boxes(normed_boxes, x):\n h, w = x.shape[-2:]\n denormed_boxes = []\n for boxes in normed_boxes:\n new_boxes = boxes.clone() # FIXME: do not change the value in normed_boxes!\n new_boxes[:, [0, 2]] *= w\n new_boxes[:, [1, 3]] *= h\n denormed_boxes.append(new_boxes)\n return denormed_boxes\n\n def _extract_roi_features_v1(self, x, normed_boxes):\n # used masks\n bs, _, h, w = x.shape\n patch_height, patch_width = self.patch_size\n mask_h, mask_w = h // patch_height, w // patch_width\n masks = [self._generate_masks_per_image(normed_boxes_, mask_h, mask_w)\n for normed_boxes_ in normed_boxes]\n\n return self.mask_attn_pool(x, masks)\n\n def _extract_roi_features_v3(self, x, normed_boxes): # v3 for extract two types\n # used masks\n bs, _, h, w = x.shape\n patch_height, patch_width = self.patch_size\n mask_h, mask_w = h // patch_height, w // patch_width\n masks = [self._generate_masks_per_image(normed_boxes_, mask_h, 
mask_w)\n for normed_boxes_ in normed_boxes]\n\n roi_features_v1, dense_x = self.mask_attn_pool(x, masks, return_dense=True)\n dense_x = F.normalize(dense_x, dim=-1) # normalize along last dimension\n dense_x = dense_x.permute(0, 3, 1, 2)\n roi_features_v2 = roi_align(dense_x, self._denormalize_boxes(normed_boxes, dense_x), \n (1, 1), 1.0, -1, True)[..., 0, 0]\n\n return roi_features_v1, roi_features_v2\n\n def _extract_roi_features_v2(self, x, normed_boxes):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer.extract_feature_map(x)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n tokens = F.normalize(tokens, dim=-1) # normalize along last dimension\n tokens = tokens.view(bs, h, w, -1).permute(0, 3, 1, 2)\n return roi_align(tokens, self._denormalize_boxes(normed_boxes, tokens),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n\n def rescale_positional_embedding(self, out_size, dtype):\n h, w = out_size\n rescaled_positional_embedding = \\\n self.positional_embedding.new_zeros(1 + h*w, self.positional_embedding.shape[1])\n rescaled_positional_embedding[0] = self.positional_embedding[0]\n pe_2d = self.positional_embedding[1:].T.contiguous().view(\n 1, -1, *self.grid_size)\n pe_2d = F.interpolate(pe_2d, out_size, mode='bicubic', align_corners=False).view(-1, h*w)\n rescaled_positional_embedding[1:] = pe_2d.T.contiguous()\n\n return rescaled_positional_embedding.to(dtype=dtype)\n\n def _mask_attn_pool(self, x: torch.Tensor, attn_mask: torch.Tensor, num_mask_tokens: int, return_dense=False):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [\n self.class_embedding.to(x.dtype)\n + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x,\n ],\n dim=1,\n ) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n cls_embed = x[0:1]\n cls_embed = cls_embed.expand(num_mask_tokens, -1, -1)\n x = torch.cat([cls_embed, x], dim=0)\n if return_dense:\n x, x_dense = self.transformer.forward_image_dense(x, attn_mask)\n x_dense = x_dense.permute(1, 0, 2) # LND -> NLD\n x_dense = x_dense[:, num_mask_tokens + 1:]\n\n x_dense = self.ln_post(x_dense)\n\n if 
self.proj is not None:\n x_dense = x_dense @ self.proj\n x_dense = F.normalize(x_dense, dim=-1) # normalize along last dimension\n x_dense = x_dense.view(bs, h, w, -1)\n else:\n x = self.transformer(x, attn_mask)\n x_dense = None\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # [N, L, D]\n x = self.ln_post(x[:, :num_mask_tokens, :])\n\n if self.proj is not None:\n x = torch.einsum(\"nld,dc->nlc\", x, self.proj)\n\n return x, x_dense\n\n def mask_attn_pool(self, image, masks, return_dense=False):\n assert hasattr(self, \"positional_embedding\")\n batch_size = image.shape[0]\n assert batch_size == len(masks)\n num_masks_per_image = [mask.shape[0] for mask in masks]\n num_queries = max(num_masks_per_image)\n mask_h, mask_w = masks[0].shape[1:]\n\n batch_masks = torch.ones(batch_size, num_queries, mask_h, mask_w, dtype=torch.bool).to(image.device)\n for batch_id, mask in enumerate(masks):\n batch_masks[batch_id, :mask.shape[0]] = mask\n\n mask_token_attn_mask = torch.logical_not(batch_masks)\n # [B, Q, H//P x W//P]\n mask_token_attn_mask = mask_token_attn_mask.reshape(batch_size, num_queries, -1)\n\n num_mask_token = num_queries\n num_image_cls_token = (mask_h * mask_w + 1)\n num_image_token = num_image_cls_token - 1\n num_all_token = num_mask_token + num_image_cls_token\n\n # we start with no mask out\n attn_mask = torch.zeros(\n (num_all_token, num_all_token), dtype=torch.bool, device=image.device\n )\n\n # mask+cls+image token to mask token attention is masked out\n attn_mask[:, :num_mask_token] = True\n\n attn_mask = attn_mask.unsqueeze(0).repeat_interleave(batch_size, dim=0)\n attn_mask[:, :num_mask_token, -num_image_token:] = mask_token_attn_mask\n num_heads = self.num_heads # head width 64\n attn_mask = attn_mask.unsqueeze(1).expand(-1, num_heads, -1, -1)\n attn_mask = attn_mask.reshape(batch_size * num_heads, num_all_token, num_all_token)\n\n batch_mask_features, x_dense = self._mask_attn_pool(image, attn_mask, num_mask_token,\n return_dense=return_dense)\n\n mask_features = [batch_mask_features[batch_id, :num_masks]\n for batch_id, num_masks, in enumerate(num_masks_per_image)]\n if return_dense:\n # x_dense = F.normalize(x_dense, dim=-1).flatten(1, 2) # bs, h*w, c\n # masks = torch.cat(masks).float().flatten(-2, -1) # bs, h*w\n # x_dense = torch.repeat_interleave(\n # x_dense, torch.tensor(num_masks_per_image, device=x_dense.device), dim=0)\n # x_dense = (x_dense * masks.unsqueeze(-1)).sum(1) / masks.sum(1, keepdim=True)\n\n return torch.cat(mask_features), x_dense\n else:\n return torch.cat(mask_features)\n\n def encode_rois_and_image(self, x, normed_boxes):\n x = self.conv1(x) # shape = [*, width, grid, grid]\n bs, _, h, w = x.shape\n # assert h == w # TODO: support input of any shape, need to change the normed boxes to real boxes\n x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]\n x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]\n x = torch.cat(\n [self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),\n x], dim=1) # shape = [*, grid ** 2 + 1, width]\n if (h, w) == self.grid_size:\n pe = self.positional_embedding.to(x.dtype)\n else:\n pe = self.rescale_positional_embedding(out_size=(h, w), dtype=x.dtype)\n\n x = x + pe\n\n # a patch_dropout of 0. 
would mean it is disabled and this function would do nothing but return what was passed in\n x = self.patch_dropout(x)\n x = self.ln_pre(x)\n\n x = x.permute(1, 0, 2) # NLD -> LND\n x, x_image = self.transformer.extract_feature_map(x, return_forward=True)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n if self.attn_pool is not None:\n x = self.attn_pool(x)\n x = self.ln_post(x)\n _, tokens = self._global_pool(x)\n else:\n _, tokens = self._global_pool(x)\n tokens = self.ln_post(tokens)\n\n if self.proj is not None:\n tokens = tokens @ self.proj\n\n feature_map = tokens.view(bs, h * w, -1) # .permute(0, 3, 1, 2)\n feature_map = F.normalize(feature_map, dim=-1)\n feature_map = feature_map.view(bs, h, w, -1).permute(0, 3, 1, 2)\n x_rois = roi_align(feature_map, self._denormalize_boxes(normed_boxes, feature_map),\n (1, 1), 1.0, -1, True)[..., 0, 0]\n x_rois = F.normalize(x_rois, dim=-1)\n\n x_image = self.post_attention(x_image)\n x_image = F.normalize(x_image, dim=-1)\n\n return x_rois, x_image" }, { "identifier": "TextTransformer", "path": "src/open_clip/transformer.py", "snippet": "class TextTransformer(nn.Module):\n output_tokens: torch.jit.Final[bool]\n\n def __init__(\n self,\n context_length: int = 77,\n vocab_size: int = 49408,\n width: int = 512,\n heads: int = 8,\n layers: int = 12,\n ls_init_value: float = None,\n output_dim: int = 512,\n act_layer: Callable = nn.GELU,\n norm_layer: Callable = LayerNorm,\n embed_cls: bool = False,\n pad_id: int = 0,\n output_tokens: bool = False,\n ):\n super().__init__()\n self.output_tokens = output_tokens\n self.num_pos = self.context_length = context_length\n self.vocab_size = vocab_size\n self.width = width\n self.output_dim = output_dim\n self.heads = heads\n self.pad_id = pad_id\n\n self.text_projection = nn.Parameter(torch.empty(width, output_dim))\n\n if embed_cls:\n self.cls_emb = nn.Parameter(torch.empty(width))\n self.num_pos += 1\n else:\n self.cls_emb = None\n\n self.token_embedding = nn.Embedding(vocab_size, width)\n self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))\n self.transformer = Transformer(\n width=width,\n layers=layers,\n heads=heads,\n ls_init_value=ls_init_value,\n act_layer=act_layer,\n norm_layer=norm_layer,\n )\n self.ln_final = norm_layer(width)\n\n self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)\n\n self.init_parameters()\n\n def init_parameters(self):\n nn.init.normal_(self.token_embedding.weight, std=0.02)\n nn.init.normal_(self.positional_embedding, std=0.01)\n if self.cls_emb is not None:\n nn.init.normal_(self.cls_emb, std=0.01)\n\n proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)\n attn_std = self.transformer.width ** -0.5\n fc_std = (2 * self.transformer.width) ** -0.5\n for block in self.transformer.resblocks:\n nn.init.normal_(block.attn.in_proj_weight, std=attn_std)\n nn.init.normal_(block.attn.out_proj.weight, std=proj_std)\n nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)\n nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)\n\n if self.text_projection is not None:\n nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)\n\n def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):\n assert unlocked_layers == 0 and freeze_layer_norm\n print(f'Freeze the text encoder', flush=True)\n for p in self.parameters():\n p.requires_grad = False\n\n @torch.jit.ignore\n def set_grad_checkpointing(self, enable=True):\n self.transformer.grad_checkpointing = enable\n\n def 
build_attention_mask(self):\n # lazily create causal attention mask, with full attention between the tokens\n # pytorch uses additive attention mask; fill with -inf\n mask = torch.empty(self.num_pos, self.num_pos)\n mask.fill_(float(\"-inf\"))\n mask.triu_(1) # zero out the lower diagonal\n return mask\n\n def build_cls_mask(self, text, cast_dtype: torch.dtype):\n cls_mask = (text != self.pad_id).unsqueeze(1)\n cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=1.0)\n additive_mask = torch.empty(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)\n additive_mask.fill_(0)\n additive_mask.masked_fill_(~cls_mask, float(\"-inf\"))\n additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)\n return additive_mask\n\n def _repeat(self, t, N: int):\n return t.reshape(1, 1, -1).repeat(N, 1, 1)\n\n def forward(self, text):\n cast_dtype = self.transformer.get_cast_dtype()\n seq_len = text.shape[1]\n\n x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]\n attn_mask = self.attn_mask\n if self.cls_emb is not None:\n seq_len += 1\n x = torch.cat([x, self._repeat(self.cls_emb, x.shape[0])], dim=1)\n cls_mask = self.build_cls_mask(text, cast_dtype)\n attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]\n\n x = x + self.positional_embedding[:seq_len].to(cast_dtype)\n x = x.permute(1, 0, 2) # NLD -> LND\n x = self.transformer(x, attn_mask=attn_mask)\n x = x.permute(1, 0, 2) # LND -> NLD\n\n # x.shape = [batch_size, n_ctx, transformer.width]\n # take features from the eot embedding (eot_token is the highest number in each sequence)\n if self.cls_emb is not None:\n pooled, tokens = x[:, -1], x[:, :-1]\n pooled = self.ln_final(pooled)\n else:\n x = self.ln_final(x)\n pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x\n\n if self.text_projection is not None:\n pooled = pooled @ self.text_projection\n\n if self.output_tokens:\n return pooled, tokens\n\n return pooled" }, { "identifier": "to_2tuple", "path": "src/open_clip/utils.py", "snippet": "def freeze_batch_norm_2d(module, module_match={}, name=''):\ndef _ntuple(n):\n def parse(x):" } ]
from dataclasses import dataclass from typing import Optional, Tuple, Union from torch import nn from torch.utils.checkpoint import checkpoint from .hf_model import HFTextEncoder from .modified_resnet import ModifiedResNet from .timm_model import TimmModel from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer from .utils import to_2tuple import logging import math import numpy as np import torch import torch.nn.functional as F
15880
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width
""" CLIP Model Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI. """ @dataclass class CLIPVisionCfg: layers: Union[Tuple[int, int, int, int], int] = 12 width: int = 768 head_width: int = 64 mlp_ratio: float = 4.0 patch_size: int = 16 image_size: Union[Tuple[int, int], int] = 224 ls_init_value: Optional[float] = None # layer scale initial value patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results input_patchnorm: bool = False # whether to use dual patchnorm - would only apply the input layernorm on each patch, as post-layernorm already exist in original clip vit design global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580) attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer n_queries: int = 256 # n_queries for attentional pooler attn_pooler_heads: int = 8 # n heads for attentional_pooling timm_model_name: str = None # a valid model name overrides layers, width, patch_size timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '') timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '') timm_proj_bias: bool = False # enable bias final projection timm_drop: float = 0. # head dropout timm_drop_path: Optional[float] = None # backbone stochastic depth output_tokens: bool = False freeze_output = True freeze_all_bns = True @dataclass class CLIPTextCfg: context_length: int = 77 vocab_size: int = 49408 width: int = 512 heads: int = 8 layers: int = 12 ls_init_value: Optional[float] = None # layer scale initial value hf_model_name: str = None hf_tokenizer_name: str = None hf_model_pretrained: bool = True proj: str = 'mlp' pooler_type: str = 'mean_pooler' embed_cls: bool = False pad_id: int = 0 output_tokens: bool = False def get_cast_dtype(precision: str): cast_dtype = None if precision == 'bf16': cast_dtype = torch.bfloat16 elif precision == 'fp16': cast_dtype = torch.float16 return cast_dtype def _build_vision_tower( embed_dim: int, vision_cfg: CLIPVisionCfg, quick_gelu: bool = False, cast_dtype: Optional[torch.dtype] = None ): if isinstance(vision_cfg, dict): vision_cfg = CLIPVisionCfg(**vision_cfg) # OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more # memory efficient in recent PyTorch releases (>= 1.10). # NOTE: timm models always use native GELU regardless of quick_gelu flag. 
act_layer = QuickGELU if quick_gelu else nn.GELU if vision_cfg.timm_model_name: visual = TimmModel( vision_cfg.timm_model_name, pretrained=vision_cfg.timm_model_pretrained, pool=vision_cfg.timm_pool, proj=vision_cfg.timm_proj, proj_bias=vision_cfg.timm_proj_bias, drop=vision_cfg.timm_drop, drop_path=vision_cfg.timm_drop_path, patch_drop=vision_cfg.patch_dropout if vision_cfg.patch_dropout > 0 else None, embed_dim=embed_dim, image_size=vision_cfg.image_size, ) act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models elif isinstance(vision_cfg.layers, (tuple, list)): vision_heads = vision_cfg.width * 32 // vision_cfg.head_width visual = ModifiedResNet( layers=vision_cfg.layers, output_dim=embed_dim, heads=vision_heads, image_size=vision_cfg.image_size, width=vision_cfg.width, freeze_output=vision_cfg.freeze_output, freeze_all_bns=vision_cfg.freeze_all_bns ) else: vision_heads = vision_cfg.width // vision_cfg.head_width
norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
3
2023-12-09 05:43:08+00:00
24k
LkPrtctrd/BSL-V53
Heart/Logic/LogicLaserMessageFactory.py
[ { "identifier": "ClientHelloMessage", "path": "Heart/Packets/Client/Authentification/ClientHelloMessage.py", "snippet": "class ClientHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Protocol\"] = self.readInt()\n fields[\"KeyVersion\"] = self.readInt()\n fields[\"MajorVersion\"] = self.readInt()\n fields[\"MinorVersion\"] = self.readInt()\n fields[\"Build\"] = self.readInt()\n fields[\"ContentHash\"] = self.readString()\n fields[\"DeviceType\"] = self.readInt()\n fields[\"AppStore\"] = self.readInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20100, fields, cryptoInit)\n\n def getMessageType(self):\n return 10100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginMessage", "path": "Heart/Packets/Client/Authentification/LoginMessage.py", "snippet": "class LoginMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"ClientMajor\"] = self.readInt()\n fields[\"ClientMinor\"] = self.readInt()\n fields[\"ClientBuild\"] = self.readInt()\n fields[\"ResourceSha\"] = self.readString()\n fields[\"Device\"] = self.readString()\n fields[\"PreferredLanguage\"] = self.readDataReference()\n fields[\"PreferredDeviceLanguage\"] = self.readString()\n fields[\"OSVersion\"] = self.readString()\n fields[\"isAndroid\"] = self.readBoolean()\n fields[\"IMEI\"] = self.readString()\n fields[\"AndroidID\"] = self.readString()\n fields[\"isAdvertisingEnabled\"] = self.readBoolean()\n fields[\"AppleIFV\"] = self.readString()\n fields[\"RndKey\"] = self.readInt()\n fields[\"AppStore\"] = self.readVInt()\n fields[\"ClientVersion\"] = self.readString()\n fields[\"TencentOpenId\"] = self.readString()\n fields[\"TencentToken\"] = self.readString()\n fields[\"TencentPlatform\"] = self.readVInt()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n fields[\"AppLicensingSignature\"] = self.readString()\n fields[\"DeviceVerifierResponse\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n if fields[\"ClientMajor\"]==53:\n calling_instance.player.ClientVersion = f'{str(fields[\"ClientMajor\"])}.{str(fields[\"ClientBuild\"])}.{str(fields[\"ClientMinor\"])}'\n fields[\"Socket\"] = calling_instance.client\n db_instance = DatabaseHandler()\n if db_instance.playerExist(fields[\"PassToken\"], fields[\"AccountID\"]):\n player_data = json.loads(db_instance.getPlayerEntry(fields[\"AccountID\"])[2])\n db_instance.loadAccount(calling_instance.player, fields[\"AccountID\"])\n else:\n db_instance.createAccount(calling_instance.player.getDataTemplate(fields[\"AccountID\"][0], fields[\"AccountID\"][1], fields[\"PassToken\"]))\n ClientsManager.AddPlayer(calling_instance.player.ID, calling_instance.client)\n Messaging.sendMessage(20104, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n Messaging.sendMessage(24399, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10101\n\n def getMessageVersion(self):\n 
return self.messageVersion" }, { "identifier": "AskForBattleEndMessage", "path": "Heart/Packets/Client/Battle/AskForBattleEndMessage.py", "snippet": "class AskForBattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"Unk1\"] = self.readVInt()\n fields[\"Result\"] = self.readVInt()\n fields[\"Rank\"] = self.readVInt()\n fields[\"MapID\"] = self.readDataReference()\n fields[\"HeroesCount\"] = self.readVInt()\n fields[\"Heroes\"] = []\n for i in range(fields[\"HeroesCount\"]): fields[\"Heroes\"].append({\"Brawler\": {\"ID\": self.readDataReference(), \"SkinID\": self.readDataReference()}, \"Team\": self.readVInt(), \"IsPlayer\": self.readBoolean(), \"PlayerName\": self.readString()})\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(23456, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14110\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ChangeAvatarNameMessage", "path": "Heart/Packets/Client/Home/ChangeAvatarNameMessage.py", "snippet": "class ChangeAvatarNameMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeString(fields[\"Name\"])\n self.writeBoolean(fields[\"NameSetByUser\"])\n\n def decode(self):\n fields = {}\n fields[\"Name\"] = self.readString()\n fields[\"NameSetByUser\"] = self.readBoolean()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n db_instance = DatabaseHandler()\n playerData = db_instance.getPlayer(calling_instance.player.ID)\n playerData[\"Name\"] = fields[\"Name\"]\n playerData[\"Registered\"] = True\n db_instance.updatePlayerData(playerData, calling_instance)\n fields[\"Socket\"] = calling_instance.client\n fields[\"Command\"] = {\"ID\": 201}\n Messaging.sendMessage(24111, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 10212\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "EndClientTurnMessage", "path": "Heart/Packets/Client/Home/EndClientTurnMessage.py", "snippet": "class EndClientTurnMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n fields[\"Tick\"] = self.readVInt()\n fields[\"Checksum\"] = self.readVInt()\n fields[\"CommandsCount\"] = self.readVInt()\n super().decode(fields)\n fields[\"Commands\"] = []\n for i in range(fields[\"CommandsCount\"]):\n fields[\"Commands\"].append({\"ID\": self.readVInt()})\n if LogicCommandManager.commandExist(fields[\"Commands\"][i][\"ID\"]):\n command = LogicCommandManager.createCommand(fields[\"Commands\"][i][\"ID\"])\n print(\"Command\", LogicCommandManager.getCommandsName(fields[\"Commands\"][i][\"ID\"]))\n if command is not None:\n fields[\"Commands\"][i][\"Fields\"] = command.decode(self)\n fields[\"Commands\"][i][\"Instance\"] = command\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n for command in fields[\"Commands\"]:\n if \"Instance\" not in command.keys():\n return\n\n if hasattr(command[\"Instance\"], 
'execute'):\n command[\"Instance\"].execute(calling_instance, command[\"Fields\"], cryptoInit)\n if command[\"ID\"] == 519:\n Messaging.sendMessage(24104, {\"Socket\": calling_instance.client, \"ServerChecksum\": 0, \"ClientChecksum\": 0, \"Tick\": 0}, cryptoInit)\n\n def getMessageType(self):\n return 14102\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeFromOfflinePractiseMessage", "path": "Heart/Packets/Client/Home/GoHomeFromOfflinePractiseMessage.py", "snippet": "class GoHomeFromOfflinePractiseMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14109\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GoHomeMessage", "path": "Heart/Packets/Client/Home/GoHomeMessage.py", "snippet": "class GoHomeMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n self.readBoolean()\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24101, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 17750\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "GetPlayerProfileMessage", "path": "Heart/Packets/Client/Home/GetPlayerProfileMessage.py", "snippet": "class GetPlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"BattleInfoBoolean\"] = self.readBoolean()\n if fields[\"BattleInfoBoolean\"]:\n fields[\"unk1\"] = self.readVInt()\n fields[\"AnotherID\"] = self.readLong()\n fields[\"unk2\"] = self.readVInt()\n for i in self.readVInt():\n fields[\"CsvID\"] = self.readDataReference()\n fields[\"unk3\"] = self.readVInt()\n fields[\"unk4\"] = self.readVInt()\n fields[\"unk5\"] = self.readVInt()\n fields[\"unk6\"] = self.readVInt()\n fields[\"PlayerName\"] = self.readString()\n fields[\"unk7\"] = self.readVInt()\n fields[\"Thumbnail\"] = self.readVInt()\n fields[\"NameColor\"] = self.readVInt()\n fields[\"unk10\"] = self.readVInt()\n fields[\"unk11\"] = self.readVInt()\n fields[\"PlayerHighID\"] = self.readInt()\n fields[\"PlayerLowID\"] = self.readInt()\n super().decode(fields)\n\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24113, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 15081\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AskForAllianceDataMessage", "path": "Heart/Packets/Client/Home/AskForAllianceDataMessage.py", "snippet": "class AskForAllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n fields = {}\n fields[\"id\"] = self.readVLong()\n fields[\"isInAlliance\"] = 
self.readBoolean()\n if fields[\"isInAlliance\"] == True:\n fields[\"anotherIDHigh\"] = self.readVInt()\n fields[\"anotherIDLow\"] = self.readVInt()\n super().decode(fields)\n\n return fields\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(24301, fields, cryptoInit, calling_instance.player)\n\n def getMessageType(self):\n return 14302\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveMessage", "path": "Heart/Packets/Client/Socket/KeepAliveMessage.py", "snippet": "class KeepAliveMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields, cryptoInit):\n fields[\"Socket\"] = calling_instance.client\n Messaging.sendMessage(20108, fields, cryptoInit)\n\n def getMessageType(self):\n return 10108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginFailedMessage", "path": "Heart/Packets/Server/Authentification/LoginFailedMessage.py", "snippet": "class LoginFailedMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeInt(fields['ErrorID'])\n self.writeString(fields['FingerprintData'])\n self.writeString()\n self.writeString(fields['ContentURL'])\n self.writeString()\n self.writeString(fields['Message'])\n self.writeInt(0)\n self.writeBoolean(False)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeString()\n self.writeVInt(0)\n self.writeString()\n self.writeBoolean(False)\n\n def decode(self):\n fields = {}\n fields[\"ErrorCode\"] = self.readInt()\n fields[\"ResourceFingerprintData\"] = self.readString()\n fields[\"RedirectDomain\"] = self.readString()\n fields[\"ContentURL\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"Reason\"] = self.readString()\n fields[\"SecondsUntilMaintenanceEnd\"] = self.readInt()\n fields[\"ShowContactSupportForBan\"] = self.readBoolean()\n fields[\"CompressedFingerprintData\"] = self.readBytesWithoutLength()\n fields[\"ContentURLListCount\"] = self.readInt()\n fields[\"ContentURLList\"] = []\n for i in range(fields[\"ContentURLListCount\"]):\n fields[\"ContentURLList\"].append(self.readString())\n fields[\"KunlunAppStore\"] = self.readInt()\n fields[\"MaintenanceType\"] = self.readInt()\n fields[\"HelpshiftFaqId\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"Unk1\"] = self.readBoolean()\n fields[\"Unk2\"] = self.readBoolean()\n fields[\"Unk3\"] = self.readString()\n fields[\"Unk4\"] = self.readVInt()\n fields[\"Unk5\"] = self.readString()\n fields[\"OptionalTargetedAccountIdState\"] = self.readBoolean()\n if fields[\"OptionalTargetedAccountIdState\"] == True:\n fields[\"OptionalTargetedAccountId\"] = self.readLong()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20103\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LoginOkMessage", "path": "Heart/Packets/Server/Authentification/LoginOkMessage.py", "snippet": "class LoginOkMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n 
self.messageVersion = 1\n\n def encode(self, fields, player):\n self.writeLong(player.ID[0], player.ID[1])\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(player.Token)\n self.writeString()\n self.writeString()\n self.writeInt(53)\n self.writeInt(176)\n self.writeInt(1)\n self.writeString(\"dev\")\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeString(\"RU\")\n self.writeString()\n self.writeInt(0)\n self.writeString()\n self.writeInt(2)\n self.writeString('https://game-assets.brawlstarsgame.com')\n self.writeString('http://a678dbc1c015a893c9fd-4e8cc3b1ad3a3c940c504815caefa967.r87.cf2.rackcdn.com')\n self.writeInt(2)\n self.writeString('https://event-assets.brawlstars.com')\n self.writeString('https://24b999e6da07674e22b0-8209975788a0f2469e68e84405ae4fcf.ssl.cf2.rackcdn.com/event-assets')\n self.writeVInt(0)\n self.writeCompressedString(b'')\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeString()\n self.writeString()\n self.writeString()\n self.writeString('https://play.google.com/store/apps/details?id=com.supercell.brawlstars')\n self.writeString()\n self.writeBoolean(False)\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n self.writeBoolean(False)\n if False:\n self.writeString()\n\n\n def decode(self):\n fields = {}\n fields[\"AccountID\"] = self.readLong()\n fields[\"HomeID\"] = self.readLong()\n fields[\"PassToken\"] = self.readString()\n fields[\"FacebookID\"] = self.readString()\n fields[\"GamecenterID\"] = self.readString()\n fields[\"ServerMajorVersion\"] = self.readInt()\n fields[\"ContentVersion\"] = self.readInt()\n fields[\"ServerBuild\"] = self.readInt()\n fields[\"ServerEnvironment\"] = self.readString()\n fields[\"SessionCount\"] = self.readInt()\n fields[\"PlayTimeSeconds\"] = self.readInt()\n fields[\"DaysSinceStartedPlaying\"] = self.readInt()\n fields[\"FacebookAppID\"] = self.readString()\n fields[\"ServerTime\"] = self.readString()\n fields[\"AccountCreatedDate\"] = self.readString()\n fields[\"StartupCooldownSeconds\"] = self.readInt()\n fields[\"GoogleServiceID\"] = self.readString()\n fields[\"LoginCountry\"] = self.readString()\n fields[\"KunlunID\"] = self.readString()\n fields[\"Tier\"] = self.readInt()\n fields[\"TencentID\"] = self.readString()\n\n ContentUrlCount = self.readInt()\n fields[\"GameAssetsUrls\"] = []\n for i in range(ContentUrlCount):\n fields[\"GameAssetsUrls\"].append(self.readString())\n\n EventUrlCount = self.readInt()\n fields[\"EventAssetsUrls\"] = []\n for i in range(EventUrlCount):\n fields[\"EventAssetsUrls\"].append(self.readString())\n\n fields[\"SecondsUntilAccountDeletion\"] = self.readVInt()\n fields[\"SupercellIDToken\"] = self.readCompressedString()\n fields[\"IsSupercellIDLogoutAllDevicesAllowed\"] = self.readBoolean()\n fields[\"isSupercellIDEligible\"] = self.readBoolean()\n fields[\"LineID\"] = self.readString()\n fields[\"SessionID\"] = self.readString()\n fields[\"KakaoID\"] = self.readString()\n fields[\"UpdateURL\"] = self.readString()\n fields[\"YoozooPayNotifyUrl\"] = self.readString()\n fields[\"UnbotifyEnabled\"] = self.readBoolean()\n\n Unknown1 = self.readBoolean()\n fields[\"Unknown1\"] = Unknown1\n if Unknown1:\n fields[\"Unknown2\"] = self.readString()\n\n Unknown3 = self.readBoolean()\n fields[\"Unknown3\"] = Unknown1\n 
if Unknown3:\n fields[\"Unknown4\"] = self.readString()\n\n Unknown5 = self.readBoolean()\n fields[\"Unknown5\"] = Unknown1\n if Unknown5:\n fields[\"Unknown6\"] = self.readString()\n\n Unknown7 = self.readBoolean()\n fields[\"Unknown7\"] = Unknown1\n if Unknown7:\n fields[\"Unknown8\"] = self.readString()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OutOfSyncMessage", "path": "Heart/Packets/Server/Authentification/OutOfSyncMessage.py", "snippet": "class OutOfSyncMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeVInt(fields[\"ServerChecksum\"])\n self.writeVInt(fields[\"ClientChecksum\"])\n self.writeVInt(fields[\"Tick\"])\n\n def decode(self):\n fields = {}\n fields[\"ServerChecksum\"] = self.readVInt()\n fields[\"ClientChecksum\"] = self.readVInt()\n fields[\"Tick\"] = self.readVInt()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24104\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "ServerHelloMessage", "path": "Heart/Packets/Server/Authentification/ServerHelloMessage.py", "snippet": "class ServerHelloMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n self.writeBytes(urandom(24), 24)\n\n def decode(self):\n fields = {}\n fields[\"Random\"] = self.readBytesWithoutLength()\n super().decode(fields)\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20100\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "BattleEndMessage", "path": "Heart/Packets/Server/Battle/BattleEndMessage.py", "snippet": "class BattleEndMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeLong(0, 0) # Battle UUID High\n self.writeLong(0, 0) # Battle UUID Low\n self.writeVInt(2) # Battle End Game Mode (gametype)\n self.writeVInt(fields[\"Rank\"]) # Result (Victory/Defeat/Draw/Rank Score)\n self.writeVInt(0) # Tokens Gained (Gained Keys)\n self.writeVInt(0) # Trophies Result (Metascore change)\n self.writeVInt(0) # Power Play Points Gained (Pro League Points)\n self.writeVInt(0) # Doubled Tokens (Double Keys)\n self.writeVInt(0) # Double Token Event (Double Event Keys)\n self.writeVInt(0) # Token Doubler Remaining (Double Keys Remaining)\n self.writeVInt(0) # game Lenght In Seconds\n self.writeVInt(0) # Epic Win Power Play Points Gained (op Win Points)\n self.writeVInt(0) # Championship Level Reached (CC Wins)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n\n self.writeVInt(fields[\"HeroesCount\"])\n for heroEntry in fields[\"Heroes\"]:\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n 
self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeBoolean(bool(heroEntry[\"Team\"]))\n self.writeByte(1)\n for i in range(1):\n self.writeDataReference(heroEntry[\"Brawler\"][\"ID\"][0], heroEntry[\"Brawler\"][\"ID\"][1])\n self.writeByte(1)\n for i in range(1):\n if (heroEntry[\"Brawler\"][\"SkinID\"] is None):\n self.writeVInt(0)\n else:\n self.writeDataReference(heroEntry[\"Brawler\"][\"SkinID\"][0], heroEntry[\"Brawler\"][\"SkinID\"][1])\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(1250)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(11)\n self.writeByte(1)\n for i in range(1):\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(heroEntry[\"IsPlayer\"])\n if heroEntry[\"IsPlayer\"]:\n self.writeLong(player.ID[0], player.ID[1])\n self.writeString(heroEntry[\"PlayerName\"])\n self.writeVInt(100)\n self.writeVInt(28000000)\n self.writeVInt(43000000)\n self.writeVInt(-2)\n if heroEntry[\"IsPlayer\"]:\n self.writeBoolean(True)\n self.writeVLong(5, 4181497)\n self.writeString('haccer club')\n self.writeDataReference(8, 16)\n else:\n self.writeBoolean(False)\n\n self.writeInt8(1)\n self.writeVInt(5978)\n self.writeInt8(1)\n self.writeVInt(0)\n\n self.writeInt16(5)\n self.writeInt16(3)\n self.writeInt(27328)\n self.writeInt(25659)\n\n self.writeDataReference(0)\n\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n self.writeBoolean(False) # 0x0\n\n def decode(self):\n fields = {}\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23456\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AvailableServerCommandMessage", "path": "Heart/Packets/Server/Home/AvailableServerCommandMessage.py", "snippet": "class AvailableServerCommandMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(fields[\"Command\"][\"ID\"])\n command = LogicCommandManager.createCommand(fields[\"Command\"][\"ID\"], self.messagePayload)\n self.messagePayload = command.encode(fields)\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24111\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "LobbyInfoMessage", "path": "Heart/Packets/Server/Home/LobbyInfoMessage.py", "snippet": "class LobbyInfoMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(ClientsManager.GetCount())\n self.writeString(f\"\"\"Version: 
{player.ClientVersion}\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\\n\"\"\")\n self.writeVInt(0) # count event\n self.writeVInt(0) # new timer in v51\n\n def decode(self):\n fields = {}\n fields[\"PlayerCount\"] = self.readVInt()\n fields[\"Text\"] = self.readString()\n fields[\"Unk1\"] = self.readVInt()\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 23457\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "OwnHomeDataMessage", "path": "Heart/Packets/Server/Home/OwnHomeDataMessage.py", "snippet": "class OwnHomeDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1688816070)\n self.writeVInt(1191532375)\n self.writeVInt(2023189)\n self.writeVInt(73530)\n\n self.writeVInt(player.Trophies)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(player.HighestTrophies) \n self.writeVInt(player.TrophyRoadTier)\n self.writeVInt(player.Experience)\n self.writeDataReference(28, player.Thumbnail)\n self.writeDataReference(43, player.Namecolor)\n\n self.writeVInt(26)\n for x in range(26):\n self.writeVInt(x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n \n self.writeVInt(len(player.OwnedSkins))\n for x in player.OwnedSkins:\n self.writeDataReference(29, x)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n\n self.writeVInt(0)\n self.writeVInt(player.HighestTrophies)\n self.writeVInt(0)\n self.writeVInt(2)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(115)\n self.writeVInt(335442)\n self.writeVInt(1001442)\n self.writeVInt(5778642) \n\n self.writeVInt(120)\n self.writeVInt(200)\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(1) # Shop Offers\n\n self.writeVInt(1) # RewardCount\n\n self.writeVInt(38) # ItemType\n self.writeVInt(1337) # Amount\n self.writeDataReference(0) # CsvID\n self.writeVInt(0) # SkinID\n\n self.writeVInt(0) # Currency(0-Gems, 1-Gold, 3-StarpoInts)\n self.writeVInt(0) # Cost\n self.writeVInt(0) # Time\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # Daily Offer\n self.writeVInt(0) # Old price\n self.writeString('Offer') # Text\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString(\"offer_bgr_xmas23\") # Background\n self.writeVInt(0)\n self.writeBoolean(False) # This purchase is already being processed\n self.writeVInt(0) # Type Benefit\n self.writeVInt(0) # Benefit\n self.writeString()\n self.writeBoolean(False) # One time offer\n self.writeBoolean(False) # Claimed\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n \n self.writeVInt(20)\n 
self.writeVInt(1428)\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n self.writeVInt(30)\n\n self.writeByte(1) # count brawlers selected\n self.writeDataReference(16, player.SelectedBrawlers[0]) # selected brawler\n self.writeString(player.Region) # location\n self.writeString(player.ContentCreator) # supported creator\n\n self.writeVInt(6) \n self.writeVInt(1) \n self.writeVInt(9) \n self.writeVInt(1) \n self.writeVInt(22) \n self.writeVInt(3) \n self.writeVInt(25) \n self.writeVInt(1) \n self.writeVInt(24) \n self.writeVInt(0)\n self.writeVInt(15)\n self.writeVInt(32447)\n self.writeVInt(28)\n\n\n self.writeVInt(0)\n\n self.writeVInt(1)\n for season in range(1):\n self.writeVInt(22-1)\n self.writeVInt(40000)\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeBoolean(True)\n self.writeBoolean(True)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n self.writeInt(0)\n\n self.writeVInt(0)\n\n self.writeBoolean(True)\n self.writeVInt(0)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(0) \n\n self.writeBoolean(True) # Vanity items\n self.writeVInt(len(player.OwnedThumbnails)+len(player.OwnedPins))\n for x in player.OwnedThumbnails:\n self.writeVInt(28)\n self.writeVInt(x)\n self.writeVInt(0)\n for x in player.OwnedPins:\n self.writeVInt(52)\n self.writeVInt(x)\n self.writeVInt(0)\n\n\n self.writeBoolean(False) # Power league season data\n\n self.writeInt(0)\n self.writeVInt(0)\n self.writeVInt(16)\n self.writeVInt(76)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2023189)\n\n self.writeVInt(35) # event slot id\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(3)\n self.writeVInt(4)\n self.writeVInt(5)\n self.writeVInt(6)\n self.writeVInt(7)\n self.writeVInt(8)\n self.writeVInt(9)\n self.writeVInt(10)\n self.writeVInt(11)\n self.writeVInt(12)\n self.writeVInt(13) \n self.writeVInt(14)\n self.writeVInt(15)\n self.writeVInt(16)\n self.writeVInt(17)\n self.writeVInt(18) \n self.writeVInt(19)\n self.writeVInt(20)\n self.writeVInt(21) \n self.writeVInt(22)\n self.writeVInt(23)\n self.writeVInt(24)\n self.writeVInt(25)\n self.writeVInt(26)\n self.writeVInt(27)\n self.writeVInt(28)\n self.writeVInt(29)\n self.writeVInt(30)\n self.writeVInt(31)\n self.writeVInt(32)\n self.writeVInt(33)\n self.writeVInt(34)\n self.writeVInt(35)\n\n self.writeVInt(1)\n\n self.writeVInt(4)\n self.writeVInt(7)\n self.writeVInt(1)\n self.writeVInt(0)\n self.writeVInt(72292)\n self.writeVInt(10) \n self.writeDataReference(15, 21) # map id\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeString(\"\")\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False) # MapMaker map structure array\n self.writeVInt(0)\n self.writeBoolean(False) # Power League array entry\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeVInt(-1)\n self.writeVInt(0) \n self.writeVInt(0) \n self.writeVInt(0) \n self.writeBoolean(False) \n\n self.writeVInt(0)\n \n ByteStreamHelper.encodeIntList(self, [20, 35, 75, 140, 290, 480, 800, 1250, 1875, 2800])\n ByteStreamHelper.encodeIntList(self, [30, 80, 170, 360]) # Shop 
Coins Price\n ByteStreamHelper.encodeIntList(self, [300, 880, 2040, 4680]) # Shop Coins Amount\n\n self.writeVInt(0) \n\n self.writeVInt(1)\n self.writeVInt(41000086) # theme\n self.writeVInt(1)\n\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(2)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(-1)\n self.writeVInt(2)\n self.writeVInt(1)\n self.writeVInt(4)\n\n ByteStreamHelper.encodeIntList(self, [0, 29, 79, 169, 349, 699])\n ByteStreamHelper.encodeIntList(self, [0, 160, 450, 500, 1250, 2500])\n\n self.writeLong(0, 1) # Player ID\n\n self.writeVInt(0) # Notification factory\n \n self.writeVInt(1)\n self.writeBoolean(False)\n self.writeVInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeBoolean(False) # Login Calendar\n self.writeVInt(0)\n self.writeBoolean(True) # Starr Road\n for i in range(7):\n self.writeVInt(0)\n\n self.writeVInt(0) # Mastery\n\n #BattleCard\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n self.writeBoolean(False)\n\n self.writeVInt(0) #Brawler's BattleCards\n\n self.writeVInt(5)\n for i in range(5):\n self.writeDataReference(80, i)\n self.writeVInt(-1)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeInt(0)\n self.writeVInt(0) \n self.writeVInt(0)\n self.writeVInt(86400*24)\n self.writeVInt(0)\n self.writeVInt(0)\n\n self.writeBoolean(False)\n\n # end LogicClientHome\n\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeVLong(player.ID[0], player.ID[1])\n self.writeStringReference(player.Name)\n self.writeBoolean(player.Registered)\n self.writeInt(-1)\n\n self.writeVInt(17)\n unlocked_brawler = [i['CardID'] for x,i in player.OwnedBrawlers.items()]\n self.writeVInt(len(unlocked_brawler) + 2)\n for x in unlocked_brawler:\n self.writeDataReference(23, x)\n self.writeVInt(-1)\n self.writeVInt(1)\n\n self.writeDataReference(5, 8)\n self.writeVInt(-1)\n self.writeVInt(player.Coins)\n\n self.writeDataReference(5, 23)\n self.writeVInt(-1)\n self.writeVInt(player.Blings)\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"Trophies\"])\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroHighScore\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"HighestTrophies\"])\n\n self.writeVInt(0) # Array\n\n self.writeVInt(0) # HeroPower\n \n self.writeVInt(len(player.OwnedBrawlers)) # HeroLevel\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(i[\"PowerLevel\"]-1)\n\n self.writeVInt(0) # hero star power gadget and hypercharge\n\n self.writeVInt(len(player.OwnedBrawlers)) # HeroSeenState\n for x,i in player.OwnedBrawlers.items():\n self.writeDataReference(16, x)\n self.writeVInt(-1)\n self.writeVInt(2)\n\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n self.writeVInt(0) # Array\n\n self.writeVInt(player.Gems) # Diamonds\n self.writeVInt(player.Gems) # Free Diamonds\n self.writeVInt(10) # Player Level\n self.writeVInt(100)\n self.writeVInt(0) # 
CumulativePurchasedDiamonds or Avatar User Level Tier | 10000 < Level Tier = 3 | 1000 < Level Tier = 2 | 0 < Level Tier = 1\n self.writeVInt(100) # Battle Count\n self.writeVInt(10) # WinCount\n self.writeVInt(80) # LoseCount\n self.writeVInt(50) # WinLooseStreak\n self.writeVInt(20) # NpcWinCount\n self.writeVInt(0) # NpcLoseCount\n self.writeVInt(2) # TutorialState | shouldGoToFirstTutorialBattle = State == 0\n self.writeVInt(12)\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeString()\n self.writeVInt(0)\n self.writeVInt(0)\n self.writeVInt(1)\n\n def decode(self):\n fields = {}\n return fields\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24101\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "KeepAliveServerMessage", "path": "Heart/Packets/Server/Socket/KeepAliveServerMessage.py", "snippet": "class KeepAliveServerMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields):\n pass\n\n def decode(self):\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 20108\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "PlayerProfileMessage", "path": "Heart/Packets/Server/Home/PlayerProfileMessage.py", "snippet": "class PlayerProfileMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVLong(fields[\"PlayerHighID\"], fields[\"PlayerLowID\"])\n self.writeDataReference(16,11) # \n self.writeVInt(70)\n for i in range(70):\n self.writeDataReference(16, i)\n self.writeDataReference(0)\n self.writeVInt(500) # trophies\n self.writeVInt(1250) # highestTrophies\n self.writeVInt(11) #power level\n \n self.writeVInt(18)\n\n self.writeVInt(1) \n self.writeVInt(1) # 3v3 victories\n\n self.writeVInt(2)\n self.writeVInt(528859) # total exp\n\n self.writeVInt(3)\n self.writeVInt(3) # current trophies\n\n self.writeVInt(4)\n self.writeVInt(4) # highest trophies\n\n self.writeVInt(5) \n self.writeVInt(5) # unlocked brawler?\n\n self.writeVInt(8)\n self.writeVInt(6) # solo victories\n\n self.writeVInt(11) \n self.writeVInt(7) # duo victories\n\n self.writeVInt(9) \n self.writeVInt(8) # highest level robo rumble\n\n self.writeVInt(12) \n self.writeVInt(9) # highest level boss fight\n\n self.writeVInt(13)\n self.writeVInt(10) # highest power league points\n\n self.writeVInt(14)\n self.writeVInt(11) # some power league stuff\n\n self.writeVInt(15)\n self.writeVInt(12) # most challenge win\n\n self.writeVInt(16) #highest level city rampage\n self.writeVInt(13)\n\n self.writeVInt(18) #highest solo power league rank\n self.writeVInt(14)\n\n self.writeVInt(17) #highest team power league rank\n self.writeVInt(15)\n\n self.writeVInt(19) # highest Club league rank\n self.writeVInt(16)\n\n self.writeVInt(20) # number fame\n self.writeVInt(1000)\n\n self.writeVInt(21)\n self.writeVInt(502052) #v50\n\n self.writeString(player.Name) #PlayerInfo\n self.writeVInt(100)\n self.writeVInt(28000000 + player.Thumbnail)\n self.writeVInt(43000000 + player.Namecolor)\n self.writeVInt(14)\n\n self.writeBoolean(True)\n self.writeVInt(300)\n\n self.writeString(\"hello world\")\n self.writeVInt(100)\n self.writeVInt(200)\n self.writeDataReference(29, 558)\n self.writeDataReference(0)\n self.writeDataReference(0)\n self.writeDataReference(0)\n 
self.writeDataReference(0)\n\n self.writeBoolean(True) #alliance\n self.writeLong(0,1) #alliance ID\n self.writeString(\"haccers\") #alliance name\n self.writeDataReference(8,1) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(10000) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeDataReference(0)\n self.writeString(\"RU\") #location\n self.writeVInt(4) # unknown\n self.writeBoolean(True) #is Family friendly\n self.writeVInt(0)\n \n\n self.writeDataReference(25, 1) #alliance role\n self.writeVInt(16)\n\n def decode(self):\n pass\n # fields = {}\n # fields[\"PlayerCount\"] = self.readVInt()\n # fields[\"Text\"] = self.readString()\n # fields[\"Unk1\"] = self.readVInt()\n # super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24113\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "MyAllianceMessage", "path": "Heart/Packets/Server/Home/MyAllianceMessage.py", "snippet": "class MyAllianceMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeVInt(1) # Online people in alliance\n self.writeBoolean(True) # isInAlliance\n self.writeDataReference(25, 4)\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(3) # type\n self.writeVInt(1) # member count\n self.writeVInt(9500) # total trophies\n self.writeVInt(1) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(3) # unknown\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24399\n\n def getMessageVersion(self):\n return self.messageVersion" }, { "identifier": "AllianceDataMessage", "path": "Heart/Packets/Server/Home/AllianceDataMessage.py", "snippet": "class AllianceDataMessage(PiranhaMessage):\n def __init__(self, messageData):\n super().__init__(messageData)\n self.messageVersion = 0\n\n def encode(self, fields, player):\n self.writeBoolean(True)\n\n self.writeLong(0, 1) # alliance ID\n self.writeString(player.ContentCreator) # alliance name\n self.writeDataReference(8, 37) # alliance icon\n self.writeVInt(1) # type\n self.writeVInt(1) # member count\n self.writeVInt(player.Trophies) # total trophies\n self.writeVInt(0) # minimum trophies to enter\n self.writeVInt(0) # 0\n self.writeString('RU') # location\n self.writeVInt(1) # people online\n self.writeBoolean(True) # isFamilyFriendly\n self.writeVInt(0)\n\n self.writeString(\"this is the hacciest club in the world\")\n\n self.writeVInt(1) # member count\n self.writeLong(player.ID[0], player.ID[1]) # player ID\n self.writeVInt(2) # role\n self.writeVInt(player.Trophies) # trophies\n self.writeVInt(0) # status: 0=offline 2=online\n self.writeVInt(1) # last connected time seconds ?\n highestPowerLeagueRank = 2\n self.writeVInt(highestPowerLeagueRank)\n if highestPowerLeagueRank != 0:\n self.writeVInt(2) #solo\n self.writeVInt(1) #duo\n self.writeBoolean(False) # boolean always false?\n\n self.writeString(player.Name) # player name\n self.writeVInt(100) # VInt always 100\n self.writeVInt(28000000 + player.Thumbnail) # thumbnail\n self.writeVInt(43000000 + 
player.Namecolor) # name color\n self.writeVInt(46000000 + player.Namecolor)\n\n self.writeVInt(-1) # most people have it -1 but some with something\n self.writeBoolean(False) # whats this ? only 2/30 people have it true in my club\n week = 58 # week 58 of club league as of 2023/07/05, this number is 0 if you just arrived in the club\n self.writeVInt(week)\n if week != 0: # club league week number?\n self.writeVInt(3) # day\n self.writeVInt(18) # total club trophies earned\n self.writeVInt(0) # event day club trophies earned\n self.writeVInt(8) # total tickets used\n self.writeVInt(0) # event day tickets used\n self.writeVInt(6) # event day max tickets\n self.writeVInt(6) # event day tickets left\n self.writeVInt(0) # event day player ranking\n self.writeBoolean(True) # everyone have it to true\n self.writeVInt(200) # player experience lvl but why tf it doesn't show for some people\n\n def decode(self):\n fields = {}\n super().decode(fields)\n return {}\n\n def execute(message, calling_instance, fields):\n pass\n\n def getMessageType(self):\n return 24301\n\n def getMessageVersion(self):\n return self.messageVersion" } ]
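The server packet snippets in the context list above all follow one contract inherited from a PiranhaMessage base class that is not itself included in this record: encode(fields[, player]) serializes a fields dict through typed write helpers (writeVInt, writeBoolean, writeString, writeDataReference, ...), decode() reads the same layout back into a dict, and getMessageType() returns the numeric packet id. Below is a minimal round-trip sketch of that contract, reusing the OutOfSyncMessage layout from above; the byte-stream base here is a simplified stand-in (fixed-width integers instead of the real variable-length encoding), an assumption for illustration rather than the repository's implementation.

import struct

class PiranhaMessage:  # simplified stand-in, not the repository's base class
    def __init__(self, messageData=b""):
        self.messagePayload = bytearray(messageData)
        self.offset = 0

    def writeVInt(self, value):
        # The real base uses a variable-length (zigzag) encoding; fixed-width
        # big-endian ints keep this sketch short and runnable.
        self.messagePayload += struct.pack(">i", value)

    def readVInt(self):
        value = struct.unpack_from(">i", self.messagePayload, self.offset)[0]
        self.offset += 4
        return value

    def decode(self, fields):
        pass  # the real base records/validates the decoded fields

class OutOfSyncMessage(PiranhaMessage):
    # Field layout copied from the OutOfSyncMessage snippet above: three VInts.
    def encode(self, fields):
        self.writeVInt(fields["ServerChecksum"])
        self.writeVInt(fields["ClientChecksum"])
        self.writeVInt(fields["Tick"])

    def decode(self):
        fields = {"ServerChecksum": self.readVInt(),
                  "ClientChecksum": self.readVInt(),
                  "Tick": self.readVInt()}
        super().decode(fields)
        return fields

    def getMessageType(self):
        return 24104

msg = OutOfSyncMessage()
msg.encode({"ServerChecksum": 123, "ClientChecksum": 456, "Tick": 7})
assert OutOfSyncMessage(bytes(msg.messagePayload)).decode()["Tick"] == 7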
from Heart.Packets.Client.Authentification.ClientHelloMessage import ClientHelloMessage from Heart.Packets.Client.Authentification.LoginMessage import LoginMessage from Heart.Packets.Client.Battle.AskForBattleEndMessage import AskForBattleEndMessage from Heart.Packets.Client.Home.ChangeAvatarNameMessage import ChangeAvatarNameMessage from Heart.Packets.Client.Home.EndClientTurnMessage import EndClientTurnMessage from Heart.Packets.Client.Home.GoHomeFromOfflinePractiseMessage import GoHomeFromOfflinePractiseMessage from Heart.Packets.Client.Home.GoHomeMessage import GoHomeMessage from Heart.Packets.Client.Home.GetPlayerProfileMessage import GetPlayerProfileMessage from Heart.Packets.Client.Home.AskForAllianceDataMessage import AskForAllianceDataMessage from Heart.Packets.Client.Socket.KeepAliveMessage import KeepAliveMessage from Heart.Packets.Server.Authentification.LoginFailedMessage import LoginFailedMessage from Heart.Packets.Server.Authentification.LoginOkMessage import LoginOkMessage from Heart.Packets.Server.Authentification.OutOfSyncMessage import OutOfSyncMessage from Heart.Packets.Server.Authentification.ServerHelloMessage import ServerHelloMessage from Heart.Packets.Server.Battle.BattleEndMessage import BattleEndMessage from Heart.Packets.Server.Home.AvailableServerCommandMessage import AvailableServerCommandMessage from Heart.Packets.Server.Home.LobbyInfoMessage import LobbyInfoMessage from Heart.Packets.Server.Home.OwnHomeDataMessage import OwnHomeDataMessage from Heart.Packets.Server.Socket.KeepAliveServerMessage import KeepAliveServerMessage from Heart.Packets.Server.Home.PlayerProfileMessage import PlayerProfileMessage from Heart.Packets.Server.Home.MyAllianceMessage import MyAllianceMessage from Heart.Packets.Server.Home.AllianceDataMessage import AllianceDataMessage
16878
14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 
'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage',
class LogicLaserMessageFactory: messagesList = { 10055: 'AskPlayerJWTokenMessage', 10099: 'ClientCryptoErrorMessage', 10100: ClientHelloMessage, 10101: LoginMessage, 10102: 'LoginUsingSessionMessage', 10103: 'CreateAccountMessage', 10107: 'ClientCapabilitiesMessage', 10108: KeepAliveMessage, 10109: 'UdpCheckConnectionMessage', 10110: 'AnalyticEventMessage', 10111: 'AccountIdentifiersMessage', 10112: 'AuthenticationCheckMessage', 10113: 'SetDeviceTokenMessage', 10116: 'ResetAccountMessage', 10117: 'ReportUserMessage', 10118: 'AccountSwitchedMessage', 10119: 'ReportAllianceStreamMessage', 10121: 'UnlockAccountMessage', 10150: 'AppleBillingRequestMessage', 10151: 'GoogleBillingRequestMessage', 10152: 'TencentBillingRequestMessage', 10153: 'CafeBazaarBillingRequestMessage', 10159: 'KunlunBillingRequestMessage', 10160: 'BillingCancelledByClientMessage', 10177: 'ClientInfoMessage', 10212: ChangeAvatarNameMessage, 10309: 'GetAllianceInviteTokenMessage', 10321: 'AttributionEventMessage', 10401: 'CreateGameMessage', 10501: 'AcceptFriendMessage', 10502: 'AddFriendMessage', 10503: 'AskForAddableFriendsMessage', 10504: 'AskForFriendListMessage', 10506: 'RemoveFriendMessage', 10507: 'AddFriendByEmailMessage', 10509: 'AddFriendByAvatarNameAndCodeMessage', 10512: 'AskForPlayingGamecenterFriendsMessage', 10513: 'AskForPlayingFacebookFriendsMessage', 10514: 'AskForPlayingKakaoFriendsMessage', 10515: 'AskForPlayingTencentFriendsMessage', 10516: 'AskForPlayingLineFriendsMessage', 10517: 'AskForPlayingSupercellFriendsMessage', 10523: 'YoozooBillingRequestMessage', 10555: 'ClientInputMessage', 10576: 'SetBlockFriendRequestsMessage', 10599: 'AskForFriendSuggestionsMessage', 10636: 'SCIDBindAccountMessage', 11736: 'SCIDLogoutAllDevicesMessage', 12100: 'CreatePlayerMapMessage', 12101: 'DeletePlayerMapMessage', 12102: 'GetPlayerMapsMessage', 12103: 'UpdatePlayerMapMessage', 12104: 'SubmitPlayerMapMessage', 12105: 'PublishPlayerMapMessage', 12106: 'ChangePlayerMapNameMessage', 12107: 'EnterMapEditorMessage', 12108: 'GoHomeFromMapEditorMessage', 12110: 'TeamSetPlayerMapMessage', 12111: 'SignoffPlayerMapMessage', 12125: 'ReportPlayerMapMessage', 12152: 'RankedMatchBanHeroMessage', 12155: 'RankedMatchPickHeroMessage', 12157: 'RankedMatchUpdateHeroDataMessage', 12905: 'GetCurrentBattleReplayDataMessage', 12998: 'SetCountryMessage', 13922: 'AcceptTokenFriendMessage', 14101: GoHomeMessage, 14102: EndClientTurnMessage, 14103: 'StartGameMessage', 14104: 'StartSpectateMessage', 14105: 'HomeLogicStoppedMessage', 14106: 'CancelMatchmakingMessage', 14107: 'StopSpectateMessage', 14108: 'GoHomeFromSpectateMessage', #14109: GoHomeFromOfflinePractiseMessage, //before v50 14110: AskForBattleEndMessage, #14113: GetPlayerProfileMessage, //before v50 14114: 'GetBattleLogMessage', 14115: 'BattleLogViewReplayMessage', 14116: 'ViewReplayByStringMessage', 14117: 'RequestMatchCancelMessage', 14118: 'SinglePlayerMatchRequestMessage', 14166: 'ChronosEventSeenMessage', 14167: 'ChronosEventSeenMessage', 14177: 'PlayAgainMessage', 14178: 'DebugCommandMessage', 14199: 'LookForGameRoomRequestMessage', 14211: 'UnbindFacebookAccountMessage', 14201: 'BindFacebookAccountMessage', 14202: 'BindKakaoAccountMessage', 14203: 'BingLineAccountMessage', 14212: 'BindGamecenterAccountMessage', 14213: 'UnbindKakaoAccountMessage', 14214: 'UnbindLineAccountMessage', 14262: 'BindGoogleServiceAccountMessage', 14266: 'BindTencentAccountMessage', 14268: 'TencentCheckCanPayMessage', 14276: 'TencentAntiAddictionInstructionExecutedMessage', 14277: 
'GetSeasonRewardsMessage', 14299: 'SetAllianceCountryMessage', 14301: 'CreateAllianceMessage', 14302: AskForAllianceDataMessage, 14303: 'AskForJoinableAlliancesListMessage', 14304: 'AskForAllianceStreamMessage', 14305: 'JoinAllianceMessage', 14306: 'ChangeAllianceMemberRoleMessage', 14307: 'KickAllianceMemberMessage', 14308: 'LeaveAllianceMessage', 14315: 'ChatToAllianceStreamMessage', 14316: 'ChangeAllianceSettingsMessage', 14317: 'RequestJoinAllianceMessage', 14321: 'RespondToAllianceJoinRequestMessage', 14322: 'SendAllianceInvitationMessage', 14323: 'JoinAllianceUsingInvitationMessage', 14324: 'SearchAlliancesMessage', 14326: 'SendAllianceInvitationToFriendMessage', 14330: 'SendAllianceMailMessage', 14350: 'TeamCreateMessage', 14351: 'TeamJoinMessage', 14352: 'TeamKickMessage', 14353: 'TeamLeaveMessage', 14354: 'TeamChangeMemberSettingsMessage', 14355: 'TeamSetMemberReadyMessage', 14356: 'TeamTogglePractiseMessage', 14357: 'TeamToggleMemberSideMessage', 14358: 'TeamSpectateMessage', 14359: 'TeamChatMessage', 14360: 'TeamPostAdMessage', 14361: 'TeamMemberStatusMessage', 14362: 'TeamSetEventMessage', 14363: 'TeamSetLocationMessage', 14364: 'TeamReportChatMessage', 14365: 'TeamInviteMessage', 14366: 'PlayerStatusMessage', 14367: 'TeamClearInviteMessage', 14368: 'TeamInviteResponseMessage', 14369: 'TeamPremadeChatMessage', 14370: 'TeamAllianceMemberInviteMessage', 14371: 'TeamJoinOrCreateGameRoomMessage', 14372: 'TeamToggleSettingsMessage', 14373: 'TeamBotSlotDisableMessage', 14403: 'GetLeaderboardMessage', 14405: 'AskForAvatarStreamMessage', 14406: 'AskForBattleReplayStreamMessage', 14418: 'RemoveAvatarStreamEntryMessage', 14469: 'AlliancePremadeChatMessage', 14479: 'TeamInvitationResponseMessage', 14600: 'AvatarNameCheckRequestMessage', 14700: 'ListBrawlTvChannelsMessage', 14701: 'TuneBrawlTvChannelMessage', 14715: 'SendGlobalChatLineMessage', 14777: 'SetInvitesBlockedMessage', 14778: 'SetTeamChatMutedMessage', 14867: 'SetRegionMessage', 14880: 'TeamRequestJoinCancelMessage', 14881: 'TeamRequestJoinMessage', 14882: 'TeamRequestJoinApproveMessage', 15081: GetPlayerProfileMessage, #v50 15793: 'GetTokenFriendMessage', 16000: 'LogicDeviceLinkCodeRequestMessage', 16001: 'LogicDeviceLinkMenuClosedMessage', 16002: 'LogicDeviceLinkEnterCodeMessage', 16003: 'LogicDeviceLinkConfirmYesMessage', 16939: 'AskApiTokenMessage', 17000: 'LogicAccountTransferCodeRequestMessage', 17190: 'JoinAllianceUsingTokenMessage', 17337: 'UnbotifyReportMessage', 17338: 'AdjustPackageMessage', 17750: GoHomeFromOfflinePractiseMessage, #v50 18686: 'SetSupportedCreatorMessage', 19001: 'LatencyTestResultMessage', 19002: 'UdpLatencyTestRequestMessage', 19003: 'TriggerStartLatencyTestMessage', 19004: 'RequestLatencyTestStatusMessage', 20000: 'SetEncryptionMessage', 20100: ServerHelloMessage, 20101: 'CreateAccountOkMessage', 20103: LoginFailedMessage, 20104: LoginOkMessage, 20105: 'FriendListMessage', 20106: 'FriendListUpdateMessage', 20107: 'AddableFriendsMessage', 20108: KeepAliveServerMessage, 20109: 'FriendOnlineStatusMessage', 20110: 'FriendLoggedInMessage', 20111: 'FriendLoggedOutMessage', 20112: 'AddFriendFailedMessage', 20117: 'ReportUserStatusMessage', 20118: 'ChatAccountBanStatusMessage', 20121: 'BillingRequestFailedMessage', 20132: 'UnlockAccountOkMessage', 20133: 'UnlockAccountFailedMessage', 20151: 'AppleBillingProcessedByServerMessage', 20152: 'GoogleBillingProcessedByServerMessage', 20153: 'TencentBillingProcessedByServerMessage', 20154: 'CafeBazaarBillingProcessedByServerMessage', 20156: 
'KunlunBillingProcessedByServerMessage', 20161: 'ShutdownStartedMessage', 20171: 'PersonalBreakStartedMessage', 20173: 'YoozooBillingProcessedByServerMessage', 20199: 'FriendSuggestionsMessage', 20205: 'AvatarNameChangeFailedMessage', 20206: 'AvatarOnlineStatusUpdated', 20207: 'AllianceOnlineStatusUpdatedMessage', 20300: 'AvatarNameCheckResponseMessage', 20402: 'CreateGameFailedMessage', 20405: 'MatchMakingStatusMessage', 20406: 'MatchMakingCancelledMessage', 20501: 'AcceptFriendFailedMessage', 20523: 'YoozooOrderAvailableMessage', 20545: 'YoozooOrderDeliveryFailedMessage', 20559: 'StartLoadingMessage', 20801: 'NotificationMessage', 20802: 'OpponentRejoinsMatchNotificationMessage', 20931: 'AntiAddictionDataUpdatedMessage', 22089: 'GetTokenFriendResultMessage', 22100: 'CreatePlayerMapResponseMessage', 22101: 'DeletePlayerMapResponseMessage', 22102: 'PlayerMapsMessage', 22103: 'UpdatePlayerMapResponseMessage', 22104: 'SubmitPlayerMapResponseMessage', 22105: 'PublishPlayerMapResponseMessage', 22106: 'ChangePlayerMapNameMResponseMessage', 22107: 'PlayerMapInfoUpdatedMessage', 22109: 'DebugPlayerMapReviewResultOverrideSetMessage', 22111: 'PlayerMapGreenlightedMessage', 22125: 'ReportPlayerMapResponseMessage', 22150: 'RankedMatchStartMessage', 22151: 'RankedMatchBanStartedMessage', 22152: 'RankedMatchBanHeroResponseMessage', 22153: 'RankedMatchBanEndedMessage', 22154: 'RankedMatchPickStartedMessage', 22155: 'RankedMatchPickHeroFailedMessage', 22156: 'RankedMatchHeroPickedMessage', 22157: 'RankedMatchHeroDataUpdatedMessage', 22158: 'RankedMatchFinalPreparationStartedMessage', 22159: 'RankedMatchTerminatedMessage', 22202: 'MapPreviewMessage', 22377: 'GoogleServiceAccountBoundMessage', 22687: 'GamecenterAccountAlreadyBoundMessage', 22957: 'PvpMatchmakeNotificationMessage', 23067: 'SCIDLogoutAllDevicesResultMessage', 23302: 'GetAllianceInviteTokenResultMessage', 23456: BattleEndMessage, 23457: LobbyInfoMessage, 23458: 'BattleLogMessage', 23459: 'BattleLogReplayAvailableMessage', 23494: 'GoogleServiceAccountAlreadyBoundMessage', 23774: 'PlayerJWTokenMessage',
24101: OwnHomeDataMessage,
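The next_line value above continues the messagesList literal from the code fields: packet id 24101 maps to the OwnHomeDataMessage class pulled in by this record's import block. A hedged sketch of how such an id-to-handler table is typically consumed follows; the create_message name and dispatch logic are illustrative assumptions, and only the class-or-string entry layout comes from the record itself.

def create_message(messages_list, message_type, payload=b""):
    # Entries stored as classes are instantiable handlers; entries stored as
    # plain strings mark packets whose name is known but unimplemented.
    entry = messages_list.get(message_type)
    if entry is None:
        raise KeyError(f"unknown packet id {message_type}")
    if isinstance(entry, str):
        return None  # name known, no handler class yet
    return entry(payload)

# Under this assumption, create_message(LogicLaserMessageFactory.messagesList,
# 24101) would return an OwnHomeDataMessage instance ready to encode.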
17
2023-12-14 18:57:56+00:00
24k
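That completes the record: token count, the cropped and full code fields, the gold next_line, the index of the gold context snippet, a creation timestamp, and a level bucket. The sketch below shows one way a row with these fields can drive next-line completion evaluation; the JSONL storage assumption, the prompt construction, and complete_fn are inferred from the dump's schema and are illustrative, not an official loader.

import json

def build_prompt(row):
    # Condition on the gold context snippet, the file's import block, and the
    # code preceding the held-out line. The field names here (context,
    # gold_snippet_index, import_statement, cropped_code, next_line) are read
    # off this dump's schema and are assumptions about the storage layout.
    gold = row["context"][row["gold_snippet_index"]]["snippet"]
    return "\n".join([gold, row["import_statement"], row["cropped_code"]])

def exact_match_rate(rows, complete_fn):
    # complete_fn: any callable mapping a prompt string to one predicted line.
    hits = sum(complete_fn(build_prompt(r)).strip() == r["next_line"].strip()
               for r in rows)
    return hits / len(rows)

# rows = [json.loads(line) for line in open("dataset.jsonl")]  # assumed JSONL file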
GXNU-ZhongLab/ODTrack
lib/train/base_functions.py
[ { "identifier": "Lasot", "path": "lib/train/dataset/lasot.py", "snippet": "class Lasot(BaseVideoDataset):\n \"\"\" LaSOT dataset.\n\n Publication:\n LaSOT: A High-quality Benchmark for Large-scale Single Object Tracking\n Heng Fan, Liting Lin, Fan Yang, Peng Chu, Ge Deng, Sijia Yu, Hexin Bai, Yong Xu, Chunyuan Liao and Haibin Ling\n CVPR, 2019\n https://arxiv.org/pdf/1809.07845.pdf\n\n Download the dataset from https://cis.temple.edu/lasot/download.html\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_dir if root is None else root\n super().__init__('LaSOT', root, image_loader)\n\n # Keep a list of all classes\n self.class_list = [f for f in os.listdir(self.root)]\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n sequence_list = pandas.read_csv(file_path, header=None).squeeze(\"columns\").values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = 
os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n with open(out_of_view_file, 'r') as f:\n out_of_view = torch.ByteTensor([int(v) for v in list(csv.reader(f))[0]])\n\n target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(self.root, class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k", "path": "lib/train/dataset/got10k.py", "snippet": "class Got10k(BaseVideoDataset):\n \"\"\" GOT-10k dataset.\n\n Publication:\n GOT-10k: A Large High-Diversity Benchmark for Generic Object Tracking in the Wild\n Lianghua Huang, Xin Zhao, and Kaiqi Huang\n arXiv:1810.11981, 2018\n https://arxiv.org/pdf/1810.11981.pdf\n\n Download dataset from http://got-10k.aitestunion.com/downloads\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. 
The complete dataset is used by default\n \"\"\"\n root = env_settings().got10k_dir if root is None else root\n super().__init__('GOT10k', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(ltr_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n # seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n seq_ids = pandas.read_csv(file_path, header=None, dtype=np.int64).squeeze(\"columns\").values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n sequence_meta_info = {s: self._read_meta(os.path.join(self.root, s)) for s in self.sequence_list}\n return sequence_meta_info\n\n def _read_meta(self, seq_path):\n try:\n with open(os.path.join(seq_path, 'meta_info.ini')) as f:\n meta_info = f.readlines()\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1][:-1],\n 'motion_class': meta_info[6].split(': ')[-1][:-1],\n 'major_class': meta_info[7].split(': ')[-1][:-1],\n 'root_class': meta_info[8].split(': ')[-1][:-1],\n 'motion_adverb': meta_info[9].split(': ')[-1][:-1]})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n with open(os.path.join(self.root, 'list.txt')) as f:\n dir_list = list(csv.reader(f))\n dir_list = [dir_name[0] for dir_name in dir_list]\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False, low_memory=False).values\n return torch.tensor(gt)\n\n def _read_target_visible(self, seq_path):\n # Read full 
occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n\n with open(occlusion_file, 'r', newline='') as f:\n occlusion = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n with open(cover_file, 'r', newline='') as f:\n cover = torch.ByteTensor([int(v[0]) for v in csv.reader(f)])\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(self.root, self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return self.image_loader(self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "TrackingNet", "path": "lib/train/dataset/tracking_net.py", "snippet": "class TrackingNet(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_dir if root is None else root\n super().__init__('TrackingNet', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root, self.set_ids)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n bb_anno_file = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"anno\", vid_name + \".txt\")\n gt = pandas.read_csv(bb_anno_file, delimiter=',', header=None, dtype=np.float32, na_filter=False,\n low_memory=False).values\n return torch.tensor(gt)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n frame_path = os.path.join(self.root, \"TRAIN_\" + str(set_id), \"frames\", vid_name, str(frame_id) + \".jpg\")\n return self.image_loader(frame_path)\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID", "path": "lib/train/dataset/imagenetvid.py", "snippet": "class ImagenetVID(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. 
Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid\", root, image_loader)\n\n cache_file = os.path.join(root, 'cache.json')\n if os.path.isfile(cache_file):\n # If available, load the pre-processed cache file containing meta-info for each sequence\n with open(cache_file, 'r') as f:\n sequence_list_dict = json.load(f)\n\n self.sequence_list = sequence_list_dict\n else:\n # Else process the imagenet annotations and generate the cache file\n self.sequence_list = self._process_anno(root)\n\n with open(cache_file, 'w') as f:\n json.dump(self.sequence_list, f)\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join(self.root, 'Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return self.image_loader(frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta\n\n def _process_anno(self, root):\n # Builds individual tracklets\n base_vid_anno_path = os.path.join(root, 'Annotations', 'VID', 'train')\n\n all_sequences = []\n for set in sorted(os.listdir(base_vid_anno_path)):\n set_id = int(set.split('_')[-1])\n for vid in sorted(os.listdir(os.path.join(base_vid_anno_path, set))):\n\n vid_id = int(vid.split('_')[-1])\n anno_files = sorted(os.listdir(os.path.join(base_vid_anno_path, set, vid)))\n\n frame1_anno = ET.parse(os.path.join(base_vid_anno_path, set, vid, anno_files[0]))\n image_size = [int(frame1_anno.find('size/width').text), 
int(frame1_anno.find('size/height').text)]\n\n objects = [ET.ElementTree(file=os.path.join(base_vid_anno_path, set, vid, f)).findall('object')\n for f in anno_files]\n\n tracklets = {}\n\n # Find all tracklets along with start frame\n for f_id, all_targets in enumerate(objects):\n for target in all_targets:\n tracklet_id = target.find('trackid').text\n if tracklet_id not in tracklets:\n tracklets[tracklet_id] = f_id\n\n for tracklet_id, tracklet_start in tracklets.items():\n tracklet_anno = []\n target_visible = []\n class_name_id = None\n\n for f_id in range(tracklet_start, len(objects)):\n found = False\n for target in objects[f_id]:\n if target.find('trackid').text == tracklet_id:\n if not class_name_id:\n class_name_id = target.find('name').text\n x1 = int(target.find('bndbox/xmin').text)\n y1 = int(target.find('bndbox/ymin').text)\n x2 = int(target.find('bndbox/xmax').text)\n y2 = int(target.find('bndbox/ymax').text)\n\n tracklet_anno.append([x1, y1, x2 - x1, y2 - y1])\n target_visible.append(target.find('occluded').text == '0')\n\n found = True\n break\n if not found:\n break\n\n new_sequence = {'set_id': set_id, 'vid_id': vid_id, 'class_name': class_name_id,\n 'start_frame': tracklet_start, 'anno': tracklet_anno,\n 'target_visible': target_visible, 'image_size': image_size}\n all_sequences.append(new_sequence)\n\n return all_sequences" }, { "identifier": "MSCOCOSeq", "path": "lib/train/dataset/coco_seq.py", "snippet": "class MSCOCOSeq(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO', root, image_loader)\n\n self.img_pth = os.path.join(root, 'images/{}{}/'.format(split, version))\n self.anno_path = os.path.join(root, 'annotations/instances_{}{}.json'.format(split, version))\n\n # Load the COCO set.\n self.coco_set = COCO(self.anno_path)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n img = self.image_loader(os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. 
Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "Got10k_lmdb", "path": "lib/train/dataset/got10k_lmdb.py", "snippet": "class Got10k_lmdb(BaseVideoDataset):\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, split=None, seq_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the got-10k training data. Note: This should point to the 'train' folder inside GOT-10k\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n split - 'train' or 'val'. Note: The validation split here is a subset of the official got-10k train split,\n not NOT the official got-10k validation split. To use the official validation split, provide that as\n the root folder instead.\n seq_ids - List containing the ids of the videos to be used for training. Note: Only one of 'split' or 'seq_ids'\n options can be used at the same time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n use_lmdb - whether the dataset is stored in lmdb format\n \"\"\"\n root = env_settings().got10k_lmdb_dir if root is None else root\n super().__init__('GOT10k_lmdb', root, image_loader)\n\n # all folders inside the root\n self.sequence_list = self._get_sequence_list()\n\n # seq_id is the index of the folder inside the got10k root path\n if split is not None:\n if seq_ids is not None:\n raise ValueError('Cannot set both split_name and seq_ids.')\n train_lib_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_split.txt')\n elif split == 'val':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_val_split.txt')\n elif split == 'train_full':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_train_full_split.txt')\n elif split == 'vottrain':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_train_split.txt')\n elif split == 'votval':\n file_path = os.path.join(train_lib_path, 'data_specs', 'got10k_vot_val_split.txt')\n else:\n raise ValueError('Unknown split name.')\n seq_ids = pandas.read_csv(file_path, header=None, squeeze=True, dtype=np.int64).values.tolist()\n elif seq_ids is None:\n seq_ids = list(range(0, len(self.sequence_list)))\n\n self.sequence_list = [self.sequence_list[i] for i in seq_ids]\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.sequence_meta_info = self._load_meta_info()\n self.seq_per_class = self._build_seq_per_class()\n\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def get_name(self):\n return 'got10k_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def _load_meta_info(self):\n def _read_meta(meta_info):\n\n object_meta = OrderedDict({'object_class_name': meta_info[5].split(': ')[-1],\n 'motion_class': meta_info[6].split(': ')[-1],\n 'major_class': meta_info[7].split(': ')[-1],\n 'root_class': 
meta_info[8].split(': ')[-1],\n 'motion_adverb': meta_info[9].split(': ')[-1]})\n\n return object_meta\n sequence_meta_info = {}\n for s in self.sequence_list:\n try:\n meta_str = decode_str(self.root, \"train/%s/meta_info.ini\" %s)\n sequence_meta_info[s] = _read_meta(meta_str.split('\\n'))\n except:\n sequence_meta_info[s] = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return sequence_meta_info\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n\n for i, s in enumerate(self.sequence_list):\n object_class = self.sequence_meta_info[s]['object_class_name']\n if object_class in seq_per_class:\n seq_per_class[object_class].append(i)\n else:\n seq_per_class[object_class] = [i]\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _get_sequence_list(self):\n dir_str = decode_str(self.root, 'train/list.txt')\n dir_list = dir_str.split('\\n')\n return dir_list\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line in got10k is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # full occlusion and out_of_view files\n occlusion_file = os.path.join(seq_path, \"absence.label\")\n cover_file = os.path.join(seq_path, \"cover.label\")\n # Read these files\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split('\\n')[:-1])) # the last line in got10k is empty\n occlusion = torch.ByteTensor(occ_list)\n cover_list = list(map(int, decode_str(self.root, cover_file).split('\\n')[:-1])) # the last line in got10k is empty\n cover = torch.ByteTensor(cover_list)\n\n target_visible = ~occlusion & (cover>0).byte()\n\n visible_ratio = cover.float() / 8\n return target_visible, visible_ratio\n\n def _get_sequence_path(self, seq_id):\n return os.path.join(\"train\", self.sequence_list[seq_id])\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible, visible_ratio = self._read_target_visible(seq_path)\n visible = visible & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible, 'visible_ratio': visible_ratio}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def get_class_name(self, seq_id):\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n return obj_meta['object_class_name']\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n obj_meta = self.sequence_meta_info[self.sequence_list[seq_id]]\n\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n return frame_list, anno_frames, obj_meta" }, { "identifier": "Lasot_lmdb", "path": "lib/train/dataset/lasot_lmdb.py", "snippet": "class Lasot_lmdb(BaseVideoDataset):\n\n def __init__(self, 
root=None, image_loader=jpeg4py_loader, vid_ids=None, split=None, data_fraction=None):\n \"\"\"\n args:\n root - path to the lasot dataset.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n vid_ids - List containing the ids of the videos (1 - 20) used for training. If vid_ids = [1, 3, 5], then the\n videos with subscripts -1, -3, and -5 from each class will be used for training.\n split - If split='train', the official train split (protocol-II) is used for training. Note: Only one of\n vid_ids or split option can be used at a time.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().lasot_lmdb_dir if root is None else root\n super().__init__('LaSOT_lmdb', root, image_loader)\n\n self.sequence_list = self._build_sequence_list(vid_ids, split)\n class_list = [seq_name.split('-')[0] for seq_name in self.sequence_list]\n self.class_list = []\n for ele in class_list:\n if ele not in self.class_list:\n self.class_list.append(ele)\n # Keep a list of all classes\n self.class_to_id = {cls_name: cls_id for cls_id, cls_name in enumerate(self.class_list)}\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n\n self.seq_per_class = self._build_class_list()\n\n def _build_sequence_list(self, vid_ids=None, split=None):\n if split is not None:\n if vid_ids is not None:\n raise ValueError('Cannot set both split_name and vid_ids.')\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n if split == 'train':\n file_path = os.path.join(ltr_path, 'data_specs', 'lasot_train_split.txt')\n else:\n raise ValueError('Unknown split name.')\n sequence_list = pandas.read_csv(file_path, header=None, squeeze=True).values.tolist()\n elif vid_ids is not None:\n sequence_list = [c+'-'+str(v) for c in self.class_list for v in vid_ids]\n else:\n raise ValueError('Set either split_name or vid_ids.')\n\n return sequence_list\n\n def _build_class_list(self):\n seq_per_class = {}\n for seq_id, seq_name in enumerate(self.sequence_list):\n class_name = seq_name.split('-')[0]\n if class_name in seq_per_class:\n seq_per_class[class_name].append(seq_id)\n else:\n seq_per_class[class_name] = [seq_id]\n\n return seq_per_class\n\n def get_name(self):\n return 'lasot_lmdb'\n\n def has_class_info(self):\n return True\n\n def has_occlusion_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_path):\n bb_anno_file = os.path.join(seq_path, \"groundtruth.txt\")\n gt_str_list = decode_str(self.root, bb_anno_file).split('\\n')[:-1] # the last line is empty\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def _read_target_visible(self, seq_path):\n # Read full occlusion and out_of_view\n occlusion_file = os.path.join(seq_path, \"full_occlusion.txt\")\n out_of_view_file = os.path.join(seq_path, \"out_of_view.txt\")\n\n occ_list = list(map(int, decode_str(self.root, occlusion_file).split(',')))\n occlusion = torch.ByteTensor(occ_list)\n out_view_list = list(map(int, decode_str(self.root, out_of_view_file).split(',')))\n out_of_view = torch.ByteTensor(out_view_list)\n\n 
target_visible = ~occlusion & ~out_of_view\n\n return target_visible\n\n def _get_sequence_path(self, seq_id):\n seq_name = self.sequence_list[seq_id]\n class_name = seq_name.split('-')[0]\n vid_id = seq_name.split('-')[1]\n\n return os.path.join(class_name, class_name + '-' + vid_id)\n\n def get_sequence_info(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n bbox = self._read_bb_anno(seq_path)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = self._read_target_visible(seq_path) & valid.byte()\n\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame_path(self, seq_path, frame_id):\n return os.path.join(seq_path, 'img', '{:08}.jpg'.format(frame_id+1)) # frames start from 1\n\n def _get_frame(self, seq_path, frame_id):\n return decode_img(self.root, self._get_frame_path(seq_path, frame_id))\n\n def _get_class(self, seq_path):\n raw_class = seq_path.split('/')[-2]\n return raw_class\n\n def get_class_name(self, seq_id):\n seq_path = self._get_sequence_path(seq_id)\n obj_class = self._get_class(seq_path)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n seq_path = self._get_sequence_path(seq_id)\n\n obj_class = self._get_class(seq_path)\n frame_list = [self._get_frame(seq_path, f_id) for f_id in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "ImagenetVID_lmdb", "path": "lib/train/dataset/imagenetvid_lmdb.py", "snippet": "class ImagenetVID_lmdb(BaseVideoDataset):\n \"\"\" Imagenet VID dataset.\n\n Publication:\n ImageNet Large Scale Visual Recognition Challenge\n Olga Russakovsky, Jia Deng, Hao Su, Jonathan Krause, Sanjeev Satheesh, Sean Ma, Zhiheng Huang, Andrej Karpathy,\n Aditya Khosla, Michael Bernstein, Alexander C. Berg and Li Fei-Fei\n IJCV, 2015\n https://arxiv.org/pdf/1409.0575.pdf\n\n Download the dataset from http://image-net.org/\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, min_length=0, max_target_area=1):\n \"\"\"\n args:\n root - path to the imagenet vid dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n min_length - Minimum allowed sequence length.\n max_target_area - max allowed ratio between target area and image area. 
Can be used to filter out targets\n which cover complete image.\n \"\"\"\n root = env_settings().imagenet_dir if root is None else root\n super().__init__(\"imagenetvid_lmdb\", root, image_loader)\n\n sequence_list_dict = decode_json(root, \"cache.json\")\n self.sequence_list = sequence_list_dict\n\n # Filter the sequences based on min_length and max_target_area in the first frame\n self.sequence_list = [x for x in self.sequence_list if len(x['anno']) >= min_length and\n get_target_to_image_ratio(x) < max_target_area]\n\n def get_name(self):\n return 'imagenetvid_lmdb'\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def get_sequence_info(self, seq_id):\n bb_anno = torch.Tensor(self.sequence_list[seq_id]['anno'])\n valid = (bb_anno[:, 2] > 0) & (bb_anno[:, 3] > 0)\n visible = torch.ByteTensor(self.sequence_list[seq_id]['target_visible']) & valid.byte()\n return {'bbox': bb_anno, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, sequence, frame_id):\n set_name = 'ILSVRC2015_VID_train_{:04d}'.format(sequence['set_id'])\n vid_name = 'ILSVRC2015_train_{:08d}'.format(sequence['vid_id'])\n frame_number = frame_id + sequence['start_frame']\n frame_path = os.path.join('Data', 'VID', 'train', set_name, vid_name,\n '{:06d}.JPEG'.format(frame_number))\n return decode_img(self.root, frame_path)\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n sequence = self.sequence_list[seq_id]\n\n frame_list = [self._get_frame(sequence, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n # Create anno dict\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n # added the class info to the meta info\n object_meta = OrderedDict({'object_class': sequence['class_name'],\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "MSCOCOSeq_lmdb", "path": "lib/train/dataset/coco_seq_lmdb.py", "snippet": "class MSCOCOSeq_lmdb(BaseVideoDataset):\n \"\"\" The COCO dataset. COCO is an image dataset. Thus, we treat each image as a sequence of length 1.\n\n Publication:\n Microsoft COCO: Common Objects in Context.\n Tsung-Yi Lin, Michael Maire, Serge J. Belongie, Lubomir D. Bourdev, Ross B. Girshick, James Hays, Pietro Perona,\n Deva Ramanan, Piotr Dollar and C. Lawrence Zitnick\n ECCV, 2014\n https://arxiv.org/pdf/1405.0312.pdf\n\n Download the images along with annotations from http://cocodataset.org/#download. The root folder should be\n organized as follows.\n - coco_root\n - annotations\n - instances_train2014.json\n - instances_train2017.json\n - images\n - train2014\n - train2017\n\n Note: You also have to install the coco pythonAPI from https://github.com/cocodataset/cocoapi.\n \"\"\"\n\n def __init__(self, root=None, image_loader=jpeg4py_loader, data_fraction=None, split=\"train\", version=\"2014\"):\n \"\"\"\n args:\n root - path to the coco dataset.\n image_loader (default_image_loader) - The function to read the images. If installed,\n jpeg4py (https://github.com/ajkxyz/jpeg4py) is used by default. Else,\n opencv's imread is used.\n data_fraction (None) - Fraction of images to be used. The images are selected randomly. 
If None, all the\n images will be used\n split - 'train' or 'val'.\n version - version of coco dataset (2014 or 2017)\n \"\"\"\n root = env_settings().coco_dir if root is None else root\n super().__init__('COCO_lmdb', root, image_loader)\n self.root = root\n self.img_pth = 'images/{}{}/'.format(split, version)\n self.anno_path = 'annotations/instances_{}{}.json'.format(split, version)\n\n # Load the COCO set.\n print('loading annotations into memory...')\n tic = time.time()\n coco_json = decode_json(root, self.anno_path)\n print('Done (t={:0.2f}s)'.format(time.time() - tic))\n\n self.coco_set = COCO(coco_json)\n\n self.cats = self.coco_set.cats\n\n self.class_list = self.get_class_list()\n\n self.sequence_list = self._get_sequence_list()\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list)*data_fraction))\n self.seq_per_class = self._build_seq_per_class()\n\n def _get_sequence_list(self):\n ann_list = list(self.coco_set.anns.keys())\n seq_list = [a for a in ann_list if self.coco_set.anns[a]['iscrowd'] == 0]\n\n return seq_list\n\n def is_video_sequence(self):\n return False\n\n def get_num_classes(self):\n return len(self.class_list)\n\n def get_name(self):\n return 'coco_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_class_list(self):\n class_list = []\n for cat_id in self.cats.keys():\n class_list.append(self.cats[cat_id]['name'])\n return class_list\n\n def has_segmentation_info(self):\n return True\n\n def get_num_sequences(self):\n return len(self.sequence_list)\n\n def _build_seq_per_class(self):\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = self.cats[self.coco_set.anns[seq]['category_id']]['name']\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_per_class\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def get_sequence_info(self, seq_id):\n anno = self._get_anno(seq_id)\n\n bbox = torch.Tensor(anno['bbox']).view(1, 4)\n\n mask = torch.Tensor(self.coco_set.annToMask(anno)).unsqueeze(dim=0)\n\n '''2021.1.3 To avoid too small bounding boxes. 
Here we change the threshold to 50 pixels'''\n valid = (bbox[:, 2] > 50) & (bbox[:, 3] > 50)\n\n visible = valid.clone().byte()\n\n return {'bbox': bbox, 'mask': mask, 'valid': valid, 'visible': visible}\n\n def _get_anno(self, seq_id):\n anno = self.coco_set.anns[self.sequence_list[seq_id]]\n\n return anno\n\n def _get_frames(self, seq_id):\n path = self.coco_set.loadImgs([self.coco_set.anns[self.sequence_list[seq_id]]['image_id']])[0]['file_name']\n # img = self.image_loader(os.path.join(self.img_pth, path))\n img = decode_img(self.root, os.path.join(self.img_pth, path))\n return img\n\n def get_meta_info(self, seq_id):\n try:\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n object_meta = OrderedDict({'object_class_name': cat_dict_current['name'],\n 'motion_class': None,\n 'major_class': cat_dict_current['supercategory'],\n 'root_class': None,\n 'motion_adverb': None})\n except:\n object_meta = OrderedDict({'object_class_name': None,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n return object_meta\n\n\n def get_class_name(self, seq_id):\n cat_dict_current = self.cats[self.coco_set.anns[self.sequence_list[seq_id]]['category_id']]\n return cat_dict_current['name']\n\n def get_frames(self, seq_id=None, frame_ids=None, anno=None):\n # COCO is an image dataset. Thus we replicate the image denoted by seq_id len(frame_ids) times, and return a\n # list containing these replicated images.\n frame = self._get_frames(seq_id)\n\n frame_list = [frame.copy() for _ in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[0, ...] for _ in frame_ids]\n\n object_meta = self.get_meta_info(seq_id)\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "TrackingNet_lmdb", "path": "lib/train/dataset/tracking_net_lmdb.py", "snippet": "class TrackingNet_lmdb(BaseVideoDataset):\n \"\"\" TrackingNet dataset.\n\n Publication:\n TrackingNet: A Large-Scale Dataset and Benchmark for Object Tracking in the Wild.\n Matthias Mueller,Adel Bibi, Silvio Giancola, Salman Al-Subaihi and Bernard Ghanem\n ECCV, 2018\n https://ivul.kaust.edu.sa/Documents/Publications/2018/TrackingNet%20A%20Large%20Scale%20Dataset%20and%20Benchmark%20for%20Object%20Tracking%20in%20the%20Wild.pdf\n\n Download the dataset using the toolkit https://github.com/SilvioGiancola/TrackingNet-devkit.\n \"\"\"\n def __init__(self, root=None, image_loader=jpeg4py_loader, set_ids=None, data_fraction=None):\n \"\"\"\n args:\n root - The path to the TrackingNet folder, containing the training sets.\n image_loader (jpeg4py_loader) - The function to read the images. jpeg4py (https://github.com/ajkxyz/jpeg4py)\n is used by default.\n set_ids (None) - List containing the ids of the TrackingNet sets to be used for training. If None, all the\n sets (0 - 11) will be used.\n data_fraction - Fraction of dataset to be used. The complete dataset is used by default\n \"\"\"\n root = env_settings().trackingnet_lmdb_dir if root is None else root\n super().__init__('TrackingNet_lmdb', root, image_loader)\n\n if set_ids is None:\n set_ids = [i for i in range(12)]\n\n self.set_ids = set_ids\n\n # Keep a list of all videos. 
Sequence list is a list of tuples (set_id, video_name) containing the set_id and\n # video_name for each sequence\n self.sequence_list = list_sequences(self.root)\n\n if data_fraction is not None:\n self.sequence_list = random.sample(self.sequence_list, int(len(self.sequence_list) * data_fraction))\n\n self.seq_to_class_map, self.seq_per_class = self._load_class_info()\n\n # we do not have the class_lists for the tracking net\n self.class_list = list(self.seq_per_class.keys())\n self.class_list.sort()\n\n def _load_class_info(self):\n ltr_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')\n class_map_path = os.path.join(ltr_path, 'data_specs', 'trackingnet_classmap.txt')\n\n with open(class_map_path, 'r') as f:\n seq_to_class_map = {seq_class.split('\\t')[0]: seq_class.rstrip().split('\\t')[1] for seq_class in f}\n\n seq_per_class = {}\n for i, seq in enumerate(self.sequence_list):\n class_name = seq_to_class_map.get(seq[1], 'Unknown')\n if class_name not in seq_per_class:\n seq_per_class[class_name] = [i]\n else:\n seq_per_class[class_name].append(i)\n\n return seq_to_class_map, seq_per_class\n\n def get_name(self):\n return 'trackingnet_lmdb'\n\n def has_class_info(self):\n return True\n\n def get_sequences_in_class(self, class_name):\n return self.seq_per_class[class_name]\n\n def _read_bb_anno(self, seq_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n gt_str_list = decode_str(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"anno\", vid_name + \".txt\")).split('\\n')[:-1]\n gt_list = [list(map(float, line.split(','))) for line in gt_str_list]\n gt_arr = np.array(gt_list).astype(np.float32)\n return torch.tensor(gt_arr)\n\n def get_sequence_info(self, seq_id):\n bbox = self._read_bb_anno(seq_id)\n\n valid = (bbox[:, 2] > 0) & (bbox[:, 3] > 0)\n visible = valid.clone().byte()\n return {'bbox': bbox, 'valid': valid, 'visible': visible}\n\n def _get_frame(self, seq_id, frame_id):\n set_id = self.sequence_list[seq_id][0]\n vid_name = self.sequence_list[seq_id][1]\n return decode_img(os.path.join(self.root, \"TRAIN_%d_lmdb\" % set_id),\n os.path.join(\"frames\", vid_name, str(frame_id) + \".jpg\"))\n\n def _get_class(self, seq_id):\n seq_name = self.sequence_list[seq_id][1]\n return self.seq_to_class_map[seq_name]\n\n def get_class_name(self, seq_id):\n obj_class = self._get_class(seq_id)\n\n return obj_class\n\n def get_frames(self, seq_id, frame_ids, anno=None):\n frame_list = [self._get_frame(seq_id, f) for f in frame_ids]\n\n if anno is None:\n anno = self.get_sequence_info(seq_id)\n\n anno_frames = {}\n for key, value in anno.items():\n anno_frames[key] = [value[f_id, ...].clone() for f_id in frame_ids]\n\n obj_class = self._get_class(seq_id)\n\n object_meta = OrderedDict({'object_class_name': obj_class,\n 'motion_class': None,\n 'major_class': None,\n 'root_class': None,\n 'motion_adverb': None})\n\n return frame_list, anno_frames, object_meta" }, { "identifier": "sampler", "path": "lib/train/data/sampler.py", "snippet": "def no_processing(data):\n def __init__(self, datasets, p_datasets, samples_per_epoch, max_gap,\n num_search_frames, num_template_frames=1, processing=no_processing, frame_sample_mode='causal',\n train_cls=False, pos_prob=0.5):\n def __len__(self):\n def _sample_visible_ids(self, visible, num_ids=1, min_id=None, max_id=None,\n allow_invisible=False, force_invisible=False):\n def __getitem__(self, index):\n def getitem(self):\n def getitem_cls(self):\n def get_center_box(self, H, W, 
ratio=1/8):\n def sample_seq_from_dataset(self, dataset, is_video_dataset):\n def get_one_search(self):\n def get_frame_ids_trident(self, visible):\n def get_frame_ids_stark(self, visible, valid):\nclass TrackingSampler(torch.utils.data.Dataset):\n H, W, _ = template_frames[0].shape\n H, W, _ = template_frames[0].shape\n H, W, _ = search_frames[0].shape"
},
{
"identifier": "processing",
"path": "lib/train/data/processing.py",
"snippet": "def stack_tensors(x):\n def __init__(self, transform=transforms.ToTensor(), template_transform=None, search_transform=None, joint_transform=None):\n def __call__(self, data: TensorDict):\n def __init__(self, search_area_factor, output_sz, center_jitter_factor, scale_jitter_factor,\n mode='pair', settings=None, *args, **kwargs):\n def _get_jittered_box(self, box, mode):\n def __call__(self, data: TensorDict):\nclass BaseProcessing:\nclass STARKProcessing(BaseProcessing):"
},
{
"identifier": "LTRLoader",
"path": "lib/train/data/loader.py",
"snippet": "class LTRLoader(torch.utils.data.dataloader.DataLoader):\n \"\"\"\n Data loader. Combines a dataset and a sampler, and provides\n single- or multi-process iterators over the dataset.\n\n Note: The only difference with default pytorch DataLoader is that an additional option stack_dim is available to\n select along which dimension the data should be stacked to form a batch.\n\n Arguments:\n dataset (Dataset): dataset from which to load the data.\n batch_size (int, optional): how many samples per batch to load\n (default: 1).\n shuffle (bool, optional): set to ``True`` to have the data reshuffled\n at every epoch (default: False).\n sampler (Sampler, optional): defines the strategy to draw samples from\n the dataset. If specified, ``shuffle`` must be False.\n batch_sampler (Sampler, optional): like sampler, but returns a batch of\n indices at a time. Mutually exclusive with batch_size, shuffle,\n sampler, and drop_last.\n num_workers (int, optional): how many subprocesses to use for data\n loading. 0 means that the data will be loaded in the main process.\n (default: 0)\n collate_fn (callable, optional): merges a list of samples to form a mini-batch.\n stack_dim (int): Dimension along which to stack to form the batch. (default: 0)\n pin_memory (bool, optional): If ``True``, the data loader will copy tensors\n into CUDA pinned memory before returning them.\n drop_last (bool, optional): set to ``True`` to drop the last incomplete batch,\n if the dataset size is not divisible by the batch size. If ``False`` and\n the size of dataset is not divisible by the batch size, then the last batch\n will be smaller. (default: False)\n timeout (numeric, optional): if positive, the timeout value for collecting a batch\n from workers. Should always be non-negative. (default: 0)\n worker_init_fn (callable, optional): If not None, this will be called on each\n worker subprocess with the worker id (an int in ``[0, num_workers - 1]``) as\n input, after seeding and before data loading. (default: None)\n\n .. note:: By default, each worker will have its PyTorch seed set to\n ``base_seed + worker_id``, where ``base_seed`` is a long generated\n by main process using its RNG. However, seeds for other libraries\n may be duplicated upon initializing workers (e.g., NumPy), causing\n each worker to return identical random numbers. (See\n :ref:`dataloader-workers-random-seed` section in FAQ.) You may\n use ``torch.initial_seed()`` to access the PyTorch seed for each\n worker in :attr:`worker_init_fn`, and use it to set other seeds\n before data loading.\n\n .. warning:: If ``spawn`` start method is used, :attr:`worker_init_fn` cannot be an\n unpicklable object, e.g., a lambda function.\n \"\"\"\n\n __initialized = False\n\n def __init__(self, name, dataset, training=True, batch_size=1, shuffle=False, sampler=None, batch_sampler=None,\n num_workers=0, epoch_interval=1, collate_fn=None, stack_dim=0, pin_memory=False, drop_last=False,\n timeout=0, worker_init_fn=None):\n if collate_fn is None:\n if stack_dim == 0:\n collate_fn = ltr_collate\n elif stack_dim == 1:\n collate_fn = ltr_collate_stack1\n else:\n raise ValueError('Stack dim not supported. Must be 0 or 1.')\n\n super(LTRLoader, self).__init__(dataset, batch_size, shuffle, sampler, batch_sampler,\n num_workers, collate_fn, pin_memory, drop_last,\n timeout, worker_init_fn)\n\n self.name = name\n self.training = training\n self.epoch_interval = epoch_interval\n self.stack_dim = stack_dim"
},
{
"identifier": "opencv_loader",
"path": "lib/train/data/image_loader.py",
"snippet": "def opencv_loader(path):\n \"\"\" Read image using opencv's imread function and returns it in rgb format\"\"\"\n try:\n im = cv.imread(path, cv.IMREAD_COLOR)\n\n # convert to rgb and return\n return cv.cvtColor(im, cv.COLOR_BGR2RGB)\n except Exception as e:\n print('ERROR: Could not read image \"{}\"'.format(path))\n print(e)\n return None"
},
{
"identifier": "is_main_process",
"path": "lib/utils/misc.py",
"snippet": "def is_main_process():\n return get_rank() == 0"
}
]
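The LTRLoader recorded above differs from the stock PyTorch DataLoader only in its name/training/epoch_interval bookkeeping and the stack_dim option. A minimal usage sketch, assuming a torch-style dataset already exists (dataset_train below is a hypothetical placeholder, and the argument values are illustrative):

# Hedged sketch: build a loader that stacks samples along dim 1, as the
# snippet's docstring describes; per the __init__ above, stack_dim=1 selects
# the ltr_collate_stack1 collate function.
loader_train = LTRLoader('train', dataset_train, training=True,
                         batch_size=16, shuffle=True, num_workers=4,
                         drop_last=True, stack_dim=1)
for batch in loader_train:
    pass  # each batch tensor arrives stacked along dimension 1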
import os import torch import lib.train.data.transforms as tfm from torch.utils.data.distributed import DistributedSampler from lib.train.dataset import Lasot, Got10k, MSCOCOSeq, ImagenetVID, TrackingNet from lib.train.dataset import Lasot_lmdb, Got10k_lmdb, MSCOCOSeq_lmdb, ImagenetVID_lmdb, TrackingNet_lmdb from lib.train.data import sampler, opencv_loader, processing, LTRLoader from lib.utils.misc import is_main_process
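DistributedSampler and is_main_process are imported for multi-GPU training. A common gating pattern (illustrative, not taken from this record) keeps console output on rank 0 only:

# Hedged sketch: is_main_process() is get_rank() == 0 per the snippet above,
# so this print fires once per job rather than once per worker process.
if is_main_process():
    print("preparing dataloaders")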
18653
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET"]
        # Tracking Task
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implemented")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
        if name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
    return datasets


def build_dataloaders(cfg, settings):
    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.RandomHorizontalFlip_Norm(probability=0.5),
                                    # tfm.RandomHorizontalFlip(probability=0.5),
                                    tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    # The tracking pairs processing module
    output_sz = settings.output_sz
    search_area_factor = settings.search_area_factor

    data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                       output_sz=output_sz,
                                                       center_jitter_factor=settings.center_jitter_factor,
                                                       scale_jitter_factor=settings.scale_jitter_factor,
                                                       mode='sequence',
                                                       transform=transform_train,
                                                       joint_transform=transform_joint,
                                                       settings=settings)

    data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                     output_sz=output_sz,
                                                     center_jitter_factor=settings.center_jitter_factor,
                                                     scale_jitter_factor=settings.scale_jitter_factor,
                                                     mode='sequence',
                                                     transform=transform_val,
                                                     joint_transform=transform_joint,
                                                     settings=settings)

    # Train sampler and loader
    settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1)
    settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1)
    sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal")
    train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False)
    print("sampler_mode: ", sampler_mode)
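The cropped code stops just before the training sampler is built; the record's next_line field (below) begins exactly this statement. A plausible continuation, sketched from the TrackingSampler and LTRLoader signatures in the context list; the cfg keys (DATASETS_RATIO, SAMPLE_PER_EPOCH, MAX_SAMPLE_INTERVAL), the num_workers value, and the local_rank check are assumptions, not taken from this record:

# Hedged sketch of what plausibly follows the cropped code.
dataset_train = sampler.TrackingSampler(
    datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),
    p_datasets=cfg.DATA.TRAIN.DATASETS_RATIO,           # assumed cfg key
    samples_per_epoch=cfg.DATA.TRAIN.SAMPLE_PER_EPOCH,  # assumed cfg key
    max_gap=cfg.DATA.MAX_SAMPLE_INTERVAL,               # assumed cfg key
    num_search_frames=settings.num_search,
    num_template_frames=settings.num_template,
    processing=data_processing_train,
    frame_sample_mode=sampler_mode,
    train_cls=train_cls)

# DistributedSampler is already imported above; the LTRLoader docstring notes
# that shuffle must be False whenever an explicit sampler is supplied.
train_sampler = DistributedSampler(dataset_train) if getattr(settings, 'local_rank', -1) != -1 else None
loader_train = LTRLoader('train', dataset_train, training=True,
                         batch_size=cfg.TRAIN.BATCH_SIZE,
                         shuffle=(train_sampler is None),
                         num_workers=8,                  # assumed value
                         drop_last=True, stack_dim=1,
                         sampler=train_sampler)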
# datasets related
def update_settings(settings, cfg):
    settings.print_interval = cfg.TRAIN.PRINT_INTERVAL
    settings.search_area_factor = {'template': cfg.DATA.TEMPLATE.FACTOR,
                                   'search': cfg.DATA.SEARCH.FACTOR}
    settings.output_sz = {'template': cfg.DATA.TEMPLATE.SIZE,
                          'search': cfg.DATA.SEARCH.SIZE}
    settings.center_jitter_factor = {'template': cfg.DATA.TEMPLATE.CENTER_JITTER,
                                     'search': cfg.DATA.SEARCH.CENTER_JITTER}
    settings.scale_jitter_factor = {'template': cfg.DATA.TEMPLATE.SCALE_JITTER,
                                    'search': cfg.DATA.SEARCH.SCALE_JITTER}
    settings.grad_clip_norm = cfg.TRAIN.GRAD_CLIP_NORM
    settings.print_stats = None
    settings.batchsize = cfg.TRAIN.BATCH_SIZE
    settings.scheduler_type = cfg.TRAIN.SCHEDULER.TYPE


def names2datasets(name_list: list, settings, image_loader):
    assert isinstance(name_list, list)
    datasets = []
    for name in name_list:
        assert name in ["LASOT", "GOT10K_vottrain", "GOT10K_votval", "GOT10K_train_full", "GOT10K_official_val",
                        "COCO17", "VID", "TRACKINGNET"]
        # Tracking Task
        if name == "LASOT":
            if settings.use_lmdb:
                print("Building lasot dataset from lmdb")
                datasets.append(Lasot_lmdb(settings.env.lasot_lmdb_dir, split='train', image_loader=image_loader))
            else:
                datasets.append(Lasot(settings.env.lasot_dir, split='train', image_loader=image_loader))
        if name == "GOT10K_vottrain":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='vottrain', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='vottrain', image_loader=image_loader))
        if name == "GOT10K_train_full":
            if settings.use_lmdb:
                print("Building got10k_train_full from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='train_full', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='train_full', image_loader=image_loader))
        if name == "GOT10K_votval":
            if settings.use_lmdb:
                print("Building got10k from lmdb")
                datasets.append(Got10k_lmdb(settings.env.got10k_lmdb_dir, split='votval', image_loader=image_loader))
            else:
                datasets.append(Got10k(settings.env.got10k_dir, split='votval', image_loader=image_loader))
        if name == "GOT10K_official_val":
            if settings.use_lmdb:
                raise ValueError("Not implemented")
            else:
                datasets.append(Got10k(settings.env.got10k_val_dir, split=None, image_loader=image_loader))
        if name == "COCO17":
            if settings.use_lmdb:
                print("Building COCO2017 from lmdb")
                datasets.append(MSCOCOSeq_lmdb(settings.env.coco_lmdb_dir, version="2017", image_loader=image_loader))
            else:
                datasets.append(MSCOCOSeq(settings.env.coco_dir, version="2017", image_loader=image_loader))
        if name == "VID":
            if settings.use_lmdb:
                print("Building VID from lmdb")
                datasets.append(ImagenetVID_lmdb(settings.env.imagenet_lmdb_dir, image_loader=image_loader))
            else:
                datasets.append(ImagenetVID(settings.env.imagenet_dir, image_loader=image_loader))
        if name == "TRACKINGNET":
            if settings.use_lmdb:
                print("Building TrackingNet from lmdb")
                datasets.append(TrackingNet_lmdb(settings.env.trackingnet_lmdb_dir, image_loader=image_loader))
            else:
                # raise ValueError("NOW WE CAN ONLY USE TRACKINGNET FROM LMDB")
                datasets.append(TrackingNet(settings.env.trackingnet_dir, image_loader=image_loader))
    return datasets


def build_dataloaders(cfg, settings):
    # Data transform
    transform_joint = tfm.Transform(tfm.ToGrayscale(probability=0.05),
                                    tfm.RandomHorizontalFlip(probability=0.5))

    transform_train = tfm.Transform(tfm.ToTensorAndJitter(0.2),
                                    tfm.RandomHorizontalFlip_Norm(probability=0.5),
                                    # tfm.RandomHorizontalFlip(probability=0.5),
                                    tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    transform_val = tfm.Transform(tfm.ToTensor(),
                                  tfm.Normalize(mean=cfg.DATA.MEAN, std=cfg.DATA.STD))

    # The tracking pairs processing module
    output_sz = settings.output_sz
    search_area_factor = settings.search_area_factor

    data_processing_train = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                       output_sz=output_sz,
                                                       center_jitter_factor=settings.center_jitter_factor,
                                                       scale_jitter_factor=settings.scale_jitter_factor,
                                                       mode='sequence',
                                                       transform=transform_train,
                                                       joint_transform=transform_joint,
                                                       settings=settings)

    data_processing_val = processing.STARKProcessing(search_area_factor=search_area_factor,
                                                     output_sz=output_sz,
                                                     center_jitter_factor=settings.center_jitter_factor,
                                                     scale_jitter_factor=settings.scale_jitter_factor,
                                                     mode='sequence',
                                                     transform=transform_val,
                                                     joint_transform=transform_joint,
                                                     settings=settings)

    # Train sampler and loader
    settings.num_template = getattr(cfg.DATA.TEMPLATE, "NUMBER", 1)
    settings.num_search = getattr(cfg.DATA.SEARCH, "NUMBER", 1)
    sampler_mode = getattr(cfg.DATA, "SAMPLER_MODE", "causal")
    train_cls = getattr(cfg.TRAIN, "TRAIN_CLS", False)
    print("sampler_mode: ", sampler_mode)
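For reference, names2datasets touches only two things on settings: the use_lmdb flag and the env paths object. A self-contained sketch under that assumption (the paths are hypothetical, and the real project uses its own Settings/env_settings objects rather than a namespace):

# Hedged sketch: the minimal settings surface consumed by names2datasets.
from types import SimpleNamespace

settings = SimpleNamespace(
    use_lmdb=False,
    env=SimpleNamespace(lasot_dir='/data/lasot',      # hypothetical path
                        got10k_dir='/data/got10k'))   # hypothetical path
datasets = names2datasets(["LASOT", "GOT10K_vottrain"], settings, opencv_loader)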
dataset_train = sampler.TrackingSampler(datasets=names2datasets(cfg.DATA.TRAIN.DATASETS_NAME, settings, opencv_loader),
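This next_line target supplies only TrackingSampler's first keyword argument; the remaining keywords in its recorded signature (p_datasets, samples_per_epoch, max_gap, and so on) would follow on later lines of the source file.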
13
2023-12-10 03:57:19+00:00
24k